/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/wakelock.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdhci.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

#ifdef CONFIG_MMC_SUPPORT_STLOG
#include <linux/stlog.h>
#else
#define ST_LOG(fmt, ...)
#endif

#include <trace/events/mmc.h>
static void mmc_clk_scaling(struct mmc_host *host, bool from_wq);

/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(30 * 1000) /* max time to wait in ms */

/* Flushing a large amount of cached data may take a long time. */
#define MMC_FLUSH_REQ_TIMEOUT_MS	90000 /* msec */
#define MMC_CACHE_DISBALE_TIMEOUT_MS	180000 /* msec */
static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0644);
/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume. Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
bool mmc_assume_removable;
#else
bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");
#define MMC_UPDATE_BKOPS_STATS_HPI(stats)	\
	do {					\
		spin_lock(&stats.lock);		\
		if (stats.enabled)		\
			stats.hpi++;		\
		spin_unlock(&stats.lock);	\
	} while (0)

#define MMC_UPDATE_BKOPS_STATS_SUSPEND(stats)	\
	do {					\
		spin_lock(&stats.lock);		\
		if (stats.enabled)		\
			stats.suspend++;	\
		spin_unlock(&stats.lock);	\
	} while (0)

#define MMC_UPDATE_STATS_BKOPS_SEVERITY_LEVEL(stats, level)		\
	do {								\
		if (level <= 0 || level > BKOPS_NUM_OF_SEVERITY_LEVELS)	\
			break;						\
		spin_lock(&stats.lock);					\
		if (stats.enabled)					\
			stats.bkops_level[level - 1]++;			\
		spin_unlock(&stats.lock);				\
	} while (0)
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
	data->fault_injected = true;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */
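
/*
 * Mark whether the current request makes the host a valid candidate for
 * clock scaling: only plain block reads and writes count as "busy"
 * traffic. A NULL @mrq clears the flag so that scaling transitions may
 * be forced when the other conditions are met.
 */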
static inline void
mmc_clk_scaling_update_state(struct mmc_host *host, struct mmc_request *mrq)
{
	if (mrq) {
		switch (mrq->cmd->opcode) {
		case MMC_READ_SINGLE_BLOCK:
		case MMC_READ_MULTIPLE_BLOCK:
		case MMC_WRITE_BLOCK:
		case MMC_WRITE_MULTIPLE_BLOCK:
			host->clk_scaling.invalid_state = false;
			break;
		default:
			host->clk_scaling.invalid_state = true;
			break;
		}
	} else {
		/*
		 * force clock scaling transitions,
		 * if other conditions are met
		 */
		host->clk_scaling.invalid_state = false;
	}

	return;
}
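
/*
 * Account the time the host has spent busy since start_busy was last
 * sampled, feeding the busy/total ratio used by the clock scaling
 * heuristic, then restart the busy interval.
 */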
static inline void mmc_update_clk_scaling(struct mmc_host *host)
{
	if (host->clk_scaling.enable && !host->clk_scaling.invalid_state) {
		host->clk_scaling.busy_time_us +=
			ktime_to_us(ktime_sub(ktime_get(),
					host->clk_scaling.start_busy));
		host->clk_scaling.start_busy = ktime_get();
	}
}
/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;
#ifdef CONFIG_MMC_PERF_PROFILING
	ktime_t diff;
#endif

	if (host->card)
		mmc_update_clk_scaling(host);

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
#ifdef CONFIG_MMC_PERF_PROFILING
			if (host->perf_enable) {
				diff = ktime_sub(ktime_get(), host->perf.start);
				if (mrq->data->flags == MMC_DATA_READ) {
					host->perf.rbytes_drv +=
							mrq->data->bytes_xfered;
					host->perf.rtime_drv =
						ktime_add(host->perf.rtime_drv,
							diff);
				} else {
					host->perf.wbytes_drv +=
							mrq->data->bytes_xfered;
					host->perf.wtime_drv =
						ktime_add(host->perf.wtime_drv,
							diff);
				}
			}
#endif
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
			trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}
EXPORT_SYMBOL(mmc_request_done);
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
#ifdef CONFIG_MMC_PERF_PROFILING
		if (host->perf_enable)
			host->perf.start = ktime_get();
#endif
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);

	if (host->card && host->clk_scaling.enable) {
		/*
		 * Check if we need to scale the clocks. Clocks
		 * will be scaled up immediately if the necessary
		 * conditions are satisfied. Scaling down the
		 * frequency will be done after the current thread
		 * releases the host.
		 */
		mmc_clk_scaling_update_state(host, mrq);
		if (!host->clk_scaling.invalid_state) {
			mmc_clk_scaling(host, false);
			host->clk_scaling.start_busy = ktime_get();
		}
	}

	host->ops->request(host, mrq);
}
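
/*
 * Reset all BKOPS counters to zero and (re-)enable statistics collection.
 * Typically invoked by the block driver, e.g. when statistics are reset
 * through its sysfs/debugfs hooks.
 */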
void mmc_blk_init_bkops_statistics(struct mmc_card *card)
{
	int i;
	struct mmc_bkops_stats *bkops_stats;

	if (!card)
		return;

	bkops_stats = &card->bkops_info.bkops_stats;
	spin_lock(&bkops_stats->lock);

	for (i = 0 ; i < BKOPS_NUM_OF_SEVERITY_LEVELS ; ++i)
		bkops_stats->bkops_level[i] = 0;

	bkops_stats->suspend = 0;
	bkops_stats->hpi = 0;
	bkops_stats->enabled = true;

	spin_unlock(&bkops_stats->lock);
}
EXPORT_SYMBOL(mmc_blk_init_bkops_statistics);
/**
 * mmc_start_delayed_bkops() - Start a delayed work to check for
 *      the need of non urgent BKOPS
 *
 * @card: MMC card to start BKOPS on
 */
void mmc_start_delayed_bkops(struct mmc_card *card)
{
	if (!card || !card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
		return;

	if (card->bkops_info.sectors_changed <
	    card->bkops_info.min_sectors_to_queue_delayed_work)
		return;

	pr_debug("%s: %s: queueing delayed_bkops_work\n",
		 mmc_hostname(card->host), __func__);

	/*
	 * Clearing cancel_delayed_work prevents a race condition between
	 * fetching a request by the mmcqd and the delayed work, in case
	 * the work was removed from the work queue but not started yet.
	 */
	card->bkops_info.cancel_delayed_work = false;
	queue_delayed_work(system_nrt_wq, &card->bkops_info.dw,
			   msecs_to_jiffies(
				   card->bkops_info.delay_ms));
}
EXPORT_SYMBOL(mmc_start_delayed_bkops);
/**
 * mmc_start_bkops - start BKOPS for supported cards
 * @card: MMC card to start BKOPS
 * @from_exception: A flag to indicate if this function was
 *		called due to an exception raised by the card
 *
 * Start background operations whenever requested.
 * When the urgent BKOPS bit is set in a R1 command response
 * then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;

	BUG_ON(!card);
	if (!card->ext_csd.bkops_en || !(card->host->caps2 & MMC_CAP2_INIT_BKOPS))
		return;

	if ((card->bkops_info.cancel_delayed_work) && !from_exception) {
		pr_debug("%s: %s: cancel_delayed_work was set, exit\n",
			 mmc_hostname(card->host), __func__);
		card->bkops_info.cancel_delayed_work = false;
		return;
	}

	mmc_rpm_hold(card->host, &card->dev);
	/* In case of delayed bkops we might be in race with suspend. */
	if (!mmc_try_claim_host(card->host)) {
		mmc_rpm_release(card->host, &card->dev);
		return;
	}

	/*
	 * Since cancel_delayed_work can be changed while we are waiting
	 * for the lock, we need to re-check it.
	 */
	if ((card->bkops_info.cancel_delayed_work) && !from_exception) {
		pr_debug("%s: %s: cancel_delayed_work was set, exit\n",
			 mmc_hostname(card->host), __func__);
		card->bkops_info.cancel_delayed_work = false;
		goto out;
	}

	if (mmc_card_doing_bkops(card)) {
		pr_debug("%s: %s: already doing bkops, exit\n",
			 mmc_hostname(card->host), __func__);
		goto out;
	}

	if (from_exception && mmc_card_need_bkops(card))
		goto out;

	/*
	 * If the need-BKOPS flag is already set, there is no point in
	 * checking the BKOPS status again: we already know it is needed.
	 */
	if (!mmc_card_need_bkops(card)) {
		err = mmc_read_bkops_status(card);
		if (err) {
			pr_err("%s: %s: Failed to read bkops status: %d\n",
			       mmc_hostname(card->host), __func__, err);
			goto out;
		}

		if (!card->ext_csd.raw_bkops_status)
			goto out;

		pr_info("%s: %s: raw_bkops_status=0x%x, from_exception=%d\n",
			mmc_hostname(card->host), __func__,
			card->ext_csd.raw_bkops_status,
			from_exception);
	}

	/*
	 * If the function was called due to an exception, BKOPS will be
	 * performed after handling the last pending request.
	 */
	if (from_exception) {
		pr_debug("%s: %s: Level %d from exception, exit\n",
			 mmc_hostname(card->host), __func__,
			 card->ext_csd.raw_bkops_status);
		mmc_card_set_need_bkops(card);
		goto out;
	}
	pr_info("%s: %s: Starting bkops\n", mmc_hostname(card->host), __func__);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_BKOPS_START, 1, 0, false, false);
	if (err) {
		pr_warn("%s: %s: Error %d when starting bkops\n",
			mmc_hostname(card->host), __func__, err);
		goto out;
	}
	MMC_UPDATE_STATS_BKOPS_SEVERITY_LEVEL(card->bkops_info.bkops_stats,
					card->ext_csd.raw_bkops_status);
	mmc_card_clr_need_bkops(card);
	mmc_card_set_doing_bkops(card);
out:
	mmc_release_host(card->host);
	mmc_rpm_release(card->host, &card->dev);
}
EXPORT_SYMBOL(mmc_start_bkops);
/**
 * mmc_start_idle_time_bkops() - check if a non urgent BKOPS is
 * needed
 * @work:	The idle time BKOPS work
 */
void mmc_start_idle_time_bkops(struct work_struct *work)
{
	struct mmc_card *card = container_of(work, struct mmc_card,
					     bkops_info.dw.work);

	/*
	 * Prevent a race condition between mmc_stop_bkops and the delayed
	 * BKOPS work in case the delayed work is executed on another CPU
	 */
	if (card->bkops_info.cancel_delayed_work)
		return;

	mmc_start_bkops(card, false);
}
EXPORT_SYMBOL(mmc_start_idle_time_bkops);
/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up mmc context, passed as a callback to host controller driver
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
	mrq->host->context_info.is_done_rcv = true;
	wake_up_interruptible(&mrq->host->context_info.wait);
}
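
/* Completion callback for synchronous (blocking) requests. */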
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}
/*
 * __mmc_start_data_req() - starts data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when request is completed by the card.
 * Starts data mmc request execution
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->done = mmc_wait_data_done;
	mrq->host = host;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_wait_data_done(mrq);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);

	return 0;
}
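
/*
 * Start a request synchronously: hook up the completion-based done
 * callback and kick the request off, failing fast with -ENOMEDIUM if
 * the card has been removed.
 */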
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		complete(&mrq->completion);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);
	return 0;
}
/*
 * mmc_should_stop_curr_req() - check whether the running request
 *	should be stopped
 * @host: MMC host running request.
 *
 * Checks whether it is possible and worthwhile to interrupt the currently
 * running request. Returns true in case it is worth stopping the transfer,
 * false otherwise.
 */
static bool mmc_should_stop_curr_req(struct mmc_host *host)
{
	int remainder;

	if (host->areq->cmd_flags & REQ_URGENT ||
	    !(host->areq->cmd_flags & REQ_WRITE) ||
	    (host->areq->cmd_flags & REQ_FUA))
		return false;

	mmc_host_clk_hold(host);
	remainder = (host->ops->get_xfer_remain) ?
		host->ops->get_xfer_remain(host) : -1;
	mmc_host_clk_release(host);
	return (remainder > 0);
}
/*
 * mmc_stop_request() - Stops the currently running request
 * @host: MMC host to prepare the command.
 *
 * Triggers the stop flow in the host driver and sends CMD12 (stop command)
 * to the card. Sends HPI to get the card out of R1_STATE_PRG immediately.
 *
 * Returns 0 on success, propagates the error otherwise.
 */
static int mmc_stop_request(struct mmc_host *host)
{
	struct mmc_command cmd = {0};
	struct mmc_card *card = host->card;
	int err = 0;
	u32 status;

	if (!host->ops->stop_request || !card->ext_csd.hpi_en) {
		pr_warn("%s: host ops stop_request() or HPI not supported\n",
			mmc_hostname(host));
		return -ENOTSUPP;
	}
	mmc_host_clk_hold(host);
	err = host->ops->stop_request(host);
	if (err) {
		pr_err("%s: Call to host->ops->stop_request() failed (%d)\n",
		       mmc_hostname(host), err);
		goto out;
	}

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		err = mmc_send_status(card, &status);
		if (err) {
			pr_err("%s: Get card status fail\n",
			       mmc_hostname(card->host));
			goto out;
		}
		switch (R1_CURRENT_STATE(status)) {
		case R1_STATE_DATA:
		case R1_STATE_RCV:
			pr_err("%s: CMD12 fails with error (%d)\n",
			       mmc_hostname(host), err);
			goto out;
		default:
			break;
		}
	}
	err = mmc_interrupt_hpi(card);
	if (err) {
		pr_err("%s: mmc_interrupt_hpi() failed (%d)\n",
		       mmc_hostname(host), err);
		goto out;
	}
out:
	mmc_host_clk_release(host);
	return err;
}
/*
 * mmc_wait_for_data_req_done() - wait for request completed
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 *
 * Blocks the MMC context until the host controller acks the end of data
 * request execution or a new request notification arrives from the block
 * layer. Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static int mmc_wait_for_data_req_done(struct mmc_host *host,
				      struct mmc_request *mrq,
				      struct mmc_async_req *next_req)
{
	struct mmc_command *cmd;
	struct mmc_context_info *context_info = &host->context_info;
	bool pending_is_urgent = false;
	bool is_urgent = false;
	int err, ret;
	unsigned long flags;

	while (1) {
		ret = wait_io_event_interruptible(context_info->wait,
				(context_info->is_done_rcv ||
				 context_info->is_new_req ||
				 context_info->is_urgent));
		spin_lock_irqsave(&context_info->lock, flags);
		is_urgent = context_info->is_urgent;
		context_info->is_waiting_last_req = false;
		spin_unlock_irqrestore(&context_info->lock, flags);
		if (context_info->is_done_rcv) {
			context_info->is_done_rcv = false;
			context_info->is_new_req = false;
			cmd = mrq->cmd;
			if (!cmd->error || !cmd->retries ||
			    mmc_card_removed(host->card)) {
				err = host->areq->err_check(host->card,
							    host->areq);
				if (pending_is_urgent || is_urgent) {
					/*
					 * all the success/partial operations
					 * are done in addition to handling
					 * the urgent request
					 */
					if ((err == MMC_BLK_PARTIAL) ||
					    (err == MMC_BLK_SUCCESS))
						err = pending_is_urgent ?
							MMC_BLK_URGENT_DONE
							: MMC_BLK_URGENT;

					/* reset is_urgent for next request */
					context_info->is_urgent = false;
				}
				break; /* return err */
			} else {
				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
					mmc_hostname(host),
					cmd->opcode, cmd->error);
				cmd->retries--;
				cmd->error = 0;
				host->ops->request(host, mrq);
				/*
				 * ignore urgent flow, request retry has greater
				 * priority than urgent flow
				 */
				context_info->is_urgent = false;
				/* wait for done/new/urgent event again */
				continue;
			}
		} else if (context_info->is_new_req && !is_urgent) {
			context_info->is_new_req = false;
			if (!next_req) {
				err = MMC_BLK_NEW_REQUEST;
				break; /* return err */
			}
		} else if (context_info->is_urgent) {
			/*
			 * The case when the block layer sent the next urgent
			 * notification before it received end_io on
			 * the current one
			 */
			if (pending_is_urgent)
				continue; /* wait for done/new/urgent event */

			context_info->is_urgent = false;
			context_info->is_new_req = false;
			if (mmc_should_stop_curr_req(host)) {
				/*
				 * We are going to stop the ongoing request.
				 * Update stuff that we ought to do when the
				 * request actually completes.
				 */
				mmc_update_clk_scaling(host);
				err = mmc_stop_request(host);
				if (err == MMC_BLK_NO_REQ_TO_STOP) {
					pending_is_urgent = true;
					/* wait for done/new/urgent event */
					continue;
				} else if (err && !context_info->is_done_rcv) {
					err = MMC_BLK_ABORT;
					break;
				}
				/* running request has finished at this point */
				if (context_info->is_done_rcv) {
					err = host->areq->err_check(host->card,
								    host->areq);
					context_info->is_done_rcv = false;
					break; /* return err */
				} else {
					mmc_host_clk_release(host);
				}
				err = host->areq->update_interrupted_req(
						host->card, host->areq);
				if (!err)
					err = MMC_BLK_URGENT;
				break; /* return err */
			} else {
				/*
				 * The flow will go back to waiting for
				 * is_done_rcv, but in this case the original
				 * is_urgent is cleared. Set pending_is_urgent
				 * to distinguish the case where is_done_rcv
				 * and is_urgent are really concurrent.
				 */
				pending_is_urgent = true;
				continue; /* wait for done/new/urgent event */
			}
		} else {
			pr_warn("%s: mmc thread unblocked from waiting by signal, ret=%d\n",
				mmc_hostname(host),
				ret);
			continue;
		}
	} /* while */
	return err;
}
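
/*
 * Wait for a synchronous request to complete, retrying the command on
 * error as long as retries remain and the card is still present. A
 * timeout on an HPI-able command is handed back to the caller so it
 * can issue HPI itself.
 */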
static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion_io(&mrq->completion);

		cmd = mrq->cmd;

		/*
		 * If host has timed out waiting for the commands which can be
		 * HPIed then let the caller handle the timeout error as it may
		 * want to send the HPI command to bring the card out of
		 * programming state.
		 */
		if (cmd->ignore_timeout && cmd->error == -ETIMEDOUT)
			break;

		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}
/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previous started request
 *                that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let the
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req) {
		mmc_host_clk_hold(host);
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);
	}
}
/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req) {
		mmc_host_clk_hold(host);
		host->ops->post_req(host, mrq, err);
		mmc_host_clk_release(host);
	}
}
/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter returns 0 for success, otherwise non zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request wait for completion
 * of that request and start the new one and return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, NULL in case of none completed.
 * Wait for an ongoing request (previously started) to complete and
 * return the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;
	unsigned long flags;
	bool is_urgent;

	/* Prepare a new request */
	if (areq) {
		trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
				       areq->mrq->cmd->arg,
				       areq->mrq->data);
		/*
		 * start waiting here for possible interrupt
		 * because mmc_pre_req() taking long time
		 */
		mmc_pre_req(host, areq->mrq, !host->areq);
	}

	if (host->areq) {
		err = mmc_wait_for_data_req_done(host, host->areq->mrq,
						 areq);
		if (err == MMC_BLK_URGENT || err == MMC_BLK_URGENT_DONE) {
			mmc_post_req(host, host->areq->mrq, 0);
			host->areq = NULL;
			if (areq) {
				if (!(areq->cmd_flags &
				      MMC_REQ_NOREINSERT_MASK)) {
					areq->reinsert_req(areq);
					mmc_post_req(host, areq->mrq, 0);
				} else {
					start_err = __mmc_start_data_req(host,
							areq->mrq);
					if (start_err)
						mmc_post_req(host, areq->mrq,
							     -EINVAL);
					else
						host->areq = areq;
				}
			}
			goto exit;
		} else if (err == MMC_BLK_NEW_REQUEST) {
			if (error)
				*error = err;
			/*
			 * The previous request was not completed,
			 * nothing to return
			 */
			return NULL;
		}
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
			mmc_start_bkops(host->card, true);
			pr_debug("%s: %s: completed BKOPs due to exception\n",
				 mmc_hostname(host), __func__);
		}
	}

	if (!err && areq) {
		/* urgent notification may come again */
		spin_lock_irqsave(&host->context_info.lock, flags);
		is_urgent = host->context_info.is_urgent;
		host->context_info.is_urgent = false;
		spin_unlock_irqrestore(&host->context_info.lock, flags);

		if (!is_urgent || (areq->cmd_flags & REQ_URGENT)) {
			start_err = __mmc_start_data_req(host, areq->mrq);
		} else {
			/* previous request was done */
			err = MMC_BLK_URGENT_DONE;
			if (host->areq) {
				mmc_post_req(host, host->areq->mrq, 0);
				host->areq = NULL;
			}
			areq->reinsert_req(areq);
			mmc_post_req(host, areq->mrq, 0);
			goto exit;
		}
	}

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;
exit:
	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
	if (mmc_bus_needs_resume(host))
		mmc_resume_bus(host);
#endif
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
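
/*
 * Query the card with CMD13 and report whether it is currently in the
 * programming state. Claims and releases the host around the status
 * command; returns false if the status query itself fails.
 */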
bool mmc_card_is_prog_state(struct mmc_card *card)
{
	bool rc;
	int err;
	struct mmc_command cmd;

	mmc_claim_host(card->host);
	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("%s: Get card status fail. err=%d\n",
		       mmc_hostname(card->host), err);
		rc = false;
		goto out;
	}

	if (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG)
		rc = true;
	else
		rc = false;
out:
	mmc_release_host(card->host);
	return rc;
}
EXPORT_SYMBOL(mmc_card_is_prog_state);
/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt and poll the card status
 * until the card is out of the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait)) {
			err = mmc_send_status(card, &status);
			if (!err && R1_CURRENT_STATE(status) != R1_STATE_TRAN)
				err = -ETIMEDOUT;
			else
				break;
		}
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);
/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}
EXPORT_SYMBOL(mmc_wait_for_cmd);
/**
 * mmc_stop_bkops - stop ongoing BKOPS
 * @card: MMC card to check BKOPS
 *
 * Send HPI command to stop ongoing background operations to
 * allow rapid servicing of foreground operations, e.g. read/
 * writes. Wait until the card comes out of the programming state
 * to avoid errors in servicing read/write requests.
 *
 * The function should be called with the host claimed.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	BUG_ON(!card);

	/*
	 * Notify the delayed work to be cancelled, in case it was already
	 * removed from the queue, but was not started yet
	 */
	card->bkops_info.cancel_delayed_work = true;
	if (delayed_work_pending(&card->bkops_info.dw))
		cancel_delayed_work_sync(&card->bkops_info.dw);
	if (!mmc_card_doing_bkops(card))
		goto out;

	/*
	 * If idle time bkops is running on the card, let's not get into
	 * suspend.
	 */
	if (!mmc_use_core_runtime_pm(card->host) && mmc_card_doing_bkops(card)
	    && (card->host->parent->power.runtime_status == RPM_SUSPENDING)
	    && mmc_card_is_prog_state(card)) {
		err = -EBUSY;
		goto out;
	}

	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL, HPI could not be issued in the current card
	 * state; assume the card will complete the BKOPS on its own.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		err = 0;
	}
	MMC_UPDATE_BKOPS_STATS_HPI(card->bkops_info.bkops_stats);
out:
	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);
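
/*
 * Read the BKOPS and exception status bytes out of the card's EXT_CSD
 * register and cache them in card->ext_csd for the BKOPS decision logic.
 */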
int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	if (card->bkops_info.bkops_stats.ignore_card_bkops_status) {
		pr_debug("%s: skipping read raw_bkops_status in unittest mode\n",
			 __func__);
		return 0;
	}

	/*
	 * In future work, we should consider storing the entire ext_csd.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
		       mmc_hostname(card->host));
		return -ENOMEM;
	}

	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);
	if (err)
		goto out;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
out:
	kfree(ext_csd);
	return err;
}
EXPORT_SYMBOL(mmc_read_bkops_status);
  1124. /**
  1125. * mmc_set_data_timeout - set the timeout for a data command
  1126. * @data: data phase for command
  1127. * @card: the MMC card associated with the data transfer
  1128. *
  1129. * Computes the data timeout parameters according to the
  1130. * correct algorithm given the card type.
  1131. */
  1132. void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
  1133. {
  1134. unsigned int mult;
  1135. /*
  1136. * SDIO cards only define an upper 1 s limit on access.
  1137. */
  1138. if (mmc_card_sdio(card)) {
  1139. data->timeout_ns = 1000000000;
  1140. data->timeout_clks = 0;
  1141. return;
  1142. }
  1143. /*
  1144. * SD cards use a 100 multiplier rather than 10
  1145. */
  1146. mult = mmc_card_sd(card) ? 100 : 10;
  1147. /*
  1148. * Scale up the multiplier (and therefore the timeout) by
  1149. * the r2w factor for writes.
  1150. */
  1151. if (data->flags & MMC_DATA_WRITE)
  1152. mult <<= card->csd.r2w_factor;
  1153. /* max time value is 4.2s */
  1154. if ((card->csd.tacc_ns/1000 * mult) > 4294967)
  1155. data->timeout_ns = 0xffffffff;
  1156. else
  1157. data->timeout_ns = card->csd.tacc_ns * mult;
  1158. data->timeout_clks = card->csd.tacc_clks * mult;
  1159. /*
  1160. * SD cards also have an upper limit on the timeout.
  1161. */
  1162. if (mmc_card_sd(card)) {
  1163. unsigned int timeout_us, limit_us;
  1164. timeout_us = data->timeout_ns / 1000;
  1165. if (mmc_host_clk_rate(card->host))
  1166. timeout_us += data->timeout_clks * 1000 /
  1167. (mmc_host_clk_rate(card->host) / 1000);
  1168. if (data->flags & MMC_DATA_WRITE)
  1169. /*
  1170. * The MMC spec "It is strongly recommended
  1171. * for hosts to implement more than 500ms
  1172. * timeout value even if the card indicates
  1173. * the 250ms maximum busy length." Even the
  1174. * previous value of 300ms is known to be
  1175. * insufficient for some cards.
  1176. */
  1177. limit_us = 3000000;
  1178. else
  1179. limit_us = 100000;
  1180. /*
  1181. * SDHC cards always use these fixed values.
  1182. */
  1183. if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
  1184. data->timeout_ns = limit_us * 1000;
  1185. data->timeout_clks = 0;
  1186. }
  1187. }
  1188. /*
  1189. * Some cards require longer data read timeout than indicated in CSD.
  1190. * Address this by setting the read timeout to a "reasonably high"
  1191. * value. For the cards tested, 300ms has proven enough. If necessary,
  1192. * this value can be increased if other problematic cards require this.
  1193. */
  1194. if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
  1195. data->timeout_ns = 300000000;
  1196. data->timeout_clks = 0;
  1197. }
  1198. /*
  1199. * Some cards need very high timeouts if driven in SPI mode.
  1200. * The worst observed timeout was 900ms after writing a
  1201. * continuous stream of data until the internal logic
  1202. * overflowed.
  1203. */
  1204. if (mmc_host_is_spi(card->host)) {
  1205. if (data->flags & MMC_DATA_WRITE) {
  1206. if (data->timeout_ns < 1000000000)
  1207. data->timeout_ns = 1000000000; /* 1s */
  1208. } else {
  1209. if (data->timeout_ns < 100000000)
  1210. data->timeout_ns = 100000000; /* 100ms */
  1211. }
  1212. }
  1213. /* Increase the timeout values for some bad INAND MCP devices */
  1214. if (card->quirks & MMC_QUIRK_INAND_DATA_TIMEOUT) {
  1215. data->timeout_ns = 4000000000u; /* 4s */
  1216. data->timeout_clks = 0;
  1217. }
  1218. /* Some emmc cards require a longer read/write time */
  1219. if (card->quirks & MMC_QUIRK_BROKEN_DATA_TIMEOUT) {
  1220. if (data->timeout_ns < 4000000000u)
  1221. data->timeout_ns = 4000000000u; /* 4s */
  1222. }
  1223. }
  1224. EXPORT_SYMBOL(mmc_set_data_timeout);
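
/*
 * For example (an illustrative sketch, not built as part of this file):
 * a host driver preparing a single-block read on 'card' might do:
 *
 *	struct mmc_data data = { 0 };
 *
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	mmc_set_data_timeout(&data, card);
 *
 * after which data.timeout_ns and data.timeout_clks hold the limits the
 * controller should enforce for the data phase.
 */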
/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
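
/*
 * Worked example: a 13-byte transfer is padded to ((13 + 3) / 4) * 4 = 16
 * bytes, while an already-aligned 512-byte transfer is returned unchanged.
 */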
/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations.  If @abort is non-NULL and
 * dereferences to a non-zero value, this returns prematurely with
 * that non-zero value without acquiring the lock.  Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_try_claim_host - try exclusively to claim a host
 * @host: mmc host to claim
 *
 * Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
		host->ops->enable(host);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release an MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_release_host);
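
/*
 * Typical usage (sketch): bracket a sequence of commands with a claim and
 * a release so no other claimer interleaves, exactly as
 * mmc_read_bkops_status() does above:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_send_ext_csd(card, ext_csd);
 *	mmc_release_host(card->host);
 *
 * Claims nest for the same task: claim_cnt tracks the depth and only the
 * final mmc_release_host() wakes other waiters.
 */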
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		mmc_hostname(host), ios->clock, ios->bus_mode,
		ios->power_mode, ios->chip_select, ios->vdd,
		ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
	if (ios->old_rate != ios->clock) {
		if (likely(ios->clk_ts)) {
			char trace_info[80];

			snprintf(trace_info, 80,
				"%s: freq_KHz %d --> %d | t = %d",
				mmc_hostname(host), ios->old_rate / 1000,
				ios->clock / 1000, jiffies_to_msecs(
					(long)jiffies - (long)ios->clk_ts));
			trace_mmc_clk(trace_info);
		}
		ios->old_rate = ios->clock;
		ios->clk_ts = jiffies;
	}
}
EXPORT_SYMBOL(mmc_set_ios);

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->ios.clock);

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here!  The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency.  When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		WARN_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value.  If conversion is not possible, a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages.  For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34).
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}
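
/*
 * Worked example: for vdd = 3300 and low_bits = false, no decrement is
 * applied and bit = (3300 - 2000) / 100 + 8 = 21, i.e.
 * ilog2(MMC_VDD_33_34).  With low_bits = true, vdd is first decremented
 * to 3299, giving bit = 12 + 8 = 20, i.e. ilog2(MMC_VDD_32_33).
 */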
/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values.  If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * the [3300:3400] range is translated to the MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
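
/*
 * For example, the boundary case from the note above:
 *
 *	u32 ocr = mmc_vddrange_to_ocrmask(3300, 3400);
 *
 * yields bits 20..22 set, i.e. MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35.
 */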
#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/*
		 * REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/*
		 * Avoid needless changes to this voltage; the regulator
		 * might not allow this operation.
		 */
		voltage = regulator_get_voltage(supply);
		if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
			min_uV = max_uV = voltage;

		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);
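
/*
 * Typical usage (a sketch with hypothetical names, "myhost" and "vmmc"):
 * a host driver's set_ios() method forwards ios->vdd to its supply:
 *
 *	static void myhost_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		struct myhost *host = mmc_priv(mmc);
 *
 *		mmc_regulator_set_ocr(mmc, host->vmmc, ios->vdd);
 *	}
 *
 * Passing vdd_bit = 0 (power off) disables the regulator again.
 */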
#endif /* CONFIG_REGULATOR */

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage.
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		mmc_host_clk_hold(host);
		host->ios.vdd = bit;
		mmc_set_ios(host);
		mmc_host_clk_release(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}
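
/*
 * Worked example: if the card and host agree on 0x00300000 (bits 20 and
 * 21, i.e. 3.2V-3.4V), ffs() returns 21, so bit = 20, the mask 3 << 20
 * keeps both bits, and ios.vdd is set to 20, the lowest common voltage
 * window.
 */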
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
	struct mmc_command cmd = {0};
	int err = 0;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
		cmd.opcode = SD_SWITCH_VOLTAGE;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			return err;

		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
			return -EIO;
	}

	host->ios.signal_voltage = signal_voltage;

	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host)
{
	int bit;

	mmc_host_clk_hold(host);

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}

void mmc_power_off(struct mmc_host *host)
{
	mmc_host_clk_hold(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host.  This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}

void mmc_power_cycle(struct mmc_host *host)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

int mmc_resume_bus(struct mmc_host *host)
{
	unsigned long flags;

	if (!mmc_bus_needs_resume(host))
		return -EINVAL;

	pr_info("%s: Starting deferred resume\n", mmc_hostname(host));
	spin_lock_irqsave(&host->lock, flags);
	host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
	host->rescan_disable = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		mmc_power_up(host);
		BUG_ON(!host->bus_ops->resume);
		host->bus_ops->resume(host);
	}

	mmc_bus_put(host);
	pr_info("%s: Deferred resume completed\n", mmc_hostname(host));
	return 0;
}
EXPORT_SYMBOL(mmc_resume_bus);

/*
 * Assign a mmc bus handler to a host.  Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

/**
 * mmc_detect_change - process change of state on an MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed.  The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}
EXPORT_SYMBOL(mmc_detect_change);

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}

#define UNSTUFF_BITS(resp, start, size)					\
	({								\
		const int __size = size;				\
		const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1;	\
		const int __off = 3 - ((start) / 32);			\
		const int __shft = (start) & 31;			\
		u32 __res;						\
									\
		__res = resp[__off] >> __shft;				\
		if (__size + __shft > 32)				\
			__res |= resp[__off-1] << ((32 - __shft) % 32);	\
		__res & __mask;						\
	})
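
/*
 * For example, UNSTUFF_BITS(resp, 12, 2) as used below evaluates with
 * __off = 3 - (12 / 32) = 3 and __shft = 12, i.e. it extracts CSD bits
 * [13:12] (TMP_WRITE_PROTECT and PERM_WRITE_PROTECT) from the raw
 * response words.
 */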
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	unsigned long timeout;
	unsigned int fr, nr;
	int err;
	u32 *resp = card->raw_csd;

	fr = from;
	nr = to - from + 1;
	trace_mmc_blk_erase_start(arg, fr, nr);

	/* Check the CSD write protection bits */
	if (UNSTUFF_BITS(resp, 12, 2)) {
		pr_err("eMMC write protection is set; cannot write or erase.\n");
		err = -EIO;
		goto out;
	}

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}

		/*
		 * Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
			       mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}

		if (cmd.resp[0] & R1_WP_ERASE_SKIP) {
			pr_err("error %d requesting status %#x (R1_WP_ERASE_SKIP)\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	trace_mmc_blk_erase_end(arg, fr, nr);
	return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* Align the range to 16 KiB (32 sectors) for TRIM */
	if (arg == MMC_TRIM_ARG) {
		if ((from % 32) != 0)
			from = ((from >> 5) + 1) << 5;
		to = (to >> 5) << 5;
		if (from >= to)
			return 0;
	}

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
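
/*
 * Worked example for the MMC_ERASE_ARG alignment above: with
 * card->erase_size = 1024 sectors, a request of from = 1000, nr = 3000
 * is rounded inward to from = 1024, nr = 2048, so sectors 1024..3071
 * (inclusive) are passed to mmc_do_erase(); the unaligned head and tail
 * are silently skipped.
 */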
int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5,
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE) &&
	    (card->host->caps2 & MMC_CAP2_SANITIZE))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, the MMC erase timeout depends on the
	 * clock frequency, which can change.  In that case, the best choice
	 * is just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
		       bool is_rel_write)
{
	struct mmc_command cmd = {0};

	cmd.opcode = MMC_SET_BLOCK_COUNT;
	cmd.arg = blockcount & 0x0000FFFF;
	if (is_rel_write)
		cmd.arg |= 1 << 31;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);
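
/*
 * The CMD23 argument layout follows from the code above: bits [15:0]
 * carry the block count and bit 31 flags a reliable write.  For example,
 * mmc_set_blockcount(card, 8, true) issues CMD23 with
 * cmd.arg = 0x80000008.
 */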
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	mmc_host_clk_hold(host);
	host->ops->hw_reset(host);
	mmc_host_clk_release(host);
}

int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n_function;

	if (mmc_card_sdio(card))
		return 0;

	if (mmc_card_mmc(card) && (card->host->caps & MMC_CAP_HW_RESET)) {
		rst_n_function = card->ext_csd.rst_n_function;
		if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) !=
		    EXT_CSD_RST_N_ENABLED)
			return 0;
	}
	return 1;
}
EXPORT_SYMBOL(mmc_can_reset);

static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;

	if (!host->bus_ops->power_restore)
		return -EOPNOTSUPP;

	if (!card)
		return -EINVAL;

	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;

	mmc_host_clk_hold(host);
	mmc_set_clock(host, host->f_init);

	if (mmc_card_mmc(card) && host->ops->hw_reset)
		host->ops->hw_reset(host);
	else
		mmc_power_cycle(host);

	/* If the reset has happened, then a status command will fail */
	if (check) {
		struct mmc_command cmd = {0};
		int err;

		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}

	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	mmc_host_clk_release(host);

	return host->bus_ops->power_restore(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_hw_reset_check(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);

/**
 * mmc_reset_clk_scale_stats() - reset clock scaling statistics
 * @host: pointer to mmc host structure
 */
void mmc_reset_clk_scale_stats(struct mmc_host *host)
{
	host->clk_scaling.busy_time_us = 0;
	host->clk_scaling.window_time = jiffies;
}
EXPORT_SYMBOL_GPL(mmc_reset_clk_scale_stats);

/**
 * mmc_get_max_frequency() - get the maximum supported frequency
 * @host: pointer to mmc host structure
 *
 * Returns the maximum frequency supported by the card/host.  If the
 * timing mode is SDR50/SDR104/HS200/DDR50/HS400, return the maximum
 * frequency for that mode; otherwise, use the current frequency.
 * Host drivers may also override the frequency by implementing the
 * "get_max_frequency" host op.
 */
unsigned long mmc_get_max_frequency(struct mmc_host *host)
{
	unsigned long freq;
	unsigned char timing;

	if (host->ops && host->ops->get_max_frequency) {
		freq = host->ops->get_max_frequency(host);
		goto out;
	}

	if (mmc_card_hs400(host->card))
		timing = MMC_TIMING_MMC_HS400;
	else
		timing = host->ios.timing;

	switch (timing) {
	case MMC_TIMING_UHS_SDR50:
		freq = UHS_SDR50_MAX_DTR;
		break;
	case MMC_TIMING_UHS_SDR104:
		freq = UHS_SDR104_MAX_DTR;
		break;
	case MMC_TIMING_MMC_HS200:
		freq = MMC_HS200_MAX_DTR;
		break;
	case MMC_TIMING_UHS_DDR50:
		freq = UHS_DDR50_MAX_DTR;
		break;
	case MMC_TIMING_MMC_HS400:
		freq = MMC_HS400_MAX_DTR;
		break;
	default:
		mmc_host_clk_hold(host);
		freq = host->ios.clock;
		mmc_host_clk_release(host);
		break;
	}

out:
	return freq;
}
EXPORT_SYMBOL_GPL(mmc_get_max_frequency);

/**
 * mmc_get_min_frequency() - get the minimum supported frequency
 * @host: pointer to mmc host structure
 *
 * Returns the minimum frequency supported by the card/host that does not
 * impair performance for most use cases.  If the timing mode is
 * SDR50/SDR104, return the 50 MHz SDR25 value; for HS200/HS400, return
 * the 52 MHz high-speed value.  If the timing mode is DDR50, return
 * 25 MHz so that throughput is equivalent to SDR50/SDR104 at 50 MHz.
 * Host drivers may also override the frequency by implementing the
 * "get_min_frequency" host op.
 */
static unsigned long mmc_get_min_frequency(struct mmc_host *host)
{
	unsigned long freq;

	if (host->ops && host->ops->get_min_frequency) {
		freq = host->ops->get_min_frequency(host);
		goto out;
	}

	switch (host->ios.timing) {
	case MMC_TIMING_UHS_SDR50:
	case MMC_TIMING_UHS_SDR104:
		freq = UHS_SDR25_MAX_DTR;
		break;
	case MMC_TIMING_MMC_HS200:
		freq = MMC_HIGH_52_MAX_DTR;
		break;
	case MMC_TIMING_MMC_HS400:
		freq = MMC_HIGH_52_MAX_DTR;
		break;
	case MMC_TIMING_UHS_DDR50:
		freq = UHS_DDR50_MAX_DTR / 2;
		break;
	default:
		mmc_host_clk_hold(host);
		freq = host->ios.clock;
		mmc_host_clk_release(host);
		break;
	}

out:
	return freq;
}

/*
 * Scale down clocks to the minimum supported frequency.
 * The delayed work re-arms itself in case it cannot
 * claim the host.
 */
static void mmc_clk_scale_work(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host,
					     clk_scaling.work.work);

	if (!host->card || !host->bus_ops ||
	    !host->bus_ops->change_bus_speed ||
	    !host->clk_scaling.enable || !host->ios.clock)
		return;

	mmc_rpm_hold(host, &host->card->dev);
	if (!mmc_try_claim_host(host)) {
		/* retry after a timer tick */
		queue_delayed_work(system_nrt_wq, &host->clk_scaling.work, 1);
		goto out;
	}

	mmc_clk_scaling(host, true);
	mmc_release_host(host);
out:
	mmc_rpm_release(host, &host->card->dev);
}

static bool mmc_is_vaild_state_for_clk_scaling(struct mmc_host *host,
					       enum mmc_load state)
{
	struct mmc_card *card = host->card;
	u32 status;
	bool ret = false;

	/*
	 * If the current partition type is RPMB, clock switching may not
	 * work properly, as sending the tuning command (CMD21) is illegal
	 * in this mode.
	 * In case invalid_state is set, we forbid clock scaling unless it
	 * is a down-scale and "scale_down_in_low_wr_load" is set.
	 */
	if (!card || (mmc_card_mmc(card) &&
	    card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB) ||
	    (host->clk_scaling.invalid_state &&
	    !(state == MMC_LOAD_LOW &&
	    host->clk_scaling.scale_down_in_low_wr_load)))
		goto out;

	if (mmc_send_status(card, &status)) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_TRAN:
		ret = true;
		break;
	default:
		break;
	}
out:
	return ret;
}

static int mmc_clk_update_freq(struct mmc_host *host,
			       unsigned long freq, enum mmc_load state)
{
	int err = 0;

	if (host->ops->notify_load) {
		err = host->ops->notify_load(host, state);
		if (err)
			goto out;
	}

	if (freq != host->clk_scaling.curr_freq) {
		if (!mmc_is_vaild_state_for_clk_scaling(host, state)) {
			err = -EAGAIN;
			goto error;
		}

		err = host->bus_ops->change_bus_speed(host, &freq);
		if (!err)
			host->clk_scaling.curr_freq = freq;
		else
			pr_err("%s: %s: failed (%d) at freq=%lu\n",
			       mmc_hostname(host), __func__, err, freq);
	}
error:
	if (err) {
		/* restore previous state */
		if (host->ops->notify_load)
			host->ops->notify_load(host, host->clk_scaling.state);
	}
out:
	return err;
}

/**
 * mmc_clk_scaling() - clock scaling decision algorithm
 * @host: pointer to mmc host structure
 * @from_wq: variable that specifies the context in which
 *	     mmc_clk_scaling() is called.
 *
 * Calculate the load percentage based on host busy time
 * and total sampling interval, and decide clock scaling
 * based on the scale up/down thresholds.
 * If the load is greater than the up threshold, increase
 * the frequency to the maximum supported by the host.  If
 * the load is less than the down threshold, scale down the
 * frequency to the minimum supported by the host.  Otherwise,
 * retain the current frequency and do nothing.
 */
static void mmc_clk_scaling(struct mmc_host *host, bool from_wq)
{
	int err = 0;
	struct mmc_card *card = host->card;
	unsigned long total_time_ms = 0;
	unsigned long busy_time_ms = 0;
	unsigned long freq;
	unsigned int up_threshold = host->clk_scaling.up_threshold;
	unsigned int down_threshold = host->clk_scaling.down_threshold;
	bool queue_scale_down_work = false;
	enum mmc_load state;

	if (!card || !host->bus_ops || !host->bus_ops->change_bus_speed) {
		pr_err("%s: %s: invalid entry\n", mmc_hostname(host), __func__);
		goto out;
	}

	/* Check if the clocks are already gated. */
	if (!host->ios.clock)
		goto out;

	if (time_is_after_jiffies(host->clk_scaling.window_time +
			msecs_to_jiffies(host->clk_scaling.polling_delay_ms)))
		goto out;

	/* handle time wrap */
	total_time_ms = jiffies_to_msecs((long)jiffies -
			(long)host->clk_scaling.window_time);

	/* Check if we re-enter during clock switching */
	if (unlikely(host->clk_scaling.in_progress))
		goto out;

	host->clk_scaling.in_progress = true;

	busy_time_ms = host->clk_scaling.busy_time_us / USEC_PER_MSEC;

	freq = host->clk_scaling.curr_freq;
	state = host->clk_scaling.state;

	/*
	 * Note that the max. and min. frequency should be based
	 * on the timing modes that the card and host handshake
	 * during initialization.
	 */
	if (busy_time_ms * 100 > total_time_ms * up_threshold) {
		freq = mmc_get_max_frequency(host);
		state = MMC_LOAD_HIGH;
	} else if (busy_time_ms * 100 < total_time_ms * down_threshold) {
		if (!from_wq)
			queue_scale_down_work = true;
		freq = mmc_get_min_frequency(host);
		state = MMC_LOAD_LOW;
	}

	if (state != host->clk_scaling.state) {
		if (!queue_scale_down_work) {
			if (!from_wq)
				cancel_delayed_work_sync(
						&host->clk_scaling.work);
			err = mmc_clk_update_freq(host, freq, state);
			if (!err)
				host->clk_scaling.state = state;
			else if (err == -EAGAIN)
				goto no_reset_stats;
		} else {
			/*
			 * We hold the claimed host while queueing the
			 * scale-down work, so delay at least one timer tick
			 * to release the host and re-claim it while scaling
			 * down the clocks.
			 */
			queue_delayed_work(system_nrt_wq,
					   &host->clk_scaling.work, 1);
			goto no_reset_stats;
		}
	}

	mmc_reset_clk_scale_stats(host);
no_reset_stats:
	host->clk_scaling.in_progress = false;
out:
	return;
}
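
/*
 * Worked example (with assumed thresholds, since the actual values are
 * host configuration): if up_threshold = 75 and the last window shows
 * busy_time_ms = 80 out of total_time_ms = 100, then 80 * 100 > 100 * 75
 * and the frequency is raised to mmc_get_max_frequency().  With
 * down_threshold = 5 and busy_time_ms = 3, 3 * 100 < 100 * 5, so the
 * clock is scaled down to mmc_get_min_frequency(), via the delayed work
 * when not already running in workqueue context.
 */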
  2697. /**
  2698. * mmc_disable_clk_scaling() - Disable clock scaling
  2699. * @host: pointer to mmc host structure
  2700. *
  2701. * Disables clock scaling temporarily by setting enable
  2702. * property to false. To disable completely, one also
  2703. * need to set 'initialized' variable to false.
  2704. */
  2705. void mmc_disable_clk_scaling(struct mmc_host *host)
  2706. {
  2707. cancel_delayed_work_sync(&host->clk_scaling.work);
  2708. host->clk_scaling.enable = false;
  2709. }
  2710. EXPORT_SYMBOL_GPL(mmc_disable_clk_scaling);
  2711. /**
  2712. * mmc_can_scale_clk() - Check if clock scaling is initialized
  2713. * @host: pointer to mmc host structure
  2714. */
  2715. bool mmc_can_scale_clk(struct mmc_host *host)
  2716. {
  2717. return host->clk_scaling.initialized;
  2718. }
  2719. EXPORT_SYMBOL_GPL(mmc_can_scale_clk);
  2720. /**
  2721. * mmc_init_clk_scaling() - Initialize clock scaling
  2722. * @host: pointer to mmc host structure
  2723. *
  2724. * Initialize clock scaling for supported hosts.
  2725. * It is assumed that the caller ensure clock is
  2726. * running at maximum possible frequency before
  2727. * calling this function.
  2728. */
  2729. void mmc_init_clk_scaling(struct mmc_host *host)
  2730. {
  2731. if (!host->card || !(host->caps2 & MMC_CAP2_CLK_SCALE))
  2732. return;
  2733. INIT_DELAYED_WORK(&host->clk_scaling.work, mmc_clk_scale_work);
  2734. host->clk_scaling.curr_freq = mmc_get_max_frequency(host);
  2735. if (host->ops->notify_load)
  2736. host->ops->notify_load(host, MMC_LOAD_HIGH);
  2737. host->clk_scaling.state = MMC_LOAD_HIGH;
  2738. mmc_reset_clk_scale_stats(host);
  2739. host->clk_scaling.enable = true;
  2740. host->clk_scaling.initialized = true;
  2741. pr_debug("%s: clk scaling enabled\n", mmc_hostname(host));
  2742. }
  2743. EXPORT_SYMBOL_GPL(mmc_init_clk_scaling);
  2744. /**
  2745. * mmc_exit_clk_scaling() - Disable clock scaling
  2746. * @host: pointer to mmc host structure
  2747. *
  2748. * Disable clock scaling permanently.
  2749. */
  2750. void mmc_exit_clk_scaling(struct mmc_host *host)
  2751. {
  2752. cancel_delayed_work_sync(&host->clk_scaling.work);
  2753. memset(&host->clk_scaling, 0, sizeof(host->clk_scaling));
  2754. }
  2755. EXPORT_SYMBOL_GPL(mmc_exit_clk_scaling);
  2756. static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
  2757. {
  2758. #if defined(CONFIG_BCM4354)
  2759. struct sdhci_host *sd_host = NULL;
  2760. #endif
  2761. host->f_init = freq;
  2762. #ifdef CONFIG_MMC_DEBUG
  2763. pr_info("%s: %s: trying to init card at %u Hz\n",
  2764. mmc_hostname(host), __func__, host->f_init);
  2765. #endif
  2766. #if defined(CONFIG_BCM4354)
  2767. /* To detect absence of wifi chipset */
  2768. sd_host = (struct sdhci_host *)mmc_priv(host);
  2769. if (sd_host != NULL) {
  2770. if (sd_host->flags & SDHCI_DEVICE_DEAD) {
  2771. pr_err("%s: host(%s), SDHCI_DEVICE_DEAD so return! \n", __func__, mmc_hostname(host));
  2772. return -EIO;
  2773. }
  2774. }
  2775. #endif
  2776. mmc_power_up(host);
  2777. /*
  2778. * Some eMMCs (with VCCQ always on) may not be reset after power up, so
  2779. * do a hardware reset if possible.
  2780. */
  2781. mmc_hw_reset_for_init(host);
  2782. /* Initialization should be done at 3.3 V I/O voltage. */
  2783. mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
  2784. /*
  2785. * sdio_reset sends CMD52 to reset card. Since we do not know
  2786. * if the card is being re-initialized, just send it. CMD52
  2787. * should be ignored by SD/eMMC cards.
  2788. */
  2789. sdio_reset(host);
  2790. mmc_go_idle(host);
  2791. mmc_send_if_cond(host, host->ocr_avail);
  2792. /* Order's important: probe SDIO, then SD, then MMC */
  2793. if (!mmc_attach_sdio(host))
  2794. return 0;
  2795. if (!mmc_attach_sd(host))
  2796. return 0;
  2797. if (!mmc_attach_mmc(host))
  2798. return 0;
  2799. mmc_power_off(host);
  2800. return -EIO;
  2801. }

int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
		return 0;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);

	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (refer to "SD Card Mechanical
	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
	 * detect work 200ms later for this case.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}

	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
		ST_LOG("<%s> %s: card remove detected\n", __func__,
		       mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	ret = mmc_card_removed(card);

	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
	    !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			mmc_detect_change(host, 0);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
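
/*
 * Example: a minimal sketch of how a request-path caller might use
 * mmc_detect_card_removed() after an I/O error; the host must be
 * claimed, as the WARN_ON() above enforces (my_handle_error() is a
 * hypothetical caller):
 *
 *	static int my_handle_error(struct mmc_card *card)
 *	{
 *		int gone;
 *
 *		mmc_claim_host(card->host);
 *		gone = mmc_detect_card_removed(card->host);
 *		mmc_release_host(card->host);
 *
 *		return gone ? -ENODEV : -EIO;
 *	}
 */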

void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	bool extend_wakelock = false;

	if (host->rescan_disable)
		return;

	mmc_bus_get(host);

	mmc_rpm_hold(host, &host->class_dev);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * If the card was removed the bus will be marked as dead;
	 * extend the wakelock so userspace can respond.
	 */
	if (host->bus_dead)
		extend_wakelock = true;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_rpm_release(host, &host->class_dev);
		mmc_bus_put(host);
		goto out;
	}

	mmc_rpm_release(host, &host->class_dev);

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
		mmc_claim_host(host);
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	ST_LOG("<%s> %s insertion detected\n", __func__,
	       host->class_dev.kobj.name);

	mmc_rpm_hold(host, &host->class_dev);
	mmc_claim_host(host);
	if (!mmc_rescan_try_freq(host, host->f_min))
		extend_wakelock = true;
	mmc_release_host(host);
	mmc_rpm_release(host, &host->class_dev);
out:
	/* only extend the wakelock, if suspend has not started yet */
	if (extend_wakelock && !host->rescan_disable)
		wake_lock_timeout(&host->detect_wake_lock, HZ / 2);

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
#if defined(CONFIG_MACH_HLTESKT) || defined(CONFIG_MACH_HLTEKTT) || \
	defined(CONFIG_MACH_HLTELGT) || defined(CONFIG_MACH_FLTESKT) || \
	defined(CONFIG_MACH_LT03SKT) || defined(CONFIG_MACH_LT03KTT) || \
	defined(CONFIG_MACH_LT03LGT) || defined(CONFIG_MACH_HLTEDCM) || \
	defined(CONFIG_MACH_HLTEKDI) || defined(CONFIG_MACH_JS01LTEDCM) || \
	defined(CONFIG_MACH_JS01LTESBM) || defined(CONFIG_MACH_H3GDUOS_CTC) || \
	defined(CONFIG_MACH_H3GDUOS_CU) || defined(CONFIG_MACH_FRESCOLTESKT) || \
	defined(CONFIG_MACH_FRESCOLTEKTT) || defined(CONFIG_MACH_FRESCOLTELGT) || \
	defined(CONFIG_MACH_HLTE_CHN_CMCC)
	if (!fw_dl_complete && !strcmp(mmc_hostname(host), "mmc2")) {
		pr_info("%s: %s: %d, call mmc_rescan after 2 sec\n",
			mmc_hostname(host), __func__, fw_dl_complete);
		mmc_detect_change(host, msecs_to_jiffies(2000));
	} else
#endif
		mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
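
/*
 * Example: a minimal sketch of how a platform driver might pair the two
 * helpers above around a period in which the slot may be powered down
 * (my_platform_idle()/my_platform_busy() are hypothetical names):
 *
 *	static void my_platform_idle(struct mmc_host *host)
 *	{
 *		if (mmc_power_save_host(host))
 *			pr_debug("%s: power save not supported\n",
 *				 mmc_hostname(host));
 *	}
 *
 *	static void my_platform_busy(struct mmc_host *host)
 *	{
 *		mmc_power_restore_host(host);
 *	}
 */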

int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);
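
/*
 * Example: a minimal sketch of the sleep/awake pairing a suspend path
 * might use, gated on mmc_card_can_sleep() (my_suspend()/my_resume()
 * are hypothetical names):
 *
 *	static int my_suspend(struct mmc_host *host)
 *	{
 *		if (mmc_card_can_sleep(host))
 *			return mmc_card_sleep(host);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct mmc_host *host)
 *	{
 *		return mmc_card_awake(host);
 *	}
 */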

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0, rc;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
	    (card->quirks & MMC_QUIRK_CACHE_DISABLE))
		return err;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch_ignore_timeout(card, EXT_CSD_CMD_SET_NORMAL,
						EXT_CSD_FLUSH_CACHE, 1,
						MMC_FLUSH_REQ_TIMEOUT_MS);
		if (err == -ETIMEDOUT) {
			pr_err("%s: cache flush timeout\n",
			       mmc_hostname(card->host));
			rc = mmc_interrupt_hpi(card);
			if (rc)
				pr_err("%s: mmc_interrupt_hpi() failed (%d)\n",
				       mmc_hostname(host), rc);
		} else if (err) {
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
		}
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
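
/*
 * Example: a minimal sketch of a shutdown/suspend path flushing the
 * card's volatile cache before power is removed (my_prepare_poweroff()
 * is a hypothetical caller):
 *
 *	static int my_prepare_poweroff(struct mmc_card *card)
 *	{
 *		int err;
 *
 *		mmc_claim_host(card->host);
 *		err = mmc_flush_cache(card);
 *		mmc_release_host(card->host);
 *
 *		return err;
 *	}
 */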

/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 * This function should be called with the host claimed.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	unsigned int timeout;
	int err = 0, rc;

	/* check the card before dereferencing it for the timeout below */
	if (!card)
		return err;

	timeout = card->ext_csd.generic_cmd6_time;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
	    mmc_card_is_removable(host) ||
	    (card->quirks & MMC_QUIRK_CACHE_DISABLE))
		return err;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable) {
			if (!enable)
				timeout = MMC_CACHE_DISBALE_TIMEOUT_MS;

			err = mmc_switch_ignore_timeout(card,
					EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_CACHE_CTRL, enable, timeout);

			if (err == -ETIMEDOUT && !enable) {
				pr_err("%s: cache disable operation timeout\n",
				       mmc_hostname(card->host));
				rc = mmc_interrupt_hpi(card);
				if (rc)
					pr_err("%s: mmc_interrupt_hpi() failed (%d)\n",
					       mmc_hostname(host), rc);
			} else if (err) {
				pr_err("%s: cache %s error %d\n",
				       mmc_hostname(card->host),
				       enable ? "on" : "off",
				       err);
			} else {
				card->ext_csd.cache_ctrl = enable;
			}
		}
	}

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);
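
/*
 * Example: a minimal sketch of toggling the card cache with the host
 * claimed, as the comment above requires (my_disable_cache() is a
 * hypothetical caller):
 *
 *	static int my_disable_cache(struct mmc_host *host)
 *	{
 *		int err;
 *
 *		mmc_claim_host(host);
 *		err = mmc_cache_ctrl(host, 0);	// flushes, then disables
 *		mmc_release_host(host);
 *
 *		return err;
 *	}
 */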

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	if (mmc_bus_needs_resume(host))
		return 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/*
		 * A long response time is not acceptable for device drivers
		 * when doing suspend. Avoid blocking "forever" in
		 * mmc_claim_host during the suspend sequence by trying to
		 * pre-claim the host instead.
		 *
		 * Skip the try-claim for SDIO cards; it would create a
		 * deadlock: the function driver's suspend may call back into
		 * the SDIO driver from a different context to enable power
		 * save mode in the card, and would then wait in
		 * mmc_claim_host.
		 */
		if (!(host->card && mmc_card_sdio(host->card)))
			if (!mmc_try_claim_host(host))
				err = -EBUSY;

		if (!err) {
			if (host->bus_ops->suspend) {
				err = mmc_stop_bkops(host->card);
				if (err)
					goto stop_bkops_err;
				err = host->bus_ops->suspend(host);
				MMC_UPDATE_BKOPS_STATS_SUSPEND(
					host->card->bkops_info.bkops_stats);
			}
			if (!(host->card && mmc_card_sdio(host->card)))
				mmc_release_host(host);

			if (err == -ENOSYS || !host->bus_ops->resume) {
				/*
				 * We simply "remove" the card in this case.
				 * It will be redetected on resume. (Calling
				 * bus_ops->remove() with a claimed host can
				 * deadlock.)
				 */
				if (host->bus_ops->remove)
					host->bus_ops->remove(host);
				mmc_claim_host(host);
				mmc_detach_bus(host);
				mmc_power_off(host);
				mmc_release_host(host);
				host->pm_flags = 0;
				err = 0;
			}
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

	if (host->card && host->card->type == MMC_TYPE_SD)
		mdelay(50);

	return err;

stop_bkops_err:
	if (!(host->card && mmc_card_sdio(host->card)))
		mmc_release_host(host);
	return err;
}
EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (mmc_bus_manual_resume(host)) {
		host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
		mmc_bus_put(host);
		return 0;
	}

	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell the runtime PM core we just powered up the
			 * card, since it still believes the card is powered
			 * off. Note that currently runtime PM is only
			 * enabled for SDIO cards that are
			 * MMC_CAP_POWER_OFF_CARD.
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			pr_warning("%s: error %d during resume "
				   "(card was removed?)\n",
				   mmc_hostname(host), err);
			err = 0;
		}
	}
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);

/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do it in the pm notifier, while userspace isn't yet frozen, so that
 * we will still be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		if (host->card && mmc_card_mmc(host->card)) {
			mmc_claim_host(host);
			err = mmc_stop_bkops(host->card);
			mmc_release_host(host);
			if (err) {
				pr_err("%s: didn't stop bkops\n",
				       mmc_hostname(host));
				return err;
			}
		}

		spin_lock_irqsave(&host->lock, flags);
		if (mmc_bus_needs_resume(host)) {
			spin_unlock_irqrestore(&host->lock, flags);
			break;
		}
		/* since it's suspending anyway, disable rescan */
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);

		/* Wait for pending detect work to be completed */
		if (!(host->caps & MMC_CAP_NEEDS_POLL))
			flush_work(&host->detect.work);

		/*
		 * In some cases, the detect work might be scheduled
		 * just before rescan_disable is set to true.
		 * Cancel any such scheduled work.
		 */
		cancel_delayed_work_sync(&host->detect);

		/*
		 * The wake-lock may have been acquired; since we are
		 * suspending, release it.
		 */
		if (wake_lock_active(&host->detect_wake_lock))
			wake_unlock(&host->detect_wake_lock);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		spin_lock_irqsave(&host->lock, flags);
		if (mmc_bus_manual_resume(host)) {
			spin_unlock_irqrestore(&host->lock, flags);
			break;
		}
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
#endif
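
/*
 * Example: a minimal sketch of how mmc_pm_notify() is wired up to a
 * host via the standard PM notifier plumbing (the core typically does
 * this when the host is allocated and added; shown here only for
 * illustration, assuming CONFIG_PM):
 *
 *	host->pm_notify.notifier_call = mmc_pm_notify;
 *	register_pm_notifier(&host->pm_notify);
 *	...
 *	unregister_pm_notifier(&host->pm_notify);
 */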

#ifdef CONFIG_MMC_EMBEDDED_SDIO
void mmc_set_embedded_sdio_data(struct mmc_host *host,
				struct sdio_cis *cis,
				struct sdio_cccr *cccr,
				struct sdio_embedded_func *funcs,
				int num_funcs)
{
	host->embedded_sdio_data.cis = cis;
	host->embedded_sdio_data.cccr = cccr;
	host->embedded_sdio_data.funcs = funcs;
	host->embedded_sdio_data.num_funcs = num_funcs;
}
EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
#endif
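
/*
 * Example: a minimal sketch of a board file describing an embedded SDIO
 * device whose CIS/CCCR cannot be read from the card; every value below
 * is a made-up placeholder, and the field names follow the embedded-SDIO
 * structs as commonly defined in Android trees:
 *
 *	static struct sdio_cis my_wifi_cis = {
 *		.vendor		= 0x1234,
 *		.device		= 0x5678,
 *		.max_dtr	= 25000000,
 *	};
 *	static struct sdio_cccr my_wifi_cccr = {
 *		.multi_block	= 1,
 *	};
 *	static struct sdio_embedded_func my_wifi_funcs[] = {
 *		{ .f_class = SDIO_CLASS_WLAN, .f_maxblksize = 512 },
 *	};
 *
 *	mmc_set_embedded_sdio_data(host, &my_wifi_cis, &my_wifi_cccr,
 *				   my_wifi_funcs, ARRAY_SIZE(my_wifi_funcs));
 */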

#ifdef CONFIG_PM_RUNTIME
void mmc_dump_dev_pm_state(struct mmc_host *host, struct device *dev)
{
	pr_err("%s: %s: err: runtime_error: %d\n", dev_name(dev),
	       mmc_hostname(host), dev->power.runtime_error);
	pr_err("%s: %s: disable_depth: %d runtime_status: %d idle_notification: %d\n",
	       dev_name(dev), mmc_hostname(host), dev->power.disable_depth,
	       dev->power.runtime_status,
	       dev->power.idle_notification);
	pr_err("%s: %s: request_pending: %d, request: %d\n",
	       dev_name(dev), mmc_hostname(host),
	       dev->power.request_pending, dev->power.request);
}

void mmc_rpm_hold(struct mmc_host *host, struct device *dev)
{
	int ret = 0;

	if (!mmc_use_core_runtime_pm(host))
		return;

	ret = pm_runtime_get_sync(dev);
	if ((ret < 0) &&
	    (dev->power.runtime_error || (dev->power.disable_depth > 0))) {
		pr_err("%s: %s: %s: pm_runtime_get_sync: err: %d\n",
		       dev_name(dev), mmc_hostname(host), __func__, ret);
		mmc_dump_dev_pm_state(host, dev);
		BUG_ON(pm_runtime_suspended(dev));
	}
}
EXPORT_SYMBOL(mmc_rpm_hold);

void mmc_rpm_release(struct mmc_host *host, struct device *dev)
{
	int ret = 0;

	if (!mmc_use_core_runtime_pm(host))
		return;

	ret = pm_runtime_put_sync(dev);
	if ((ret < 0) &&
	    (dev->power.runtime_error || (dev->power.disable_depth > 0))) {
		pr_err("%s: %s: %s: pm_runtime_put_sync: err: %d\n",
		       dev_name(dev), mmc_hostname(host), __func__, ret);
		mmc_dump_dev_pm_state(host, dev);
	}
}
EXPORT_SYMBOL(mmc_rpm_release);
#else
void mmc_rpm_hold(struct mmc_host *host, struct device *dev) {}
EXPORT_SYMBOL(mmc_rpm_hold);

void mmc_rpm_release(struct mmc_host *host, struct device *dev) {}
EXPORT_SYMBOL(mmc_rpm_release);
#endif
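
/*
 * Example: a minimal sketch of the hold/release pairing callers are
 * expected to keep balanced around any access that needs the host
 * powered, as mmc_rescan() above does (my_do_io() is a hypothetical
 * caller):
 *
 *	static void my_do_io(struct mmc_host *host)
 *	{
 *		mmc_rpm_hold(host, &host->class_dev);
 *		mmc_claim_host(host);
 *		// ... issue requests ...
 *		mmc_release_host(host);
 *		mmc_rpm_release(host, &host->class_dev);
 *	}
 */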

/**
 * mmc_init_context_info() - init synchronization context
 * @host: mmc host
 *
 * Init the struct context_info needed to implement the asynchronous
 * request mechanism used by the mmc core, the host driver and the
 * mmc request supplier.
 */
void mmc_init_context_info(struct mmc_host *host)
{
	spin_lock_init(&host->context_info.lock);
	host->context_info.is_new_req = false;
	host->context_info.is_done_rcv = false;
	host->context_info.is_waiting_last_req = false;
	init_waitqueue_head(&host->context_info.wait);
}

#define MIN_WAIT_MS	5
/*
 * Poll CMD13 until the card returns to the TRAN state, giving up after
 * wait_ms. Returns the number of ms spent waiting (0 if the card was
 * already in TRAN), or 0 if wait_ms elapsed first.
 */
static int mmc_wait_trans_state(struct mmc_card *card, unsigned int wait_ms)
{
	int waited = 0;
	u32 status = 0;

	mmc_send_status(card, &status);
	while (R1_CURRENT_STATE(status) != R1_STATE_TRAN) {
		if (waited > wait_ms)
			return 0;
		mdelay(MIN_WAIT_MS);
		waited += MIN_WAIT_MS;
		mmc_send_status(card, &status);
	}
	return waited;
}

/*
 * Turn the bkops mode ON/OFF.
 */
int mmc_bkops_enable(struct mmc_host *host, u8 value)
{
	struct mmc_card *card = host->card;
	unsigned long flags;
	int err = 0;
	u8 ext_csd[512];

	if (!card)
		return err;

	mmc_claim_host(host);

	/* read ext_csd to get the current EXT_CSD_BKOPS_EN field value */
	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		/* try again after some delay (send HPI if needed) */
		if (err == -ETIMEDOUT && mmc_card_doing_bkops(card)) {
			err = mmc_stop_bkops(card);
			if (err) {
				pr_err("%s: failed to stop bkops. err = %d\n",
				       mmc_hostname(card->host), err);
				goto bkops_out;
			}
		}
		/* Max HPI latency is 100 ms */
		mmc_wait_trans_state(card, 100);
		err = mmc_send_ext_csd(card, ext_csd);
		if (err) {
			pr_err("%s: error %d sending ext_csd\n",
			       mmc_hostname(card->host), err);
			goto bkops_out;
		}
	}

	/* preserve the currently enabled bit in the value we write */
	value |= ext_csd[EXT_CSD_BKOPS_EN] & 0x1;
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_EN, value,
			 card->ext_csd.generic_cmd6_time);
	if (err) {
		pr_err("%s: bkops mode error %d\n", mmc_hostname(host), err);
		goto bkops_out;
	}

	/* read ext_csd again to confirm the EXT_CSD_BKOPS_EN field value */
	mmc_wait_trans_state(card, 20);
	err = mmc_send_ext_csd(card, ext_csd);
	if (!err) {
		spin_lock_irqsave(&card->bkops_lock, flags);
		card->bkops_enable = ext_csd[EXT_CSD_BKOPS_EN];
		spin_unlock_irqrestore(&card->bkops_lock, flags);
	} else {
		pr_err("%s: error %d confirming ext_csd value\n",
		       mmc_hostname(card->host), err);
	}

bkops_out:
	mmc_release_host(host);
	return err;
}
EXPORT_SYMBOL(mmc_bkops_enable);
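
/*
 * Example: a minimal sketch of turning background operations on from,
 * say, a sysfs store handler; the function claims the host itself, so
 * the caller must not (my_store_bkops() is a hypothetical name):
 *
 *	static int my_store_bkops(struct mmc_host *host, bool on)
 *	{
 *		return mmc_bkops_enable(host, on ? 1 : 0);
 *	}
 */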

static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");