core.c

  1. /*
  2. * linux/drivers/mmc/core/core.c
  3. *
  4. * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
  5. * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
  6. * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
  7. * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #include <linux/module.h>
  14. #include <linux/init.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/completion.h>
  17. #include <linux/device.h>
  18. #include <linux/delay.h>
  19. #include <linux/pagemap.h>
  20. #include <linux/err.h>
  21. #include <linux/leds.h>
  22. #include <linux/scatterlist.h>
  23. #include <linux/log2.h>
  24. #include <linux/regulator/consumer.h>
  25. #include <linux/pm_runtime.h>
  26. #include <linux/pm_wakeup.h>
  27. #include <linux/suspend.h>
  28. #include <linux/fault-inject.h>
  29. #include <linux/random.h>
  30. #include <linux/slab.h>
  31. #include <linux/of.h>
  32. #include <uapi/linux/sched/types.h>
  33. #include <linux/mmc/card.h>
  34. #include <linux/mmc/host.h>
  35. #include <linux/mmc/mmc.h>
  36. #include <linux/mmc/sd.h>
  37. #include <linux/mmc/slot-gpio.h>
  38. #include <mt-plat/mtk_io_boost.h>
  39. #include <mt-plat/aee.h>
  40. #define CREATE_TRACE_POINTS
  41. #include <trace/events/mmc.h>
  42. #include "core.h"
  43. #include "card.h"
  44. #include "bus.h"
  45. #include "host.h"
  46. #include "sdio_bus.h"
  47. #include "pwrseq.h"
  48. #include "mmc_ops.h"
  49. #include "sd_ops.h"
  50. #include "sdio_ops.h"
  51. #include "mtk_mmc_block.h"
  52. #include "queue.h"
  53. /* If the device is not responding */
  54. #define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
  55. /* The max erase timeout, used when host->max_busy_timeout isn't specified */
  56. #define MMC_ERASE_TIMEOUT_MS (60 * 1000) /* 60 s */
  57. static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
  58. /*
  59. * Enabling software CRCs on the data blocks can be a significant (30%)
  60. * performance cost, and for other reasons may not always be desired.
  61. * So we allow it to be disabled.
  62. */
  63. bool use_spi_crc = 1;
  64. module_param(use_spi_crc, bool, 0);
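/*
 * Example (assuming the core is built as the usual mmc_core module): software
 * CRC checking for SPI hosts can be turned off from the kernel command line
 * with "mmc_core.use_spi_crc=0", or via "modprobe mmc_core use_spi_crc=0"
 * when it is a loadable module.
 */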
  65. #ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
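/*
 * Queue a request on the software-CMDQ lists: CMD46/47 (execute read/write
 * task) requests go on dat_que, everything else on cmd_que. A non-zero
 * mrq->flags requests insertion at the head of the list instead of the tail.
 */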
  66. static void mmc_enqueue_queue(struct mmc_host *host, struct mmc_request *mrq)
  67. {
  68. unsigned long flags;
  69. if (mrq->cmd->opcode == MMC_EXECUTE_READ_TASK ||
  70. mrq->cmd->opcode == MMC_EXECUTE_WRITE_TASK) {
  71. spin_lock_irqsave(&host->dat_que_lock, flags);
  72. if (mrq->flags)
  73. list_add(&mrq->link, &host->dat_que);
  74. else
  75. list_add_tail(&mrq->link, &host->dat_que);
  76. spin_unlock_irqrestore(&host->dat_que_lock, flags);
  77. } else {
  78. spin_lock_irqsave(&host->cmd_que_lock, flags);
  79. if (mrq->flags)
  80. list_add(&mrq->link, &host->cmd_que);
  81. else
  82. list_add_tail(&mrq->link, &host->cmd_que);
  83. spin_unlock_irqrestore(&host->cmd_que_lock, flags);
  84. }
  85. }
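/* Remove a completed CMD46/47 request from dat_que; other opcodes are left untouched. */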
  86. static void mmc_dequeue_queue(struct mmc_host *host, struct mmc_request *mrq)
  87. {
  88. unsigned long flags;
  89. if (mrq->cmd->opcode == MMC_EXECUTE_READ_TASK ||
  90. mrq->cmd->opcode == MMC_EXECUTE_WRITE_TASK) {
  91. spin_lock_irqsave(&host->dat_que_lock, flags);
  92. list_del_init(&mrq->link);
  93. spin_unlock_irqrestore(&host->dat_que_lock, flags);
  94. }
  95. }
  96. static void mmc_clr_dat_mrq_que_flag(struct mmc_host *host)
  97. {
  98. unsigned int i;
  99. for (i = 0; i < host->card->ext_csd.cmdq_depth; i++)
  100. host->data_mrq_queued[i] = false;
  101. }
  102. static void mmc_clr_dat_list(struct mmc_host *host)
  103. {
  104. unsigned long flags;
  105. struct mmc_request *mrq = NULL;
  106. struct mmc_request *mrq_next = NULL;
  107. spin_lock_irqsave(&host->dat_que_lock, flags);
  108. list_for_each_entry_safe(mrq, mrq_next, &host->dat_que, link) {
  109. list_del_init(&mrq->link);
  110. }
  111. spin_unlock_irqrestore(&host->dat_que_lock, flags);
  112. mmc_clr_dat_mrq_que_flag(host);
  113. }
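/*
 * Re-queue the CMD44/45 request (mrq_que) for every task still marked in
 * host->task_id_index, clearing each bit as it goes; used after error
 * recovery. Returns the number of tasks restored.
 */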
  114. static int mmc_restore_tasks(struct mmc_host *host)
  115. {
  116. struct mmc_request *mrq_cmd = NULL;
  117. unsigned int i = 0;
  118. unsigned int task_id;
  119. unsigned int tasks;
  120. tasks = host->task_id_index;
  121. for (task_id = 0; task_id < host->card->ext_csd.cmdq_depth; task_id++) {
  122. if (tasks & 0x1) {
  123. mrq_cmd = host->areq_que[task_id]->mrq_que;
  124. mmc_enqueue_queue(host, mrq_cmd);
  125. clear_bit(task_id, &host->task_id_index);
  126. i++;
  127. }
  128. tasks >>= 1;
  129. }
  130. return i;
  131. }
  132. static struct mmc_request *mmc_get_cmd_que(struct mmc_host *host)
  133. {
  134. struct mmc_request *mrq = NULL;
  135. if (!list_empty(&host->cmd_que)) {
  136. mrq = list_first_entry(&host->cmd_que,
  137. struct mmc_request, link);
  138. list_del_init(&mrq->link);
  139. }
  140. return mrq;
  141. }
  142. static struct mmc_request *mmc_get_dat_que(struct mmc_host *host)
  143. {
  144. struct mmc_request *mrq = NULL;
  145. if (!list_empty(&host->dat_que)) {
  146. mrq = list_first_entry(&host->dat_que,
  147. struct mmc_request, link);
  148. }
  149. return mrq;
  150. }
  151. static int mmc_blk_status_check(struct mmc_card *card, unsigned int *status)
  152. {
  153. struct mmc_command cmd = {0};
  154. int err, retries = 3;
  155. cmd.opcode = MMC_SEND_STATUS;
  156. cmd.arg = card->rca << 16;
  157. cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
  158. err = mmc_wait_for_cmd(card->host, &cmd, retries);
  159. if (err == 0)
  160. *status = cmd.resp[0];
  161. else
  162. pr_err("%s: err %d\n", __func__, err);
  163. return err;
  164. }
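/*
 * Discard all queued tasks in the device with CMD48 (CMDQ_TASK_MGMT, arg 1),
 * retrying up to three times on command error.
 */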
  165. static void mmc_discard_cmdq(struct mmc_host *host)
  166. {
  167. memset(&host->deq_cmd, 0, sizeof(struct mmc_command));
  168. memset(&host->deq_mrq, 0, sizeof(struct mmc_request));
  169. host->deq_cmd.opcode = MMC_CMDQ_TASK_MGMT;
  170. host->deq_cmd.arg = 1;
  171. host->deq_cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1B | MMC_CMD_AC;
  172. host->deq_mrq.data = NULL;
  173. host->deq_mrq.cmd = &host->deq_cmd;
  174. host->deq_mrq.done = mmc_wait_cmdq_done;
  175. host->deq_mrq.host = host;
  176. host->deq_mrq.cmd->retries = 3;
  177. host->deq_mrq.cmd->error = 0;
  178. host->deq_mrq.cmd->mrq = &host->deq_mrq;
  179. while (1) {
  180. host->ops->request(host, &host->deq_mrq);
  181. if (!host->deq_mrq.cmd->error ||
  182. !host->deq_mrq.cmd->retries)
  183. break;
  184. pr_err("%s: req failed (CMD%u): %d, retrying...\n",
  185. __func__,
  186. host->deq_mrq.cmd->opcode,
  187. host->deq_mrq.cmd->error);
  188. host->deq_mrq.cmd->retries--;
  189. host->deq_mrq.cmd->error = 0;
  190. };
  191. pr_notice("%s: CMDQ send discard (CMD48)\n", __func__);
  192. }
  193. static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
  194. int err);
  195. /* added for eMMC reset when an error happens */
  196. int emmc_resetting_when_cmdq;
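/*
 * Error recovery: reset the eMMC, switch back to the user data area (the
 * cached part_config with the partition access bits cleared) and re-enable
 * CMDQ mode on the device.
 */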
  197. static int mmc_reset_for_cmdq(struct mmc_host *host)
  198. {
  199. int err, ret;
  200. emmc_resetting_when_cmdq = 1;
  201. err = mmc_hw_reset(host);
  202. /* Ensure we switch back to the correct partition */
  203. if (err != -EOPNOTSUPP) {
  204. u8 part_config = host->card->ext_csd.part_config;
  205. part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
  206. /* CMDQ is only enabled on the user area, so switch back to it */
  207. part_config |= 0;
  208. ret = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
  209. EXT_CSD_PART_CONFIG, part_config,
  210. host->card->ext_csd.part_time);
  211. if (ret)
  212. return ret;
  213. /* re-enable CMDQ on the device */
  214. ret = mmc_cmdq_enable(host->card);
  215. if (ret)
  216. return ret;
  217. host->card->ext_csd.part_config = part_config;
  218. }
  219. emmc_resetting_when_cmdq = 0;
  220. return err;
  221. }
  222. /*
  223. * check CMDQ QSR
  224. */
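/*
 * Note: CMD13 with bit 15 set in the argument returns the Queue Status
 * Register (one "ready for execution" bit per queued task) instead of the
 * normal device status.
 */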
  225. void mmc_do_check(struct mmc_host *host)
  226. {
  227. memset(&host->que_cmd, 0, sizeof(struct mmc_command));
  228. memset(&host->que_mrq, 0, sizeof(struct mmc_request));
  229. host->que_cmd.opcode = MMC_SEND_STATUS;
  230. host->que_cmd.arg = host->card->rca << 16 | 1 << 15;
  231. host->que_cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
  232. host->que_cmd.data = NULL;
  233. host->que_mrq.cmd = &host->que_cmd;
  234. host->que_mrq.done = mmc_wait_cmdq_done;
  235. host->que_mrq.host = host;
  236. host->que_mrq.cmd->retries = 3;
  237. host->que_mrq.cmd->error = 0;
  238. host->que_mrq.cmd->mrq = &host->que_mrq;
  239. while (1) {
  240. host->ops->request(host, &host->que_mrq);
  241. /* added for eMMC reset when an error happens */
  242. if (host->que_mrq.cmd->error && !host->que_mrq.cmd->retries) {
  243. /* wait for data irq handling to finish, otherwise a timing issue will occur */
  244. msleep(2000);
  245. if (mmc_reset_for_cmdq(host)) {
  246. pr_notice("[CQ] reinit fail\n");
  247. BUG_ON(1);
  248. }
  249. mmc_clr_dat_list(host);
  250. mmc_restore_tasks(host);
  251. atomic_set(&host->cq_wait_rdy, 0);
  252. atomic_set(&host->cq_rdy_cnt, 0);
  253. }
  254. if (!host->que_mrq.cmd->error ||
  255. !host->que_mrq.cmd->retries)
  256. break;
  257. pr_err("%s: req failed (CMD%u): %d, retrying...\n",
  258. __func__,
  259. host->que_mrq.cmd->opcode,
  260. host->que_mrq.cmd->error);
  261. host->que_mrq.cmd->retries--;
  262. host->que_mrq.cmd->error = 0;
  263. };
  264. }
  265. static void mmc_prep_chk_mrq(struct mmc_host *host)
  266. {
  267. memset(&host->chk_cmd, 0, sizeof(struct mmc_command));
  268. memset(&host->chk_mrq, 0, sizeof(struct mmc_request));
  269. host->chk_cmd.opcode = MMC_SEND_STATUS;
  270. host->chk_cmd.arg = host->card->rca << 16;
  271. host->chk_cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
  272. host->chk_cmd.data = NULL;
  273. host->chk_mrq.cmd = &host->chk_cmd;
  274. host->chk_mrq.done = mmc_wait_cmdq_done;
  275. host->chk_mrq.host = host;
  276. host->chk_mrq.cmd->error = 0;
  277. host->chk_mrq.cmd->mrq = &host->chk_mrq;
  278. }
  279. static void mmc_prep_areq_que(struct mmc_host *host,
  280. struct mmc_async_req *areq_que)
  281. {
  282. areq_que->mrq->done = mmc_wait_cmdq_done;
  283. areq_que->mrq->host = host;
  284. areq_que->mrq->cmd->error = 0;
  285. areq_que->mrq->cmd->mrq = areq_que->mrq;
  286. areq_que->mrq->cmd->data =
  287. areq_que->mrq->data;
  288. areq_que->mrq->data->error = 0;
  289. areq_que->mrq->data->mrq = areq_que->mrq;
  290. if (areq_que->mrq->stop) {
  291. areq_que->mrq->data->stop =
  292. areq_que->mrq->stop;
  293. areq_que->mrq->stop->error = 0;
  294. areq_que->mrq->stop->mrq = areq_que->mrq;
  295. }
  296. }
  297. /*
  298. * check status register
  299. */
  300. void mmc_do_status(struct mmc_host *host)
  301. {
  302. mmc_prep_chk_mrq(host);
  303. host->ops->request(host, &host->chk_mrq);
  304. }
  305. /*
  306. * send stop command
  307. */
  308. void mmc_do_stop(struct mmc_host *host)
  309. {
  310. memset(&host->que_cmd, 0, sizeof(struct mmc_command));
  311. memset(&host->que_mrq, 0, sizeof(struct mmc_request));
  312. host->que_cmd.opcode = MMC_STOP_TRANSMISSION;
  313. host->que_cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
  314. host->que_mrq.cmd = &host->que_cmd;
  315. host->que_mrq.done = mmc_wait_cmdq_done;
  316. host->que_mrq.host = host;
  317. host->que_mrq.cmd->retries = 3;
  318. host->que_mrq.cmd->error = 0;
  319. host->que_mrq.cmd->mrq = &host->que_mrq;
  320. while (1) {
  321. host->ops->request(host, &host->que_mrq);
  322. if (!host->que_mrq.cmd->error ||
  323. !host->que_mrq.cmd->retries)
  324. break;
  325. pr_err("%s: req failed (CMD%u): %d, retrying...\n",
  326. __func__,
  327. host->que_mrq.cmd->opcode,
  328. host->que_mrq.cmd->error);
  329. host->que_mrq.cmd->retries--;
  330. host->que_mrq.cmd->error = 0;
  331. };
  332. }
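/*
 * Poll CMD13 until the card returns to the TRAN state, sending CMD12 if it is
 * stuck in the DATA or RCV state, and giving up after 10 seconds. Returns 0 on
 * success, 1 on error or timeout.
 */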
  333. static int mmc_wait_tran(struct mmc_host *host)
  334. {
  335. u32 status;
  336. int err;
  337. unsigned long timeout;
  338. timeout = jiffies + msecs_to_jiffies(10 * 1000);
  339. do {
  340. err = mmc_blk_status_check(host->card, &status);
  341. if (err) {
  342. pr_notice("[CQ] check card status error = %d\n", err);
  343. return 1;
  344. }
  345. if ((R1_CURRENT_STATE(status) == R1_STATE_DATA) ||
  346. (R1_CURRENT_STATE(status) == R1_STATE_RCV))
  347. mmc_do_stop(host);
  348. if (time_after(jiffies, timeout)) {
  349. pr_err("%s: Card stuck in %d state! %s\n",
  350. mmc_hostname(host),
  351. R1_CURRENT_STATE(status), __func__);
  352. return 1;
  353. }
  354. } while (R1_CURRENT_STATE(status) != R1_STATE_TRAN);
  355. return 0;
  356. }
  357. /*
  358. * check write
  359. */
  360. static int mmc_check_write(struct mmc_host *host, struct mmc_request *mrq)
  361. {
  362. int ret = 0;
  363. u32 status = 0;
  364. struct mmc_queue_req *mq_rq;
  365. struct mmc_async_req *areq_active;
  366. if (mrq->cmd->opcode == MMC_EXECUTE_WRITE_TASK) {
  367. ret = mmc_blk_status_check(host->card, &status);
  368. if ((status & R1_WP_VIOLATION) || host->wp_error ||
  369. R1_CURRENT_STATE(status) != R1_STATE_TRAN) {
  370. mrq->data->error = -EROFS;
  371. areq_active =
  372. host->areq_que[(mrq->cmd->arg >> 16) & 0x1f];
  373. mq_rq = container_of(areq_active, struct mmc_queue_req,
  374. areq);
  375. pr_notice(
  376. "[%s]: data error = %d, status=0x%x, line:%d, block addr:0x%x\n",
  377. __func__, mrq->data->error, status,
  378. __LINE__, mq_rq->brq.que.arg);
  379. mmc_wait_tran(host);
  380. host->wp_error = 0;
  381. }
  382. mrq->data->error = 0;
  383. atomic_set(&host->cq_w, false);
  384. }
  385. return ret;
  386. }
  387. /* Sleep when polling cmd13' for over 1ms */
  388. #define CMD13_TMO_NS (1000 * 1000)
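/*
 * Software-CMDQ worker thread. Each loop iteration: (1) finishes a completed
 * CMD46/47 request, running error recovery and re-tuning if it failed,
 * (2) issues the next ready data transfer, (3) queues new tasks with
 * CMD44/45, (4) polls the Queue Status Register with CMD13' and (5) sleeps
 * when there is nothing left to do.
 */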
  389. int mmc_run_queue_thread(void *data)
  390. {
  391. struct mmc_host *host = data;
  392. struct mmc_request *cmd_mrq = NULL;
  393. struct mmc_request *dat_mrq = NULL;
  394. struct mmc_request *done_mrq = NULL;
  395. unsigned int task_id, areq_cnt_chk, tmo;
  396. bool is_done = false;
  397. int err;
  398. u64 chk_time = 0;
  399. struct sched_param scheduler_params = {0};
  400. /* Set as RT priority */
  401. scheduler_params.sched_priority = 1;
  402. sched_setscheduler(current, SCHED_FIFO, &scheduler_params);
  403. pr_notice("[CQ] start cmdq thread\n");
  404. mt_bio_queue_alloc(current, NULL);
  405. mtk_iobst_register_tid(current->pid);
  406. while (1) {
  407. set_current_state(TASK_RUNNING);
  408. mt_biolog_cmdq_check();
  409. /* End request stage 1/2 */
  410. if (atomic_read(&host->cq_rw)
  411. || (atomic_read(&host->areq_cnt) <= 1)) {
  412. if (host->done_mrq) {
  413. done_mrq = host->done_mrq;
  414. host->done_mrq = NULL;
  415. }
  416. }
  417. if (done_mrq) {
  418. if (done_mrq->data->error || done_mrq->cmd->error) {
  419. mmc_wait_tran(host);
  420. mmc_discard_cmdq(host);
  421. mmc_wait_tran(host);
  422. mmc_clr_dat_list(host);
  423. atomic_set(&host->cq_rdy_cnt, 0);
  424. if (host->ops->execute_tuning) {
  425. err = host->ops->execute_tuning(host,
  426. MMC_SEND_TUNING_BLOCK_HS200);
  427. if (err && mmc_reset_for_cmdq(host)) {
  428. pr_notice("[CQ] reinit fail\n");
  429. BUG_ON(1);
  430. } else
  431. pr_notice("[CQ] tuning pass\n");
  432. }
  433. host->cur_rw_task = CQ_TASK_IDLE;
  434. task_id = (done_mrq->cmd->arg >> 16) & 0x1f;
  435. host->ops->request(host,
  436. host->areq_que[task_id]->mrq_que);
  437. atomic_set(&host->cq_wait_rdy, 1);
  438. done_mrq = NULL;
  439. }
  440. atomic_set(&host->cq_rw, false);
  441. if (done_mrq && !done_mrq->data->error
  442. && !done_mrq->cmd->error) {
  443. task_id = (done_mrq->cmd->arg >> 16) & 0x1f;
  444. mt_biolog_cmdq_dma_end(task_id);
  445. mmc_check_write(host, done_mrq);
  446. host->cur_rw_task = CQ_TASK_IDLE;
  447. is_done = true;
  448. mmc_complete_mqr_crypto(host);
  449. if (atomic_read(&host->cq_tuning_now) == 1) {
  450. mmc_restore_tasks(host);
  451. atomic_set(&host->cq_tuning_now, 0);
  452. }
  453. }
  454. }
  455. /* Send Command 46/47 (DMA) */
  456. if (!atomic_read(&host->cq_rw)) {
  457. spin_lock_irq(&host->dat_que_lock);
  458. dat_mrq = mmc_get_dat_que(host);
  459. spin_unlock_irq(&host->dat_que_lock);
  460. if (dat_mrq) {
  461. WARN_ON(
  462. dat_mrq->cmd->opcode != MMC_EXECUTE_WRITE_TASK
  463. && dat_mrq->cmd->opcode != MMC_EXECUTE_READ_TASK);
  464. if (dat_mrq->cmd->opcode
  465. == MMC_EXECUTE_WRITE_TASK)
  466. atomic_set(&host->cq_w, true);
  467. atomic_set(&host->cq_rw, true);
  468. task_id = ((dat_mrq->cmd->arg >> 16) & 0x1f);
  469. host->cur_rw_task = task_id;
  470. err = mmc_swcq_prepare_mqr_crypto(host,
  471. dat_mrq);
  472. if (err) {
  473. pr_info("eMMC crypto fail %d\n", err);
  474. WARN_ON(1);
  475. }
  476. host->ops->request(host, dat_mrq);
  477. mt_biolog_cmdq_dma_start(task_id);
  478. atomic_dec(&host->cq_rdy_cnt);
  479. dat_mrq = NULL;
  480. }
  481. }
  482. /* End request stage 2/2 */
  483. if (is_done) {
  484. task_id = (done_mrq->cmd->arg >> 16) & 0x1f;
  485. mt_biolog_cmdq_isdone_start(task_id,
  486. host->areq_que[task_id]->mrq_que);
  487. err = done_mrq->areq->err_check(host->card,
  488. done_mrq->areq);
  489. mmc_post_req(host, done_mrq, 0);
  490. mt_biolog_cmdq_isdone_end(task_id);
  491. mt_biolog_cmdq_check();
  492. mmc_blk_end_queued_req(host, done_mrq->areq, task_id,
  493. err);
  494. done_mrq = NULL;
  495. is_done = false;
  496. }
  497. /* Send Command 44/45 */
  498. if (atomic_read(&host->cq_tuning_now) == 0) {
  499. spin_lock_irq(&host->cmd_que_lock);
  500. cmd_mrq = mmc_get_cmd_que(host);
  501. spin_unlock_irq(&host->cmd_que_lock);
  502. while (cmd_mrq) {
  503. task_id = ((cmd_mrq->sbc->arg >> 16) & 0x1f);
  504. mt_biolog_cmdq_queue_task(task_id, cmd_mrq);
  505. if (host->task_id_index & (1 << task_id)) {
  506. pr_err(
  507. "[%s] BUG!!! task_id %d used, task_id_index 0x%08lx, areq_cnt = %d, cq_wait_rdy = %d\n",
  508. __func__, task_id, host->task_id_index,
  509. atomic_read(&host->areq_cnt),
  510. atomic_read(&host->cq_wait_rdy));
  511. WARN_ON(1);
  512. }
  513. set_bit(task_id, &host->task_id_index);
  514. host->ops->request(host, cmd_mrq);
  515. /* added for eMMC reset when an error happens */
  516. if ((cmd_mrq->sbc && cmd_mrq->sbc->error)
  517. || cmd_mrq->cmd->error) {
  518. /* wait for data irq handling to finish, otherwise a timing issue will occur */
  519. msleep(2000);
  520. if (mmc_reset_for_cmdq(host)) {
  521. pr_notice("[CQ] reinit fail\n");
  522. BUG_ON(1);
  523. }
  524. mmc_clr_dat_list(host);
  525. mmc_restore_tasks(host);
  526. atomic_set(&host->cq_wait_rdy, 0);
  527. atomic_set(&host->cq_rdy_cnt, 0);
  528. } else
  529. atomic_inc(&host->cq_wait_rdy);
  530. spin_lock_irq(&host->cmd_que_lock);
  531. cmd_mrq = mmc_get_cmd_que(host);
  532. spin_unlock_irq(&host->cmd_que_lock);
  533. }
  534. }
  535. if (atomic_read(&host->cq_rw)) {
  536. /* wait for an event to wake us up: */
  537. /* either a new request arriving or the DMA completing */
  538. areq_cnt_chk = atomic_read(&host->areq_cnt);
  539. tmo = wait_event_interruptible_timeout(host->cmdq_que,
  540. host->done_mrq ||
  541. (atomic_read(&host->areq_cnt) > areq_cnt_chk),
  542. 10 * HZ);
  543. if (!tmo) {
  544. pr_info("%s:tmo,mrq(%p),chk(%d),cnt(%d)\n",
  545. __func__,
  546. host->done_mrq,
  547. areq_cnt_chk,
  548. atomic_read(&host->areq_cnt));
  549. pr_info("%s:tmo,rw(%d),wait(%d),rdy(%d)\n",
  550. __func__,
  551. atomic_read(&host->cq_rw),
  552. atomic_read(&host->cq_wait_rdy),
  553. atomic_read(&host->cq_rdy_cnt));
  554. }
  555. /* DMA time should not be counted in the polling time */
  556. chk_time = 0;
  557. }
  558. /* Send Command 13' */
  559. if (atomic_read(&host->cq_wait_rdy) > 0
  560. && atomic_read(&host->cq_rdy_cnt) == 0) {
  561. if (!chk_time)
  562. /* set check time */
  563. chk_time = sched_clock();
  564. /* send cmd13' */
  565. mmc_do_check(host);
  566. if (atomic_read(&host->cq_rdy_cnt))
  567. /* clear when got ready task */
  568. chk_time = 0;
  569. else if (sched_clock() - chk_time > CMD13_TMO_NS)
  570. /* sleep when TMO */
  571. usleep_range(2000, 5000);
  572. }
  573. /* Sleep when nothing to do */
  574. mt_biolog_cmdq_check();
  575. set_current_state(TASK_INTERRUPTIBLE);
  576. if (atomic_read(&host->areq_cnt) == 0)
  577. schedule();
  578. set_current_state(TASK_RUNNING);
  579. if (kthread_should_stop())
  580. break;
  581. }
  582. mt_bio_queue_free(current);
  583. return 0;
  584. }
  585. unsigned long not_ready_time;
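/*
 * Completion callback for all software-CMDQ requests. Errors on CMD46/47 or
 * their data set cq_tuning_now so the worker thread can recover; a CMD13'
 * (QSR) response enqueues CMD46/47 for every task the device reports ready;
 * a completed CMD46/47 clears the task's bookkeeping and is handed to the
 * worker thread via host->done_mrq and a cmdq_que wakeup.
 */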
  586. void mmc_wait_cmdq_done(struct mmc_request *mrq)
  587. {
  588. struct mmc_host *host = mrq->host;
  589. struct mmc_command *cmd = mrq->cmd;
  590. int done = 0, task_id;
  591. if (cmd->opcode == MMC_SEND_STATUS ||
  592. cmd->opcode == MMC_STOP_TRANSMISSION ||
  593. cmd->opcode == MMC_CMDQ_TASK_MGMT) {
  594. /* do nothing */
  595. } else
  596. mmc_dequeue_queue(host, mrq);
  597. /* error - request done */
  598. if (cmd->error) {
  599. pr_notice("%s: cmd%d arg:%x error:%d\n",
  600. mmc_hostname(host),
  601. cmd->opcode, cmd->arg,
  602. cmd->error);
  603. if ((cmd->opcode == MMC_EXECUTE_READ_TASK) ||
  604. (cmd->opcode == MMC_EXECUTE_WRITE_TASK)) {
  605. atomic_set(&host->cq_tuning_now, 1);
  606. goto clear_end;
  607. }
  608. goto request_end;
  609. }
  610. /* data error */
  611. if (mrq->data && mrq->data->error) {
  612. pr_notice("%s: cmd%d arg:%x data error:%d\n",
  613. mmc_hostname(host),
  614. cmd->opcode, cmd->arg,
  615. mrq->data->error);
  616. atomic_set(&host->cq_tuning_now, 1);
  617. goto clear_end;
  618. }
  619. /* check wp violation */
  620. if ((cmd->opcode == MMC_QUE_TASK_PARAMS) ||
  621. (cmd->opcode == MMC_QUE_TASK_ADDR)) {
  622. if (atomic_read(&host->cq_w)) {
  623. if (cmd->resp[0] & R1_WP_VIOLATION)
  624. host->wp_error = 1;
  625. }
  626. }
  627. /* cmd13' - check queue ready & enqueue 46/47 */
  628. if ((cmd->opcode == MMC_SEND_STATUS) && (cmd->arg & (1 << 15))) {
  629. int i = 0;
  630. unsigned int resp = cmd->resp[0];
  631. if (resp == 0) {
  632. /* Workaround for ALPS03808823: if no task becomes ready within 30s, reinit the eMMC */
  633. if (!not_ready_time)
  634. not_ready_time = jiffies;
  635. else if (time_after(jiffies, not_ready_time
  636. + msecs_to_jiffies(30 * 1000))) {
  637. pr_notice("mmc0: error: task not ready over 30s\n");
  638. msleep(2000);
  639. if (mmc_reset_for_cmdq(host)) {
  640. pr_notice("[CQ] reinit fail\n");
  641. BUG_ON(1);
  642. }
  643. mmc_clr_dat_list(host);
  644. mmc_restore_tasks(host);
  645. atomic_set(&host->cq_wait_rdy, 0);
  646. atomic_set(&host->cq_rdy_cnt, 0);
  647. not_ready_time = 0;
  648. aee_kernel_warning("mmc",
  649. "task not ready over 30s");
  650. }
  651. goto request_end;
  652. }
  653. not_ready_time = 0;
  654. do {
  655. if ((resp & 1) && (!host->data_mrq_queued[i])) {
  656. if (host->cur_rw_task == i) {
  657. resp >>= 1;
  658. i++;
  659. continue;
  660. }
  661. if (!host->areq_que[i]) {
  662. pr_notice("%s: task %d not exist!,QSR:%x\n",
  663. mmc_hostname(host), i, cmd->resp[0]);
  664. pr_notice("%s: task_idx:%08lx\n",
  665. mmc_hostname(host),
  666. host->task_id_index);
  667. pr_notice("%s: cnt:%d,wait:%d,rdy:%d\n",
  668. mmc_hostname(host),
  669. atomic_read(&host->areq_cnt),
  670. atomic_read(&host->cq_wait_rdy),
  671. atomic_read(&host->cq_rdy_cnt));
  672. /* reset eMMC flow */
  673. cmd->error = (unsigned int)-ETIMEDOUT;
  674. cmd->retries = 0;
  675. goto request_end;
  676. }
  677. atomic_dec(&host->cq_wait_rdy);
  678. atomic_inc(&host->cq_rdy_cnt);
  679. mmc_prep_areq_que(host, host->areq_que[i]);
  680. mmc_enqueue_queue(host, host->areq_que[i]->mrq);
  681. host->data_mrq_queued[i] = true;
  682. }
  683. resp >>= 1;
  684. i++;
  685. } while (resp && (i < host->card->ext_csd.cmdq_depth));
  686. }
  687. /* cmd46/47 - request done */
  688. if (cmd->opcode == MMC_EXECUTE_READ_TASK
  689. || cmd->opcode == MMC_EXECUTE_WRITE_TASK)
  690. goto clear_end;
  691. goto request_end;
  692. clear_end:
  693. task_id = ((cmd->arg >> 16) & 0x1f);
  694. clear_bit(task_id, &host->task_id_index);
  695. host->data_mrq_queued[task_id] = false;
  696. done = 1;
  697. request_end:
  698. /* hand the finished request to the cmdq thread, which completes it */
  699. if (done) {
  700. WARN_ON(cmd->opcode != MMC_EXECUTE_READ_TASK && cmd->opcode != MMC_EXECUTE_WRITE_TASK);
  701. WARN_ON(host->done_mrq);
  702. host->done_mrq = mrq;
  703. /*
  704. * Need to wake up cmdq thread, after done rw.
  705. */
  706. wake_up_interruptible(&host->cmdq_que);
  707. }
  708. }
  709. static void mmc_wait_for_cmdq_done(struct mmc_host *host)
  710. {
  711. while (atomic_read(&host->areq_cnt) != 0) {
  712. wait_event_interruptible(host->cmp_que,
  713. (atomic_read(&host->areq_cnt) == 0));
  714. }
  715. }
  716. void mmc_wait_cmdq_empty(struct mmc_host *host)
  717. {
  718. mmc_wait_for_cmdq_done(host);
  719. }
  720. #endif
  721. #if defined(CONFIG_MTK_EMMC_CQ_SUPPORT) || defined(CONFIG_MTK_EMMC_HW_CQ)
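/*
 * Enable or disable CMDQ mode on the card (EXT_CSD_CMDQ_MODE_EN), first
 * halting the CQE or draining the software queue when disabling, then
 * updating the cached ext_csd.cmdq_en and card state flags on success.
 */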
  722. int mmc_blk_cmdq_switch(struct mmc_card *card, int enable)
  723. {
  724. int ret;
  725. bool cmdq_mode = !!mmc_card_cmdq(card);
  726. struct mmc_host *host = card->host;
  727. if (!card->ext_csd.cmdq_support ||
  728. (cmdq_mode == !!enable))
  729. return 0;
  730. #ifdef CONFIG_MTK_EMMC_HW_CQ
  731. if (!enable &&
  732. (card->host->caps2 & MMC_CAP2_CQE)) {
  733. /* the host supports CQE */
  734. ret = mmc_cmdq_halt_on_empty_queue(host);
  735. if (ret) {
  736. pr_notice("%s: halt: failed: %d\n",
  737. mmc_hostname(host), ret);
  738. goto out;
  739. }
  740. /* disable CQE before transferring data in legacy mode */
  741. host->cmdq_ops->disable(host, true);
  742. }
  743. #endif
  744. #ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
  745. if (!enable &&
  746. !(card->host->caps2 & MMC_CAP2_CQE)) {
  747. mmc_wait_cmdq_empty(card->host);
  748. }
  749. #endif
  750. ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  751. EXT_CSD_CMDQ_MODE_EN, enable,
  752. card->ext_csd.generic_cmd6_time);
  753. if (ret) {
  754. pr_notice("%s: cmdq %s error %d\n",
  755. mmc_hostname(host),
  756. enable ? "on" : "off",
  757. ret);
  758. goto out;
  759. }
  760. card->ext_csd.cmdq_en = enable;
  761. pr_notice("%s: device cq %s\n",
  762. mmc_hostname(host),
  763. card->ext_csd.cmdq_en ? "on":"off");
  764. if (enable) {
  765. mmc_card_set_cmdq(card);
  766. #ifdef CONFIG_MTK_EMMC_HW_CQ
  767. if (card->host->caps2 & MMC_CAP2_CQE) {
  768. /* enable for cqhci */
  769. host->cmdq_ops->enable(host);
  770. /* un-halt when enabling */
  771. if (mmc_host_halt(host) &&
  772. mmc_cmdq_halt(host, false))
  773. pr_notice("%s: %s: cmdq unhalt failed\n",
  774. mmc_hostname(host), __func__);
  775. }
  776. #endif
  777. } else
  778. mmc_card_clr_cmdq(card);
  779. out:
  780. return ret;
  781. }
  782. EXPORT_SYMBOL(mmc_blk_cmdq_switch);
  783. #endif
  784. static int mmc_schedule_delayed_work(struct delayed_work *work,
  785. unsigned long delay)
  786. {
  787. /*
  788. * We use the system_freezable_wq, because of two reasons.
  789. * First, it allows several works (not the same work item) to be
  790. * executed simultaneously. Second, the queue becomes frozen when
  791. * userspace becomes frozen during system PM.
  792. */
  793. return queue_delayed_work(system_freezable_wq, work, delay);
  794. }
  795. #ifdef CONFIG_FAIL_MMC_REQUEST
  796. /*
  797. * Internal function. Inject random data errors.
  798. * If mmc_data is NULL no errors are injected.
  799. */
  800. static void mmc_should_fail_request(struct mmc_host *host,
  801. struct mmc_request *mrq)
  802. {
  803. struct mmc_command *cmd = mrq->cmd;
  804. struct mmc_data *data = mrq->data;
  805. static const int data_errors[] = {
  806. -ETIMEDOUT,
  807. -EILSEQ,
  808. -EIO,
  809. };
  810. if (!data)
  811. return;
  812. if (cmd->error || data->error ||
  813. !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
  814. return;
  815. data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
  816. data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
  817. }
  818. #else /* CONFIG_FAIL_MMC_REQUEST */
  819. static inline void mmc_should_fail_request(struct mmc_host *host,
  820. struct mmc_request *mrq)
  821. {
  822. }
  823. #endif /* CONFIG_FAIL_MMC_REQUEST */
  824. static inline void mmc_complete_cmd(struct mmc_request *mrq)
  825. {
  826. if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
  827. complete_all(&mrq->cmd_completion);
  828. }
  829. void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
  830. {
  831. if (!mrq->cap_cmd_during_tfr)
  832. return;
  833. mmc_complete_cmd(mrq);
  834. pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
  835. mmc_hostname(host), mrq->cmd->opcode);
  836. }
  837. EXPORT_SYMBOL(mmc_command_done);
  838. /**
  839. * mmc_request_done - finish processing an MMC request
  840. * @host: MMC host which completed request
  841. * @mrq: MMC request which has completed
  842. *
  843. * MMC drivers should call this function when they have completed
  844. * their processing of a request.
  845. */
  846. void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
  847. {
  848. struct mmc_command *cmd = mrq->cmd;
  849. int err = cmd->error;
  850. /* Flag re-tuning needed on CRC errors */
  851. if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
  852. cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
  853. (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
  854. (mrq->data && mrq->data->error == -EILSEQ) ||
  855. (mrq->stop && mrq->stop->error == -EILSEQ)))
  856. mmc_retune_needed(host);
  857. if (err && cmd->retries && mmc_host_is_spi(host)) {
  858. if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
  859. cmd->retries = 0;
  860. }
  861. if (host->ongoing_mrq == mrq)
  862. host->ongoing_mrq = NULL;
  863. mmc_complete_cmd(mrq);
  864. trace_mmc_request_done(host, mrq);
  865. /*
  866. * We list various conditions for the command to be considered
  867. * properly done:
  868. *
  869. * - There was no error, OK fine then
  870. * - We are not doing some kind of retry
  871. * - The card was removed (...so just complete everything no matter
  872. * if there are errors or retries)
  873. */
  874. if (!err || !cmd->retries || mmc_card_removed(host->card)) {
  875. mmc_should_fail_request(host, mrq);
  876. if (!host->ongoing_mrq)
  877. led_trigger_event(host->led, LED_OFF);
  878. if (mrq->sbc) {
  879. pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
  880. mmc_hostname(host), mrq->sbc->opcode,
  881. mrq->sbc->error,
  882. mrq->sbc->resp[0], mrq->sbc->resp[1],
  883. mrq->sbc->resp[2], mrq->sbc->resp[3]);
  884. }
  885. pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
  886. mmc_hostname(host), cmd->opcode, err,
  887. cmd->resp[0], cmd->resp[1],
  888. cmd->resp[2], cmd->resp[3]);
  889. if (mrq->data) {
  890. pr_debug("%s: %d bytes transferred: %d\n",
  891. mmc_hostname(host),
  892. mrq->data->bytes_xfered, mrq->data->error);
  893. }
  894. if (mrq->stop) {
  895. pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
  896. mmc_hostname(host), mrq->stop->opcode,
  897. mrq->stop->error,
  898. mrq->stop->resp[0], mrq->stop->resp[1],
  899. mrq->stop->resp[2], mrq->stop->resp[3]);
  900. }
  901. }
  902. /*
  903. * Request starter must handle retries - see
  904. * mmc_wait_for_req_done().
  905. */
  906. if (mrq->done)
  907. mrq->done(mrq);
  908. }
  909. EXPORT_SYMBOL(mmc_request_done);
  910. static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
  911. {
  912. int err;
  913. /* Assumes host controller has been runtime resumed by mmc_claim_host */
  914. err = mmc_retune(host);
  915. if (err) {
  916. mrq->cmd->error = err;
  917. mmc_request_done(host, mrq);
  918. return;
  919. }
  920. /*
  921. * For sdio rw commands we must wait for card busy otherwise some
  922. * sdio devices won't work properly.
  923. * And bypass I/O abort, reset and bus suspend operations.
  924. */
  925. if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
  926. host->ops->card_busy) {
  927. int tries = 500; /* Wait approx 500ms at maximum */
  928. while (host->ops->card_busy(host) && --tries)
  929. mmc_delay(1);
  930. if (tries == 0) {
  931. mrq->cmd->error = -EBUSY;
  932. mmc_request_done(host, mrq);
  933. return;
  934. }
  935. }
  936. if (mrq->cap_cmd_during_tfr) {
  937. host->ongoing_mrq = mrq;
  938. /*
  939. * Retry path could come through here without having waited on
  940. * cmd_completion, so ensure it is reinitialised.
  941. */
  942. reinit_completion(&mrq->cmd_completion);
  943. }
  944. trace_mmc_request_start(host, mrq);
  945. if (host->cqe_on)
  946. host->cqe_ops->cqe_off(host);
  947. host->ops->request(host, mrq);
  948. }
  949. static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
  950. {
  951. if (mrq->sbc) {
  952. pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
  953. mmc_hostname(host), mrq->sbc->opcode,
  954. mrq->sbc->arg, mrq->sbc->flags);
  955. }
  956. if (mrq->cmd) {
  957. pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
  958. mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
  959. mrq->cmd->flags);
  960. }
  961. if (mrq->data) {
  962. pr_debug("%s: blksz %d blocks %d flags %08x "
  963. "tsac %d ms nsac %d\n",
  964. mmc_hostname(host), mrq->data->blksz,
  965. mrq->data->blocks, mrq->data->flags,
  966. mrq->data->timeout_ns / 1000000,
  967. mrq->data->timeout_clks);
  968. }
  969. if (mrq->stop) {
  970. pr_debug("%s: CMD%u arg %08x flags %08x\n",
  971. mmc_hostname(host), mrq->stop->opcode,
  972. mrq->stop->arg, mrq->stop->flags);
  973. }
  974. }
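/*
 * Validate and initialise an mrq before it is started: clear the error fields,
 * link cmd/sbc/data/stop back to the mrq, and reject requests whose block
 * size, block count or total request size exceed the host limits, or whose
 * scatterlist length does not add up to blocks * blksz.
 */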
  975. static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
  976. {
  977. unsigned int i, sz = 0;
  978. struct scatterlist *sg;
  979. if (mrq->cmd) {
  980. mrq->cmd->error = 0;
  981. mrq->cmd->mrq = mrq;
  982. mrq->cmd->data = mrq->data;
  983. }
  984. if (mrq->sbc) {
  985. mrq->sbc->error = 0;
  986. mrq->sbc->mrq = mrq;
  987. }
  988. if (mrq->data) {
  989. if (mrq->data->blksz > host->max_blk_size ||
  990. mrq->data->blocks > host->max_blk_count ||
  991. mrq->data->blocks * mrq->data->blksz > host->max_req_size)
  992. return -EINVAL;
  993. for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
  994. sz += sg->length;
  995. if (sz != mrq->data->blocks * mrq->data->blksz)
  996. return -EINVAL;
  997. mrq->data->error = 0;
  998. mrq->data->mrq = mrq;
  999. if (mrq->stop) {
  1000. mrq->data->stop = mrq->stop;
  1001. mrq->stop->error = 0;
  1002. mrq->stop->mrq = mrq;
  1003. }
  1004. }
  1005. return 0;
  1006. }
  1007. static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
  1008. {
  1009. int err;
  1010. mmc_retune_hold(host);
  1011. if (mmc_card_removed(host->card))
  1012. return -ENOMEDIUM;
  1013. mmc_mrq_pr_debug(host, mrq);
  1014. WARN_ON(!host->claimed);
  1015. err = mmc_mrq_prep(host, mrq);
  1016. if (err)
  1017. return err;
  1018. #ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
  1019. if (host->card
  1020. && mmc_card_cmdq(host->card)
  1021. && mrq->done == mmc_wait_cmdq_done) {
  1022. mmc_enqueue_queue(host, mrq);
  1023. wake_up_process(host->cmdq_thread);
  1024. led_trigger_event(host->led, LED_FULL);
  1025. } else {
  1026. #endif
  1027. led_trigger_event(host->led, LED_FULL);
  1028. #ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
  1029. if (host->card
  1030. && host->card->ext_csd.cmdq_support
  1031. && mrq->cmd->opcode != MMC_SEND_STATUS)
  1032. /* added for eMMC reset when an error happens */
  1033. /* cannot wait for the cmdq to drain for init requests
  1034. * issued while the eMMC is being reset during cmdq error handling
  1035. */
  1036. if (strncmp(current->comm, "exe_cq", 6)
  1037. || !emmc_resetting_when_cmdq)
  1038. mmc_wait_cmdq_empty(host);
  1039. #endif
  1040. __mmc_start_request(host, mrq);
  1041. #ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
  1042. }
  1043. #endif
  1044. return 0;
  1045. }
  1046. #ifdef CONFIG_MTK_EMMC_HW_CQ
  1047. static void mmc_start_cmdq_request(struct mmc_host *host,
  1048. struct mmc_request *mrq)
  1049. {
  1050. if (mrq->data) {
  1051. pr_debug("%s: blksz %d blocks %d flags %08x tsac %lu ms nsac %d\n",
  1052. mmc_hostname(host), mrq->data->blksz,
  1053. mrq->data->blocks, mrq->data->flags,
  1054. mrq->data->timeout_ns / NSEC_PER_MSEC,
  1055. mrq->data->timeout_clks);
  1056. WARN_ON(mrq->data->blksz > host->max_blk_size); /*bug*/
  1057. WARN_ON(mrq->data->blocks > host->max_blk_count); /*bug*/
  1058. WARN_ON(mrq->data->blocks * mrq->data->blksz >
  1059. host->max_req_size); /*bug*/
  1060. mrq->data->error = 0;
  1061. mrq->data->mrq = mrq;
  1062. }
  1063. if (mrq->cmd) {
  1064. mrq->cmd->error = 0;
  1065. mrq->cmd->mrq = mrq;
  1066. }
  1067. if (likely(host->cmdq_ops->request))
  1068. host->cmdq_ops->request(host, mrq);
  1069. else
  1070. pr_notice("%s: %s: issue request failed\n", mmc_hostname(host),
  1071. __func__);
  1072. }
  1073. #endif
  1074. /*
  1075. * mmc_wait_data_done() - done callback for data request
  1076. * @mrq: done data request
  1077. *
  1078. * Wakes up mmc context, passed as a callback to host controller driver
  1079. */
  1080. static void mmc_wait_data_done(struct mmc_request *mrq)
  1081. {
  1082. struct mmc_context_info *context_info = &mrq->host->context_info;
  1083. context_info->is_done_rcv = true;
  1084. wake_up_interruptible(&context_info->wait);
  1085. }
  1086. static void mmc_wait_done(struct mmc_request *mrq)
  1087. {
  1088. complete(&mrq->completion);
  1089. }
  1090. static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
  1091. {
  1092. struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
  1093. /*
  1094. * If there is an ongoing transfer, wait for the command line to become
  1095. * available.
  1096. */
  1097. if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
  1098. wait_for_completion(&ongoing_mrq->cmd_completion);
  1099. }
  1100. /*
  1101. * __mmc_start_data_req() - starts data request
  1102. * @host: MMC host to start the request
  1103. * @mrq: data request to start
  1104. *
  1105. * Sets the done callback to be called when request is completed by the card.
  1106. * Starts data mmc request execution
  1107. * If an ongoing transfer is already in progress, wait for the command line
  1108. * to become available before sending another command.
  1109. */
  1110. static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
  1111. {
  1112. int err;
  1113. mmc_wait_ongoing_tfr_cmd(host);
  1114. #ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
  1115. if (host->card && mmc_card_cmdq(host->card))
  1116. mrq->done = mmc_wait_cmdq_done;
  1117. else
  1118. #endif
  1119. mrq->done = mmc_wait_data_done;
  1120. mrq->host = host;
  1121. init_completion(&mrq->cmd_completion);
  1122. err = mmc_start_request(host, mrq);
  1123. if (err) {
  1124. mrq->cmd->error = err;
  1125. mmc_complete_cmd(mrq);
  1126. mmc_wait_data_done(mrq);
  1127. }
  1128. return err;
  1129. }
  1130. static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
  1131. {
  1132. int err;
  1133. mmc_wait_ongoing_tfr_cmd(host);
  1134. init_completion(&mrq->completion);
  1135. mrq->done = mmc_wait_done;
  1136. init_completion(&mrq->cmd_completion);
  1137. err = mmc_start_request(host, mrq);
  1138. if (err) {
  1139. mrq->cmd->error = err;
  1140. mmc_complete_cmd(mrq);
  1141. complete(&mrq->completion);
  1142. }
  1143. return err;
  1144. }
  1145. void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
  1146. {
  1147. struct mmc_command *cmd;
  1148. while (1) {
  1149. wait_for_completion(&mrq->completion);
  1150. cmd = mrq->cmd;
  1151. /*
  1152. * If host has timed out waiting for the sanitize
  1153. * to complete, card might be still in programming state
  1154. * so let's try to bring the card out of programming
  1155. * state.
  1156. */
  1157. if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
  1158. if (!mmc_interrupt_hpi(host->card)) {
  1159. pr_warn("%s: %s: Interrupted sanitize\n",
  1160. mmc_hostname(host), __func__);
  1161. cmd->error = 0;
  1162. break;
  1163. } else {
  1164. pr_err("%s: %s: Failed to interrupt sanitize\n",
  1165. mmc_hostname(host), __func__);
  1166. }
  1167. }
  1168. if (!cmd->error || !cmd->retries ||
  1169. mmc_card_removed(host->card))
  1170. break;
  1171. mmc_retune_recheck(host);
  1172. pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
  1173. mmc_hostname(host), cmd->opcode, cmd->error);
  1174. cmd->retries--;
  1175. cmd->error = 0;
  1176. __mmc_start_request(host, mrq);
  1177. }
  1178. mmc_retune_release(host);
  1179. }
  1180. EXPORT_SYMBOL(mmc_wait_for_req_done);
  1181. /**
  1182. * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
  1183. * @host: MMC host
  1184. * @mrq: MMC request
  1185. *
  1186. * mmc_is_req_done() is used with requests that have
  1187. * mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
  1188. * starting a request and before waiting for it to complete. That is,
  1189. * either in between calls to mmc_start_req(), or after mmc_wait_for_req()
  1190. * and before mmc_wait_for_req_done(). If it is called at other times the
  1191. * result is not meaningful.
  1192. */
  1193. bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
  1194. {
  1195. if (host->areq)
  1196. return host->context_info.is_done_rcv;
  1197. else
  1198. return completion_done(&mrq->completion);
  1199. }
  1200. EXPORT_SYMBOL(mmc_is_req_done);
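/*
 * Illustrative usage sketch (not part of the original file): a caller of the
 * 'cap_cmd_during_tfr' API, assuming it has claimed the host and set
 * mrq->cap_cmd_during_tfr = true; issue_non_data_cmd() is a hypothetical
 * placeholder for whatever commands the caller wants to interleave:
 *
 *	mmc_wait_for_req(host, mrq);		returns while data is moving
 *	while (!mmc_is_req_done(host, mrq))
 *		issue_non_data_cmd(host);	commands not using the data lines
 *	mmc_wait_for_req_done(host, mrq);	reap the transfer
 */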
  1201. /**
  1202. * mmc_pre_req - Prepare for a new request
  1203. * @host: MMC host to prepare command
  1204. * @mrq: MMC request to prepare for
  1205. *
1206. * mmc_pre_req() is called prior to mmc_start_req() to let
  1207. * host prepare for the new request. Preparation of a request may be
  1208. * performed while another request is running on the host.
  1209. */
  1210. static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
  1211. {
  1212. if (host->ops->pre_req)
  1213. host->ops->pre_req(host, mrq);
  1214. }
  1215. /**
  1216. * mmc_post_req - Post process a completed request
  1217. * @host: MMC host to post process command
  1218. * @mrq: MMC request to post process for
  1219. * @err: Error, if non zero, clean up any resources made in pre_req
  1220. *
  1221. * Let the host post process a completed request. Post processing of
1222. * a request may be performed while another request is running.
  1223. */
  1224. static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
  1225. int err)
  1226. {
  1227. if (host->ops->post_req)
  1228. host->ops->post_req(host, mrq, err);
  1229. }
  1230. #ifdef CONFIG_MTK_EMMC_HW_CQ
  1231. /**
  1232. * mmc_cmdq_discard_card_queue - discard the task[s] in the device
  1233. * @host: host instance
  1234. * @tasks: mask of tasks to be knocked off
  1235. * 0: remove all queued tasks
  1236. */
  1237. int mmc_cmdq_discard_queue(struct mmc_host *host, u32 tasks)
  1238. {
  1239. pr_notice("%s: discard tasks = %d (0: all)\n",
  1240. mmc_hostname(host),
  1241. tasks);
  1242. return mmc_discard_queue(host, tasks);
  1243. }
  1244. EXPORT_SYMBOL(mmc_cmdq_discard_queue);
  1245. /**
  1246. * mmc_cmdq_post_req - post process of a completed request
  1247. * @host: host instance
  1248. * @tag: the request tag.
1249. * @err: non-zero on error, zero on success
  1250. */
  1251. void mmc_cmdq_post_req(struct mmc_host *host, int tag, int err)
  1252. {
  1253. if (likely(host->cmdq_ops->post_req))
  1254. host->cmdq_ops->post_req(host, tag, err);
  1255. }
  1256. EXPORT_SYMBOL(mmc_cmdq_post_req);
  1257. /**
  1258. * mmc_cmdq_halt - halt/un-halt the command queue engine
  1259. * @host: host instance
  1260. * @halt: true - halt, un-halt otherwise
  1261. *
  1262. * Host halts the command queue engine. It should complete
  1263. * the ongoing transfer and release the bus.
  1264. * All legacy commands can be sent upon successful
  1265. * completion of this function.
  1266. * Returns 0 on success, negative otherwise
  1267. */
  1268. int mmc_cmdq_halt(struct mmc_host *host, bool halt)
  1269. {
  1270. int err = 0;
  1271. if (mmc_host_cq_disable(host)) {
  1272. pr_notice("%s: %s: CQE is already disabled\n",
  1273. mmc_hostname(host), __func__);
  1274. return 0;
  1275. }
  1276. if ((halt && mmc_host_halt(host)) ||
  1277. (!halt && !mmc_host_halt(host))) {
  1278. pr_notice("%s: %s: CQE is already %s\n", mmc_hostname(host),
  1279. __func__, halt ? "halted" : "un-halted");
  1280. return 0;
  1281. }
  1282. pr_debug("%s: %s: CQE need %s\n", mmc_hostname(host),
  1283. __func__, halt ? "halted" : "un-halted");
  1284. if (host->cmdq_ops->halt) {
  1285. err = host->cmdq_ops->halt(host, halt);
  1286. if (!err && halt)
  1287. mmc_host_set_halt(host);
  1288. else if (!err && !halt) {
  1289. mmc_host_clr_halt(host);
  1290. wake_up(&host->cmdq_ctx.wait);
  1291. }
  1292. } else
  1293. err = -EINVAL;
  1294. pr_debug("%s: %s: CQE done %s\n", mmc_hostname(host),
  1295. __func__,
  1296. mmc_host_halt(host) ? "halted" : "un-halted");
  1297. return err;
  1298. }
  1299. EXPORT_SYMBOL(mmc_cmdq_halt);
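/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * needs to send a legacy command while CQE is active halts the engine first
 * and un-halts it afterwards, per the description above:
 *
 *	err = mmc_cmdq_halt(host, true);
 *	if (!err) {
 *		... issue legacy command(s) ...
 *		mmc_cmdq_halt(host, false);
 *	}
 */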
  1300. int mmc_cmdq_start_req(struct mmc_host *host, struct mmc_cmdq_req *cmdq_req)
  1301. {
  1302. struct mmc_request *mrq = &cmdq_req->mrq;
  1303. mrq->host = host;
  1304. if (mmc_card_removed(host->card)) {
  1305. mrq->cmd->error = -ENOMEDIUM;
  1306. return -ENOMEDIUM;
  1307. }
  1308. mmc_start_cmdq_request(host, mrq);
  1309. return 0;
  1310. }
  1311. EXPORT_SYMBOL(mmc_cmdq_start_req);
  1312. static void mmc_cmdq_dcmd_req_done(struct mmc_request *mrq)
  1313. {
  1314. complete(&mrq->completion);
  1315. }
  1316. int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
  1317. struct mmc_cmdq_req *cmdq_req)
  1318. {
  1319. struct mmc_request *mrq = &cmdq_req->mrq;
  1320. struct mmc_command *cmd = mrq->cmd;
  1321. int err = 0;
  1322. init_completion(&mrq->completion);
  1323. mrq->done = mmc_cmdq_dcmd_req_done;
  1324. err = mmc_cmdq_start_req(host, cmdq_req);
  1325. if (err)
  1326. return err;
  1327. mmc_cmdq_up_rwsem(host);
  1328. wait_for_completion_io(&mrq->completion);
  1329. err = mmc_cmdq_down_rwsem(host, mrq->req);
  1330. if (err || cmd->error) {
  1331. pr_notice("%s: dcmd %d failed with err %d\n",
  1332. mmc_hostname(host), cmd->opcode,
  1333. cmd->error);
  1334. err = cmd->error;
  1335. if (host->cmdq_ops->dumpstate)
  1336. host->cmdq_ops->dumpstate(host, false);
  1337. }
  1338. pr_debug("%s: dcmd %d done with err %d\n",
  1339. mmc_hostname(host), cmd->opcode,
  1340. cmd->error);
  1341. return err;
  1342. }
  1343. EXPORT_SYMBOL(mmc_cmdq_wait_for_dcmd);
  1344. int mmc_cmdq_prepare_flush(struct mmc_command *cmd)
  1345. {
  1346. return __mmc_switch_cmdq_mode(cmd, EXT_CSD_CMD_SET_NORMAL,
  1347. EXT_CSD_FLUSH_CACHE, 1,
  1348. 0, true, true);
  1349. }
  1350. EXPORT_SYMBOL(mmc_cmdq_prepare_flush);
  1351. #endif
  1352. /**
  1353. * mmc_finalize_areq() - finalize an asynchronous request
  1354. * @host: MMC host to finalize any ongoing request on
  1355. *
1356. * Returns the status of the ongoing asynchronous request, or
1357. * MMC_BLK_SUCCESS if no request was in flight.
  1358. */
  1359. static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
  1360. {
  1361. struct mmc_context_info *context_info = &host->context_info;
  1362. enum mmc_blk_status status;
  1363. if (!host->areq)
  1364. return MMC_BLK_SUCCESS;
  1365. while (1) {
  1366. wait_event_interruptible(context_info->wait,
  1367. (context_info->is_done_rcv ||
  1368. context_info->is_new_req));
  1369. if (context_info->is_done_rcv) {
  1370. struct mmc_command *cmd;
  1371. context_info->is_done_rcv = false;
  1372. cmd = host->areq->mrq->cmd;
  1373. if (!cmd->error || !cmd->retries ||
  1374. mmc_card_removed(host->card)) {
  1375. status = host->areq->err_check(host->card,
  1376. host->areq);
  1377. break; /* return status */
  1378. } else {
  1379. mmc_retune_recheck(host);
  1380. pr_info("%s: req failed (CMD%u): %d, retrying...\n",
  1381. mmc_hostname(host),
  1382. cmd->opcode, cmd->error);
  1383. cmd->retries--;
  1384. cmd->error = 0;
  1385. __mmc_start_request(host, host->areq->mrq);
  1386. continue; /* wait for done/new event again */
  1387. }
  1388. }
  1389. return MMC_BLK_NEW_REQUEST;
  1390. }
  1391. mmc_retune_release(host);
  1392. /*
  1393. * Check BKOPS urgency for each R1 response
  1394. */
  1395. if (host->card && mmc_card_mmc(host->card) &&
  1396. ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
  1397. (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
  1398. (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
  1399. mmc_start_bkops(host->card, true);
  1400. }
  1401. return status;
  1402. }
  1403. /**
  1404. * mmc_start_areq - start an asynchronous request
  1405. * @host: MMC host to start command
  1406. * @areq: asynchronous request to start
  1407. * @ret_stat: out parameter for status
  1408. *
  1409. * Start a new MMC custom command request for a host.
1410. * If there is an ongoing async request, wait for completion
1411. * of that request, then start the new one and return.
  1412. * Does not wait for the new request to complete.
  1413. *
  1414. * Returns the completed request, NULL in case of none completed.
1415. * Wait for an ongoing request (previously started) to complete and
  1416. * return the completed request. If there is no ongoing request, NULL
  1417. * is returned without waiting. NULL is not an error condition.
  1418. */
  1419. struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
  1420. struct mmc_async_req *areq,
  1421. enum mmc_blk_status *ret_stat)
  1422. {
  1423. enum mmc_blk_status status;
  1424. int start_err = 0;
  1425. struct mmc_async_req *previous = host->areq;
  1426. struct mmc_request *mrq;
  1427. bool cmdq_en = false;
  1428. #ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
  1429. cmdq_en = mmc_card_cmdq(host->card);
  1430. #endif
  1431. /* Prepare a new request */
  1432. if (areq)
  1433. mmc_pre_req(host, areq->mrq);
  1434. /* Finalize previous request */
  1435. status = mmc_finalize_areq(host);
  1436. if (ret_stat)
  1437. *ret_stat = status;
  1438. /* The previous request is still going on... */
  1439. if (status == MMC_BLK_NEW_REQUEST)
  1440. return NULL;
  1441. if (host->areq)
  1442. mt_biolog_mmcqd_req_end(host->areq->mrq->data);
  1443. /* Fine so far, start the new request! */
  1444. if (status == MMC_BLK_SUCCESS && areq) {
  1445. #ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
  1446. if (cmdq_en)
  1447. mrq = areq->mrq_que;
  1448. else
  1449. #endif
  1450. mrq = areq->mrq;
  1451. start_err =
  1452. __mmc_start_data_req(host, mrq);
  1453. if (!cmdq_en)
  1454. mt_biolog_mmcqd_req_start(host);
  1455. }
  1456. /* Postprocess the old request at this point */
  1457. if (!cmdq_en && host->areq)
  1458. mmc_post_req(host, host->areq->mrq, 0);
  1459. /* Cancel a prepared request if it was not started. */
  1460. if ((status != MMC_BLK_SUCCESS || start_err) && areq)
  1461. mmc_post_req(host, areq->mrq, -EINVAL);
  1462. if (status != MMC_BLK_SUCCESS || cmdq_en)
  1463. host->areq = NULL;
  1464. else
  1465. host->areq = areq;
  1466. return previous;
  1467. }
  1468. EXPORT_SYMBOL(mmc_start_areq);
  1469. /**
  1470. * mmc_wait_for_req - start a request and wait for completion
  1471. * @host: MMC host to start command
  1472. * @mrq: MMC request to start
  1473. *
  1474. * Start a new MMC custom command request for a host, and wait
  1475. * for the command to complete. In the case of 'cap_cmd_during_tfr'
  1476. * requests, the transfer is ongoing and the caller can issue further
  1477. * commands that do not use the data lines, and then wait by calling
  1478. * mmc_wait_for_req_done().
  1479. * Does not attempt to parse the response.
  1480. */
  1481. void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
  1482. {
  1483. __mmc_start_req(host, mrq);
  1484. if (!mrq->cap_cmd_during_tfr)
  1485. mmc_wait_for_req_done(host, mrq);
  1486. }
  1487. EXPORT_SYMBOL(mmc_wait_for_req);
  1488. /**
  1489. * mmc_wait_for_cmd - start a command and wait for completion
  1490. * @host: MMC host to start command
  1491. * @cmd: MMC command to start
  1492. * @retries: maximum number of retries
  1493. *
  1494. * Start a new MMC command for a host, and wait for the command
  1495. * to complete. Return any error that occurred while the command
  1496. * was executing. Do not attempt to parse the response.
  1497. */
  1498. int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
  1499. {
  1500. struct mmc_request mrq = {};
  1501. WARN_ON(!host->claimed);
  1502. memset(cmd->resp, 0, sizeof(cmd->resp));
  1503. cmd->retries = retries;
  1504. mrq.cmd = cmd;
  1505. cmd->data = NULL;
  1506. mmc_wait_for_req(host, &mrq);
  1507. return cmd->error;
  1508. }
  1509. EXPORT_SYMBOL(mmc_wait_for_cmd);
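/*
 * Illustrative usage sketch (not part of the original file): issuing a simple
 * SEND_STATUS command with mmc_wait_for_cmd(), assuming the host is already
 * claimed by the caller:
 *
 *	struct mmc_command cmd = {};
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *	if (!err)
 *		status = cmd.resp[0];
 */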
  1510. /**
  1511. * mmc_set_data_timeout - set the timeout for a data command
  1512. * @data: data phase for command
  1513. * @card: the MMC card associated with the data transfer
  1514. *
  1515. * Computes the data timeout parameters according to the
  1516. * correct algorithm given the card type.
  1517. */
  1518. void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
  1519. {
  1520. unsigned int mult;
  1521. /*
  1522. * SDIO cards only define an upper 1 s limit on access.
  1523. */
  1524. if (mmc_card_sdio(card)) {
  1525. data->timeout_ns = 1000000000;
  1526. data->timeout_clks = 0;
  1527. return;
  1528. }
  1529. /*
  1530. * SD cards use a 100 multiplier rather than 10
  1531. */
  1532. mult = mmc_card_sd(card) ? 100 : 10;
  1533. /*
  1534. * Scale up the multiplier (and therefore the timeout) by
  1535. * the r2w factor for writes.
  1536. */
  1537. if (data->flags & MMC_DATA_WRITE)
  1538. mult <<= card->csd.r2w_factor;
  1539. data->timeout_ns = card->csd.taac_ns * mult;
  1540. data->timeout_clks = card->csd.taac_clks * mult;
  1541. /*
  1542. * SD cards also have an upper limit on the timeout.
  1543. */
  1544. if (mmc_card_sd(card)) {
  1545. unsigned int timeout_us, limit_us;
  1546. timeout_us = data->timeout_ns / 1000;
  1547. if (card->host->ios.clock)
  1548. timeout_us += data->timeout_clks * 1000 /
  1549. (card->host->ios.clock / 1000);
  1550. if (data->flags & MMC_DATA_WRITE)
  1551. /*
1552. * The MMC spec says: "It is strongly recommended
  1553. * for hosts to implement more than 500ms
  1554. * timeout value even if the card indicates
  1555. * the 250ms maximum busy length." Even the
  1556. * previous value of 300ms is known to be
  1557. * insufficient for some cards.
  1558. */
  1559. limit_us = 3000000;
  1560. else
  1561. limit_us = 100000;
  1562. /*
  1563. * SDHC cards always use these fixed values.
  1564. */
  1565. if (timeout_us > limit_us) {
  1566. data->timeout_ns = limit_us * 1000;
  1567. data->timeout_clks = 0;
  1568. }
  1569. /* assign limit value if invalid */
  1570. if (timeout_us == 0)
  1571. data->timeout_ns = limit_us * 1000;
  1572. }
  1573. /*
  1574. * Some cards require longer data read timeout than indicated in CSD.
  1575. * Address this by setting the read timeout to a "reasonably high"
  1576. * value. For the cards tested, 600ms has proven enough. If necessary,
  1577. * this value can be increased if other problematic cards require this.
  1578. */
  1579. if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
  1580. data->timeout_ns = 600000000;
  1581. data->timeout_clks = 0;
  1582. }
  1583. /*
  1584. * Some cards need very high timeouts if driven in SPI mode.
  1585. * The worst observed timeout was 900ms after writing a
  1586. * continuous stream of data until the internal logic
  1587. * overflowed.
  1588. */
  1589. if (mmc_host_is_spi(card->host)) {
  1590. if (data->flags & MMC_DATA_WRITE) {
  1591. if (data->timeout_ns < 1000000000)
  1592. data->timeout_ns = 1000000000; /* 1s */
  1593. } else {
  1594. if (data->timeout_ns < 100000000)
  1595. data->timeout_ns = 100000000; /* 100ms */
  1596. }
  1597. }
  1598. }
  1599. EXPORT_SYMBOL(mmc_set_data_timeout);
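/*
 * Illustrative usage sketch (not part of the original file): a block driver
 * typically fills in the data phase and lets the core compute the timeout;
 * nr_blocks is a hypothetical value supplied by the caller:
 *
 *	data.blksz = 512;
 *	data.blocks = nr_blocks;
 *	data.flags = MMC_DATA_READ;	or MMC_DATA_WRITE
 *	mmc_set_data_timeout(&data, card);
 */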
  1600. /**
  1601. * mmc_align_data_size - pads a transfer size to a more optimal value
  1602. * @card: the MMC card associated with the data transfer
  1603. * @sz: original transfer size
  1604. *
  1605. * Pads the original data size with a number of extra bytes in
  1606. * order to avoid controller bugs and/or performance hits
  1607. * (e.g. some controllers revert to PIO for certain sizes).
  1608. *
  1609. * Returns the improved size, which might be unmodified.
  1610. *
  1611. * Note that this function is only relevant when issuing a
  1612. * single scatter gather entry.
  1613. */
  1614. unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
  1615. {
  1616. /*
  1617. * FIXME: We don't have a system for the controller to tell
  1618. * the core about its problems yet, so for now we just 32-bit
  1619. * align the size.
  1620. */
  1621. sz = ((sz + 3) / 4) * 4;
  1622. return sz;
  1623. }
  1624. EXPORT_SYMBOL(mmc_align_data_size);
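/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * issues a single scatter-gather entry can pad the transfer length first,
 * e.g. 13 bytes are rounded up to 16:
 *
 *	len = mmc_align_data_size(card, len);
 *	sg_init_one(&sg, buffer, len);
 */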
  1625. /**
  1626. * __mmc_claim_host - exclusively claim a host
  1627. * @host: mmc host to claim
  1628. * @abort: whether or not the operation should be aborted
  1629. *
1630. * Claim a host for a set of operations. If @abort is non-NULL and
1631. * dereferences to a non-zero value, then this will return prematurely with
  1632. * that non-zero value without acquiring the lock. Returns zero
  1633. * with the lock held otherwise.
  1634. */
  1635. int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
  1636. {
  1637. DECLARE_WAITQUEUE(wait, current);
  1638. unsigned long flags;
  1639. int stop;
  1640. bool pm = false;
  1641. might_sleep();
  1642. add_wait_queue(&host->wq, &wait);
  1643. spin_lock_irqsave(&host->lock, flags);
  1644. while (1) {
  1645. set_current_state(TASK_UNINTERRUPTIBLE);
  1646. stop = abort ? atomic_read(abort) : 0;
  1647. if (stop || !host->claimed || host->claimer == current)
  1648. break;
  1649. spin_unlock_irqrestore(&host->lock, flags);
  1650. schedule();
  1651. spin_lock_irqsave(&host->lock, flags);
  1652. }
  1653. set_current_state(TASK_RUNNING);
  1654. if (!stop) {
  1655. host->claimed = 1;
  1656. host->claimer = current;
  1657. host->claim_cnt += 1;
  1658. if (host->claim_cnt == 1)
  1659. pm = true;
  1660. } else
  1661. wake_up(&host->wq);
  1662. spin_unlock_irqrestore(&host->lock, flags);
  1663. remove_wait_queue(&host->wq, &wait);
  1664. if (pm)
  1665. pm_runtime_get_sync(mmc_dev(host));
  1666. return stop;
  1667. }
  1668. EXPORT_SYMBOL(__mmc_claim_host);
  1669. /**
  1670. * mmc_try_claim_host - try exclusively to claim a host
1671. * and keep trying for the given time, with a gap of 10 ms
1672. * @host: mmc host to claim
1673. * @delay_ms: delay in ms
  1674. *
  1675. * Returns %1 if the host is claimed, %0 otherwise.
  1676. */
  1677. int mmc_try_claim_host(struct mmc_host *host, unsigned int delay_ms)
  1678. {
  1679. int claimed_host = 0;
  1680. unsigned long flags;
  1681. int retry_cnt = delay_ms/10;
  1682. bool pm = false;
  1683. do {
  1684. spin_lock_irqsave(&host->lock, flags);
  1685. if (!host->claimed || host->claimer == current) {
  1686. host->claimed = 1;
  1687. host->claimer = current;
  1688. host->claim_cnt += 1;
  1689. claimed_host = 1;
  1690. if (host->claim_cnt == 1)
  1691. pm = true;
  1692. }
  1693. spin_unlock_irqrestore(&host->lock, flags);
  1694. if (!claimed_host)
  1695. mmc_delay(10);
  1696. } while (!claimed_host && retry_cnt--);
  1697. if (pm)
  1698. pm_runtime_get_sync(mmc_dev(host));
  1699. return claimed_host;
  1700. }
  1701. EXPORT_SYMBOL(mmc_try_claim_host);
  1702. /**
  1703. * mmc_release_host - release a host
  1704. * @host: mmc host to release
  1705. *
  1706. * Release a MMC host, allowing others to claim the host
  1707. * for their operations.
  1708. */
  1709. void mmc_release_host(struct mmc_host *host)
  1710. {
  1711. unsigned long flags;
  1712. WARN_ON(!host->claimed);
  1713. spin_lock_irqsave(&host->lock, flags);
  1714. if (--host->claim_cnt) {
  1715. /* Release for nested claim */
  1716. spin_unlock_irqrestore(&host->lock, flags);
  1717. } else {
  1718. host->claimed = 0;
  1719. host->claimer = NULL;
  1720. spin_unlock_irqrestore(&host->lock, flags);
  1721. wake_up(&host->wq);
  1722. pm_runtime_mark_last_busy(mmc_dev(host));
  1723. pm_runtime_put_autosuspend(mmc_dev(host));
  1724. }
  1725. }
  1726. EXPORT_SYMBOL(mmc_release_host);
  1727. /*
  1728. * This is a helper function, which fetches a runtime pm reference for the
  1729. * card device and also claims the host.
  1730. */
  1731. void mmc_get_card(struct mmc_card *card)
  1732. {
  1733. pm_runtime_get_sync(&card->dev);
  1734. mmc_claim_host(card->host);
  1735. }
  1736. EXPORT_SYMBOL(mmc_get_card);
  1737. /*
  1738. * This is a helper function, which releases the host and drops the runtime
  1739. * pm reference for the card device.
  1740. */
  1741. void mmc_put_card(struct mmc_card *card)
  1742. {
  1743. mmc_release_host(card->host);
  1744. pm_runtime_mark_last_busy(&card->dev);
  1745. pm_runtime_put_autosuspend(&card->dev);
  1746. }
  1747. EXPORT_SYMBOL(mmc_put_card);
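/*
 * Illustrative usage sketch (not part of the original file): callers pair the
 * two helpers around card accesses so the runtime PM reference and the host
 * claim stay balanced; do_card_access() is a hypothetical operation:
 *
 *	mmc_get_card(card);
 *	err = do_card_access(card);
 *	mmc_put_card(card);
 */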
  1748. /*
  1749. * Internal function that does the actual ios call to the host driver,
  1750. * optionally printing some debug output.
  1751. */
  1752. static inline void mmc_set_ios(struct mmc_host *host)
  1753. {
  1754. struct mmc_ios *ios = &host->ios;
  1755. pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
  1756. "width %u timing %u\n",
  1757. mmc_hostname(host), ios->clock, ios->bus_mode,
  1758. ios->power_mode, ios->chip_select, ios->vdd,
  1759. 1 << ios->bus_width, ios->timing);
  1760. host->ops->set_ios(host, ios);
  1761. }
  1762. /*
  1763. * Control chip select pin on a host.
  1764. */
  1765. void mmc_set_chip_select(struct mmc_host *host, int mode)
  1766. {
  1767. host->ios.chip_select = mode;
  1768. mmc_set_ios(host);
  1769. }
  1770. /*
  1771. * Sets the host clock to the highest possible frequency that
  1772. * is below "hz".
  1773. */
  1774. void mmc_set_clock(struct mmc_host *host, unsigned int hz)
  1775. {
  1776. WARN_ON(hz && hz < host->f_min);
  1777. if (hz > host->f_max)
  1778. hz = host->f_max;
  1779. host->ios.clock = hz;
  1780. mmc_set_ios(host);
  1781. }
  1782. int mmc_execute_tuning(struct mmc_card *card)
  1783. {
  1784. struct mmc_host *host = card->host;
  1785. u32 opcode;
  1786. int err;
  1787. if (!host->ops->execute_tuning)
  1788. return 0;
  1789. if (host->cqe_on)
  1790. host->cqe_ops->cqe_off(host);
  1791. if (mmc_card_mmc(card))
  1792. opcode = MMC_SEND_TUNING_BLOCK_HS200;
  1793. else
  1794. opcode = MMC_SEND_TUNING_BLOCK;
  1795. err = host->ops->execute_tuning(host, opcode);
  1796. if (err) {
  1797. pr_err("%s: tuning execution failed: %d\n",
  1798. mmc_hostname(host), err);
  1799. } else {
  1800. host->retune_now = 0;
  1801. host->need_retune = 0;
  1802. mmc_retune_enable(host);
  1803. }
  1804. return err;
  1805. }
  1806. /*
  1807. * Change the bus mode (open drain/push-pull) of a host.
  1808. */
  1809. void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
  1810. {
  1811. host->ios.bus_mode = mode;
  1812. mmc_set_ios(host);
  1813. }
  1814. /*
  1815. * Change data bus width of a host.
  1816. */
  1817. void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
  1818. {
  1819. host->ios.bus_width = width;
  1820. mmc_set_ios(host);
  1821. }
  1822. /*
  1823. * Set initial state after a power cycle or a hw_reset.
  1824. */
  1825. void mmc_set_initial_state(struct mmc_host *host)
  1826. {
  1827. if (host->cqe_on)
  1828. host->cqe_ops->cqe_off(host);
  1829. mmc_retune_disable(host);
  1830. if (mmc_host_is_spi(host))
  1831. host->ios.chip_select = MMC_CS_HIGH;
  1832. else
  1833. host->ios.chip_select = MMC_CS_DONTCARE;
  1834. host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
  1835. host->ios.bus_width = MMC_BUS_WIDTH_1;
  1836. host->ios.timing = MMC_TIMING_LEGACY;
  1837. host->ios.drv_type = 0;
  1838. host->ios.enhanced_strobe = false;
  1839. /*
  1840. * Make sure we are in non-enhanced strobe mode before we
  1841. * actually enable it in ext_csd.
  1842. */
  1843. if ((host->caps2 & MMC_CAP2_HS400_ES) &&
  1844. host->ops->hs400_enhanced_strobe)
  1845. host->ops->hs400_enhanced_strobe(host, &host->ios);
  1846. mmc_set_ios(host);
  1847. }
  1848. /**
  1849. * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
  1850. * @vdd: voltage (mV)
  1851. * @low_bits: prefer low bits in boundary cases
  1852. *
  1853. * This function returns the OCR bit number according to the provided @vdd
1854. * value. If conversion is not possible, a negative errno value is returned.
  1855. *
  1856. * Depending on the @low_bits flag the function prefers low or high OCR bits
  1857. * on boundary voltages. For example,
  1858. * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
  1859. * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
  1860. *
1861. * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
  1862. */
  1863. static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
  1864. {
  1865. const int max_bit = ilog2(MMC_VDD_35_36);
  1866. int bit;
  1867. if (vdd < 1650 || vdd > 3600)
  1868. return -EINVAL;
  1869. if (vdd >= 1650 && vdd <= 1950)
  1870. return ilog2(MMC_VDD_165_195);
  1871. if (low_bits)
  1872. vdd -= 1;
  1873. /* Base 2000 mV, step 100 mV, bit's base 8. */
  1874. bit = (vdd - 2000) / 100 + 8;
  1875. if (bit > max_bit)
  1876. return max_bit;
  1877. return bit;
  1878. }
  1879. /**
  1880. * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
  1881. * @vdd_min: minimum voltage value (mV)
  1882. * @vdd_max: maximum voltage value (mV)
  1883. *
  1884. * This function returns the OCR mask bits according to the provided @vdd_min
  1885. * and @vdd_max values. If conversion is not possible the function returns 0.
  1886. *
  1887. * Notes wrt boundary cases:
  1888. * This function sets the OCR bits for all boundary voltages, for example
  1889. * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
  1890. * MMC_VDD_34_35 mask.
  1891. */
  1892. u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
  1893. {
  1894. u32 mask = 0;
  1895. if (vdd_max < vdd_min)
  1896. return 0;
  1897. /* Prefer high bits for the boundary vdd_max values. */
  1898. vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
  1899. if (vdd_max < 0)
  1900. return 0;
  1901. /* Prefer low bits for the boundary vdd_min values. */
  1902. vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
  1903. if (vdd_min < 0)
  1904. return 0;
  1905. /* Fill the mask, from max bit to min bit. */
  1906. while (vdd_max >= vdd_min)
  1907. mask |= 1 << vdd_max--;
  1908. return mask;
  1909. }
  1910. EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
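/*
 * Illustrative sketch (not part of the original file), restating the boundary
 * example above in code:
 *
 *	ocr = mmc_vddrange_to_ocrmask(3300, 3400);
 *	ocr now equals MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35
 */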
  1911. #ifdef CONFIG_OF
  1912. /**
  1913. * mmc_of_parse_voltage - return mask of supported voltages
1914. * @np: the device node to be parsed
  1915. * @mask: mask of voltages available for MMC/SD/SDIO
  1916. *
  1917. * Parse the "voltage-ranges" DT property, returning zero if it is not
  1918. * found, negative errno if the voltage-range specification is invalid,
  1919. * or one if the voltage-range is specified and successfully parsed.
  1920. */
  1921. int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
  1922. {
  1923. const u32 *voltage_ranges;
  1924. int num_ranges, i;
  1925. voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1926. if (!voltage_ranges) {
1927. pr_debug("%pOF: voltage-ranges unspecified\n", np);
1928. return 0;
1929. }
1930. num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
  1931. if (!num_ranges) {
  1932. pr_err("%pOF: voltage-ranges empty\n", np);
  1933. return -EINVAL;
  1934. }
  1935. for (i = 0; i < num_ranges; i++) {
  1936. const int j = i * 2;
  1937. u32 ocr_mask;
  1938. ocr_mask = mmc_vddrange_to_ocrmask(
  1939. be32_to_cpu(voltage_ranges[j]),
  1940. be32_to_cpu(voltage_ranges[j + 1]));
  1941. if (!ocr_mask) {
  1942. pr_err("%pOF: voltage-range #%d is invalid\n",
  1943. np, i);
  1944. return -EINVAL;
  1945. }
  1946. *mask |= ocr_mask;
  1947. }
  1948. return 1;
  1949. }
  1950. EXPORT_SYMBOL(mmc_of_parse_voltage);
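/*
 * Illustrative sketch (not part of the original file): a device tree node
 * parsed by mmc_of_parse_voltage() would carry a property such as
 *
 *	voltage-ranges = <3300 3400>;
 *
 * which the loop above converts to the same OCR mask as
 * mmc_vddrange_to_ocrmask(3300, 3400).
 */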
  1951. #endif /* CONFIG_OF */
  1952. static int mmc_of_get_func_num(struct device_node *node)
  1953. {
  1954. u32 reg;
  1955. int ret;
  1956. ret = of_property_read_u32(node, "reg", &reg);
  1957. if (ret < 0)
  1958. return ret;
  1959. return reg;
  1960. }
  1961. struct device_node *mmc_of_find_child_device(struct mmc_host *host,
  1962. unsigned func_num)
  1963. {
  1964. struct device_node *node;
  1965. if (!host->parent || !host->parent->of_node)
  1966. return NULL;
  1967. for_each_child_of_node(host->parent->of_node, node) {
  1968. if (mmc_of_get_func_num(node) == func_num)
  1969. return node;
  1970. }
  1971. return NULL;
  1972. }
  1973. #ifdef CONFIG_REGULATOR
  1974. /**
1975. * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
1976. * @vdd_bit: OCR bit number
1977. * @min_uV: minimum voltage value (µV)
1978. * @max_uV: maximum voltage value (µV)
  1979. *
  1980. * This function returns the voltage range according to the provided OCR
1981. * bit number. If conversion is not possible, a negative errno value is returned.
  1982. */
  1983. static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
  1984. {
  1985. int tmp;
  1986. if (!vdd_bit)
  1987. return -EINVAL;
  1988. /*
  1989. * REVISIT mmc_vddrange_to_ocrmask() may have set some
  1990. * bits this regulator doesn't quite support ... don't
  1991. * be too picky, most cards and regulators are OK with
  1992. * a 0.1V range goof (it's a small error percentage).
  1993. */
  1994. tmp = vdd_bit - ilog2(MMC_VDD_165_195);
  1995. if (tmp == 0) {
  1996. *min_uV = 1650 * 1000;
  1997. *max_uV = 1950 * 1000;
  1998. } else {
  1999. *min_uV = 1900 * 1000 + tmp * 100 * 1000;
  2000. *max_uV = *min_uV + 100 * 1000;
  2001. }
  2002. return 0;
  2003. }
  2004. /**
  2005. * mmc_regulator_get_ocrmask - return mask of supported voltages
  2006. * @supply: regulator to use
  2007. *
  2008. * This returns either a negative errno, or a mask of voltages that
  2009. * can be provided to MMC/SD/SDIO devices using the specified voltage
  2010. * regulator. This would normally be called before registering the
  2011. * MMC host adapter.
  2012. */
  2013. int mmc_regulator_get_ocrmask(struct regulator *supply)
  2014. {
  2015. int result = 0;
  2016. int count;
  2017. int i;
  2018. int vdd_uV;
  2019. int vdd_mV;
  2020. count = regulator_count_voltages(supply);
  2021. if (count < 0)
  2022. return count;
  2023. for (i = 0; i < count; i++) {
  2024. vdd_uV = regulator_list_voltage(supply, i);
  2025. if (vdd_uV <= 0)
  2026. continue;
  2027. vdd_mV = vdd_uV / 1000;
  2028. result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
  2029. }
  2030. if (!result) {
  2031. vdd_uV = regulator_get_voltage(supply);
  2032. if (vdd_uV <= 0)
  2033. return vdd_uV;
  2034. vdd_mV = vdd_uV / 1000;
  2035. result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
  2036. }
  2037. return result;
  2038. }
  2039. EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
  2040. /**
  2041. * mmc_regulator_set_ocr - set regulator to match host->ios voltage
  2042. * @mmc: the host to regulate
  2043. * @supply: regulator to use
  2044. * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
  2045. *
  2046. * Returns zero on success, else negative errno.
  2047. *
  2048. * MMC host drivers may use this to enable or disable a regulator using
  2049. * a particular supply voltage. This would normally be called from the
  2050. * set_ios() method.
  2051. */
  2052. int mmc_regulator_set_ocr(struct mmc_host *mmc,
  2053. struct regulator *supply,
  2054. unsigned short vdd_bit)
  2055. {
  2056. int result = 0;
  2057. int min_uV, max_uV;
  2058. if (vdd_bit) {
  2059. mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
  2060. result = regulator_set_voltage(supply, min_uV, max_uV);
  2061. if (result == 0 && !mmc->regulator_enabled) {
  2062. result = regulator_enable(supply);
  2063. if (!result)
  2064. mmc->regulator_enabled = true;
  2065. }
  2066. } else if (mmc->regulator_enabled) {
  2067. result = regulator_disable(supply);
  2068. if (result == 0)
  2069. mmc->regulator_enabled = false;
  2070. }
  2071. if (result)
  2072. dev_err(mmc_dev(mmc),
  2073. "could not set regulator OCR (%d)\n", result);
  2074. return result;
  2075. }
  2076. EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
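/*
 * Illustrative usage sketch (not part of the original file): a host driver
 * normally calls this from its set_ios() handler, assuming the supply was
 * obtained earlier via mmc_regulator_get_supply():
 *
 *	if (!IS_ERR(mmc->supply.vmmc))
 *		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 */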
  2077. static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
  2078. int min_uV, int target_uV,
  2079. int max_uV)
  2080. {
  2081. /*
  2082. * Check if supported first to avoid errors since we may try several
  2083. * signal levels during power up and don't want to show errors.
  2084. */
  2085. if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
  2086. return -EINVAL;
  2087. return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
  2088. max_uV);
  2089. }
  2090. /**
  2091. * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
  2092. *
  2093. * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
  2094. * That will match the behavior of old boards where VQMMC and VMMC were supplied
  2095. * by the same supply. The Bus Operating conditions for 3.3V signaling in the
  2096. * SD card spec also define VQMMC in terms of VMMC.
  2097. * If this is not possible we'll try the full 2.7-3.6V of the spec.
  2098. *
  2099. * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
  2100. * requested voltage. This is definitely a good idea for UHS where there's a
  2101. * separate regulator on the card that's trying to make 1.8V and it's best if
  2102. * we match.
  2103. *
  2104. * This function is expected to be used by a controller's
  2105. * start_signal_voltage_switch() function.
  2106. */
  2107. int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
  2108. {
  2109. struct device *dev = mmc_dev(mmc);
  2110. int ret, volt, min_uV, max_uV;
  2111. /* If no vqmmc supply then we can't change the voltage */
  2112. if (IS_ERR(mmc->supply.vqmmc))
  2113. return -EINVAL;
  2114. switch (ios->signal_voltage) {
  2115. case MMC_SIGNAL_VOLTAGE_120:
  2116. return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
  2117. 1100000, 1200000, 1300000);
  2118. case MMC_SIGNAL_VOLTAGE_180:
  2119. return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
  2120. 1700000, 1800000, 1950000);
  2121. case MMC_SIGNAL_VOLTAGE_330:
  2122. ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
  2123. if (ret < 0)
  2124. return ret;
  2125. dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
  2126. __func__, volt, max_uV);
  2127. min_uV = max(volt - 300000, 2700000);
  2128. max_uV = min(max_uV + 200000, 3600000);
  2129. /*
  2130. * Due to a limitation in the current implementation of
  2131. * regulator_set_voltage_triplet() which is taking the lowest
  2132. * voltage possible if below the target, search for a suitable
  2133. * voltage in two steps and try to stay close to vmmc
  2134. * with a 0.3V tolerance at first.
  2135. */
  2136. if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
  2137. min_uV, volt, max_uV))
  2138. return 0;
  2139. return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
  2140. 2700000, volt, 3600000);
  2141. default:
  2142. return -EINVAL;
  2143. }
  2144. }
  2145. EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
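/*
 * Illustrative usage sketch (not part of the original file): a host driver's
 * start_signal_voltage_switch() handler can simply forward to the helper
 * above; foo_ is a hypothetical driver prefix:
 *
 *	static int foo_signal_voltage_switch(struct mmc_host *mmc,
 *					     struct mmc_ios *ios)
 *	{
 *		return mmc_regulator_set_vqmmc(mmc, ios);
 *	}
 */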
  2146. #endif /* CONFIG_REGULATOR */
  2147. int mmc_regulator_get_supply(struct mmc_host *mmc)
  2148. {
  2149. struct device *dev = mmc_dev(mmc);
  2150. int ret;
  2151. mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
  2152. mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
  2153. if (IS_ERR(mmc->supply.vmmc)) {
  2154. if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
  2155. return -EPROBE_DEFER;
  2156. dev_dbg(dev, "No vmmc regulator found\n");
  2157. } else {
  2158. ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
  2159. if (ret > 0)
  2160. mmc->ocr_avail = ret;
  2161. else
  2162. dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
  2163. }
  2164. if (IS_ERR(mmc->supply.vqmmc)) {
  2165. if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
  2166. return -EPROBE_DEFER;
  2167. dev_dbg(dev, "No vqmmc regulator found\n");
  2168. }
  2169. return 0;
  2170. }
  2171. EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
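/*
 * Illustrative usage sketch (not part of the original file): host drivers
 * usually call this once at probe time; only -EPROBE_DEFER is propagated:
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret)
 *		return ret;
 */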
  2172. /*
  2173. * Mask off any voltages we don't support and select
  2174. * the lowest voltage
  2175. */
  2176. u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
  2177. {
  2178. int bit;
  2179. /*
  2180. * Sanity check the voltages that the card claims to
  2181. * support.
  2182. */
  2183. if (ocr & 0x7F) {
  2184. dev_warn(mmc_dev(host),
  2185. "card claims to support voltages below defined range\n");
  2186. ocr &= ~0x7F;
  2187. }
  2188. ocr &= host->ocr_avail;
  2189. if (!ocr) {
  2190. dev_warn(mmc_dev(host), "no support for card's volts\n");
  2191. return 0;
  2192. }
  2193. if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
  2194. bit = ffs(ocr) - 1;
  2195. ocr &= 3 << bit;
  2196. mmc_power_cycle(host, ocr);
  2197. } else {
  2198. bit = fls(ocr) - 1;
  2199. ocr &= 3 << bit;
  2200. if (bit != host->ios.vdd)
  2201. dev_warn(mmc_dev(host), "exceeding card's volts\n");
  2202. }
  2203. return ocr;
  2204. }
  2205. int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
  2206. {
  2207. int err = 0;
  2208. int old_signal_voltage = host->ios.signal_voltage;
  2209. host->ios.signal_voltage = signal_voltage;
  2210. if (host->ops->start_signal_voltage_switch)
  2211. err = host->ops->start_signal_voltage_switch(host, &host->ios);
  2212. if (err)
  2213. host->ios.signal_voltage = old_signal_voltage;
  2214. return err;
  2215. }
  2216. int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
  2217. {
  2218. struct mmc_command cmd = {};
  2219. int err = 0;
  2220. u32 clock;
  2221. /*
  2222. * If we cannot switch voltages, return failure so the caller
  2223. * can continue without UHS mode
  2224. */
  2225. if (!host->ops->start_signal_voltage_switch)
  2226. return -EPERM;
  2227. if (!host->ops->card_busy)
  2228. pr_warn("%s: cannot verify signal voltage switch\n",
  2229. mmc_hostname(host));
  2230. cmd.opcode = SD_SWITCH_VOLTAGE;
  2231. cmd.arg = 0;
  2232. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  2233. err = mmc_wait_for_cmd(host, &cmd, 0);
  2234. if (err)
  2235. goto power_cycle;
  2236. if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
  2237. return -EIO;
  2238. /*
  2239. * The card should drive cmd and dat[0:3] low immediately
  2240. * after the response of cmd11, but wait 1 ms to be sure
  2241. */
  2242. mmc_delay(1);
  2243. if (host->ops->card_busy && !host->ops->card_busy(host)) {
  2244. err = -EAGAIN;
  2245. goto power_cycle;
  2246. }
  2247. /*
  2248. * During a signal voltage level switch, the clock must be gated
  2249. * for 5 ms according to the SD spec
  2250. */
  2251. clock = host->ios.clock;
  2252. host->ios.clock = 0;
  2253. mmc_set_ios(host);
  2254. if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
  2255. /*
  2256. * Voltages may not have been switched, but we've already
  2257. * sent CMD11, so a power cycle is required anyway
  2258. */
  2259. err = -EAGAIN;
  2260. goto power_cycle;
  2261. }
  2262. /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
  2263. mmc_delay(10);
  2264. host->ios.clock = clock;
  2265. mmc_set_ios(host);
  2266. /* Wait for at least 1 ms according to spec */
  2267. mmc_delay(1);
  2268. /*
  2269. * Failure to switch is indicated by the card holding
  2270. * dat[0:3] low
  2271. */
  2272. if (host->ops->card_busy && host->ops->card_busy(host))
  2273. err = -EAGAIN;
  2274. power_cycle:
  2275. if (err) {
  2276. pr_debug("%s: Signal voltage switch failed, "
  2277. "power cycling card\n", mmc_hostname(host));
  2278. mmc_power_cycle(host, ocr);
  2279. }
  2280. return err;
  2281. }
  2282. /*
  2283. * Select timing parameters for host.
  2284. */
  2285. void mmc_set_timing(struct mmc_host *host, unsigned int timing)
  2286. {
  2287. host->ios.timing = timing;
  2288. mmc_set_ios(host);
  2289. }
  2290. /*
  2291. * Select appropriate driver type for host.
  2292. */
  2293. void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
  2294. {
  2295. host->ios.drv_type = drv_type;
  2296. mmc_set_ios(host);
  2297. }
  2298. int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
  2299. int card_drv_type, int *drv_type)
  2300. {
  2301. struct mmc_host *host = card->host;
  2302. int host_drv_type = SD_DRIVER_TYPE_B;
  2303. *drv_type = 0;
  2304. if (!host->ops->select_drive_strength)
  2305. return 0;
  2306. /* Use SD definition of driver strength for hosts */
  2307. if (host->caps & MMC_CAP_DRIVER_TYPE_A)
  2308. host_drv_type |= SD_DRIVER_TYPE_A;
  2309. if (host->caps & MMC_CAP_DRIVER_TYPE_C)
  2310. host_drv_type |= SD_DRIVER_TYPE_C;
  2311. if (host->caps & MMC_CAP_DRIVER_TYPE_D)
  2312. host_drv_type |= SD_DRIVER_TYPE_D;
  2313. /*
  2314. * The drive strength that the hardware can support
  2315. * depends on the board design. Pass the appropriate
  2316. * information and let the hardware specific code
  2317. * return what is possible given the options
  2318. */
  2319. return host->ops->select_drive_strength(card, max_dtr,
  2320. host_drv_type,
  2321. card_drv_type,
  2322. drv_type);
  2323. }
  2324. /*
  2325. * Apply power to the MMC stack. This is a two-stage process.
  2326. * First, we enable power to the card without the clock running.
  2327. * We then wait a bit for the power to stabilise. Finally,
  2328. * enable the bus drivers and clock to the card.
  2329. *
2330. * We must _NOT_ enable the clock prior to power stabilising.
  2331. *
  2332. * If a host does all the power sequencing itself, ignore the
  2333. * initial MMC_POWER_UP stage.
  2334. */
  2335. void mmc_power_up(struct mmc_host *host, u32 ocr)
  2336. {
  2337. if (host->ios.power_mode == MMC_POWER_ON)
  2338. return;
  2339. mmc_pwrseq_pre_power_on(host);
  2340. host->ios.vdd = fls(ocr) - 1;
  2341. host->ios.power_mode = MMC_POWER_UP;
  2342. /* Set initial state and call mmc_set_ios */
  2343. mmc_set_initial_state(host);
  2344. /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
  2345. if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
  2346. dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
  2347. else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
  2348. dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
  2349. else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
  2350. dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
  2351. /*
  2352. * This delay should be sufficient to allow the power supply
  2353. * to reach the minimum voltage.
  2354. */
  2355. mmc_delay(10);
  2356. mmc_pwrseq_post_power_on(host);
  2357. host->ios.clock = host->f_init;
  2358. host->ios.power_mode = MMC_POWER_ON;
  2359. mmc_set_ios(host);
  2360. /*
2361. * This delay must be at least 74 clock cycles, or 1 ms, or the
  2362. * time required to reach a stable voltage.
  2363. */
  2364. mmc_delay(10);
  2365. }
  2366. void mmc_power_off(struct mmc_host *host)
  2367. {
  2368. if (host->ios.power_mode == MMC_POWER_OFF)
  2369. return;
  2370. mmc_pwrseq_power_off(host);
  2371. host->ios.clock = 0;
  2372. host->ios.vdd = 0;
  2373. host->ios.power_mode = MMC_POWER_OFF;
  2374. /* Set initial state and call mmc_set_ios */
  2375. mmc_set_initial_state(host);
  2376. /*
  2377. * Some configurations, such as the 802.11 SDIO card in the OLPC
  2378. * XO-1.5, require a short delay after poweroff before the card
  2379. * can be successfully turned on again.
  2380. */
  2381. mmc_delay(1);
  2382. }
  2383. void mmc_power_cycle(struct mmc_host *host, u32 ocr)
  2384. {
  2385. mmc_power_off(host);
  2386. /* Wait at least 1 ms according to SD spec */
  2387. mmc_delay(1);
  2388. mmc_power_up(host, ocr);
  2389. }
  2390. /*
  2391. * Cleanup when the last reference to the bus operator is dropped.
  2392. */
  2393. static void __mmc_release_bus(struct mmc_host *host)
  2394. {
  2395. WARN_ON(!host->bus_dead);
  2396. host->bus_ops = NULL;
  2397. }
  2398. /*
  2399. * Increase reference count of bus operator
  2400. */
  2401. static inline void mmc_bus_get(struct mmc_host *host)
  2402. {
  2403. unsigned long flags;
  2404. spin_lock_irqsave(&host->lock, flags);
  2405. host->bus_refs++;
  2406. spin_unlock_irqrestore(&host->lock, flags);
  2407. }
  2408. /*
  2409. * Decrease reference count of bus operator and free it if
  2410. * it is the last reference.
  2411. */
  2412. static inline void mmc_bus_put(struct mmc_host *host)
  2413. {
  2414. unsigned long flags;
  2415. spin_lock_irqsave(&host->lock, flags);
  2416. host->bus_refs--;
  2417. if ((host->bus_refs == 0) && host->bus_ops)
  2418. __mmc_release_bus(host);
  2419. spin_unlock_irqrestore(&host->lock, flags);
  2420. }
  2421. /*
  2422. * Assign a mmc bus handler to a host. Only one bus handler may control a
  2423. * host at any given time.
  2424. */
  2425. void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
  2426. {
  2427. unsigned long flags;
  2428. WARN_ON(!host->claimed);
  2429. spin_lock_irqsave(&host->lock, flags);
  2430. WARN_ON(host->bus_ops);
  2431. WARN_ON(host->bus_refs);
  2432. host->bus_ops = ops;
  2433. host->bus_refs = 1;
  2434. host->bus_dead = 0;
  2435. spin_unlock_irqrestore(&host->lock, flags);
  2436. }
  2437. /*
  2438. * Remove the current bus handler from a host.
  2439. */
  2440. void mmc_detach_bus(struct mmc_host *host)
  2441. {
  2442. unsigned long flags;
  2443. WARN_ON(!host->claimed);
  2444. WARN_ON(!host->bus_ops);
  2445. spin_lock_irqsave(&host->lock, flags);
  2446. host->bus_dead = 1;
  2447. spin_unlock_irqrestore(&host->lock, flags);
  2448. mmc_bus_put(host);
  2449. }
  2450. static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
  2451. bool cd_irq)
  2452. {
  2453. /*
  2454. * If the device is configured as wakeup, we prevent a new sleep for
  2455. * 5 s to give provision for user space to consume the event.
  2456. */
  2457. if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
  2458. device_can_wakeup(mmc_dev(host)))
  2459. pm_wakeup_event(mmc_dev(host), 5000);
  2460. host->detect_change = 1;
  2461. mmc_schedule_delayed_work(&host->detect, delay);
  2462. }
  2463. /**
  2464. * mmc_detect_change - process change of state on a MMC socket
  2465. * @host: host which changed state.
  2466. * @delay: optional delay to wait before detection (jiffies)
  2467. *
  2468. * MMC drivers should call this when they detect a card has been
  2469. * inserted or removed. The MMC layer will confirm that any
  2470. * present card is still functional, and initialize any newly
  2471. * inserted.
  2472. */
  2473. void mmc_detect_change(struct mmc_host *host, unsigned long delay)
  2474. {
  2475. _mmc_detect_change(host, delay, true);
  2476. }
  2477. EXPORT_SYMBOL(mmc_detect_change);
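/*
 * Illustrative usage sketch (not part of the original file): a host driver's
 * card-detect interrupt handler typically debounces the slot and then calls
 *
 *	mmc_detect_change(host, msecs_to_jiffies(200));
 *
 * where the 200 ms debounce value is only an example and is board specific.
 */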
  2478. void mmc_init_erase(struct mmc_card *card)
  2479. {
  2480. unsigned int sz;
  2481. if (is_power_of_2(card->erase_size))
  2482. card->erase_shift = ffs(card->erase_size) - 1;
  2483. else
  2484. card->erase_shift = 0;
  2485. /*
  2486. * It is possible to erase an arbitrarily large area of an SD or MMC
  2487. * card. That is not desirable because it can take a long time
2488. * (minutes), potentially delaying more important I/O, and also the
2489. * timeout calculations become increasingly over-estimated.
  2490. * Consequently, 'pref_erase' is defined as a guide to limit erases
  2491. * to that size and alignment.
  2492. *
  2493. * For SD cards that define Allocation Unit size, limit erases to one
  2494. * Allocation Unit at a time.
2495. * For MMC, have a stab at a good value and for modern cards it will
  2496. * end up being 4MiB. Note that if the value is too small, it can end
  2497. * up taking longer to erase. Also note, erase_size is already set to
  2498. * High Capacity Erase Size if available when this function is called.
  2499. */
  2500. if (mmc_card_sd(card) && card->ssr.au) {
  2501. card->pref_erase = card->ssr.au;
  2502. card->erase_shift = ffs(card->ssr.au) - 1;
  2503. } else if (card->erase_size) {
  2504. sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
  2505. if (sz < 128)
  2506. card->pref_erase = 512 * 1024 / 512;
  2507. else if (sz < 512)
  2508. card->pref_erase = 1024 * 1024 / 512;
  2509. else if (sz < 1024)
  2510. card->pref_erase = 2 * 1024 * 1024 / 512;
  2511. else
  2512. card->pref_erase = 4 * 1024 * 1024 / 512;
  2513. if (card->pref_erase < card->erase_size)
  2514. card->pref_erase = card->erase_size;
  2515. else {
  2516. sz = card->pref_erase % card->erase_size;
  2517. if (sz)
  2518. card->pref_erase += card->erase_size - sz;
  2519. }
  2520. } else
  2521. card->pref_erase = 0;
  2522. }
  2523. static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
  2524. unsigned int arg, unsigned int qty)
  2525. {
  2526. unsigned int erase_timeout;
  2527. if (arg == MMC_DISCARD_ARG ||
  2528. (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
  2529. erase_timeout = card->ext_csd.trim_timeout;
  2530. } else if (card->ext_csd.erase_group_def & 1) {
  2531. /* High Capacity Erase Group Size uses HC timeouts */
  2532. if (arg == MMC_TRIM_ARG)
  2533. erase_timeout = card->ext_csd.trim_timeout;
  2534. else
  2535. erase_timeout = card->ext_csd.hc_erase_timeout;
  2536. } else {
  2537. /* CSD Erase Group Size uses write timeout */
  2538. unsigned int mult = (10 << card->csd.r2w_factor);
  2539. unsigned int timeout_clks = card->csd.taac_clks * mult;
  2540. unsigned int timeout_us;
  2541. /* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
  2542. if (card->csd.taac_ns < 1000000)
  2543. timeout_us = (card->csd.taac_ns * mult) / 1000;
  2544. else
  2545. timeout_us = (card->csd.taac_ns / 1000) * mult;
  2546. /*
  2547. * ios.clock is only a target. The real clock rate might be
  2548. * less but not that much less, so fudge it by multiplying by 2.
  2549. */
  2550. timeout_clks <<= 1;
  2551. timeout_us += (timeout_clks * 1000) /
  2552. (card->host->ios.clock / 1000);
  2553. erase_timeout = timeout_us / 1000;
  2554. /*
  2555. * Theoretically, the calculation could underflow so round up
  2556. * to 1ms in that case.
  2557. */
  2558. if (!erase_timeout)
  2559. erase_timeout = 1;
  2560. }
  2561. /* Multiplier for secure operations */
  2562. if (arg & MMC_SECURE_ARGS) {
  2563. if (arg == MMC_SECURE_ERASE_ARG)
  2564. erase_timeout *= card->ext_csd.sec_erase_mult;
  2565. else
  2566. erase_timeout *= card->ext_csd.sec_trim_mult;
  2567. }
  2568. erase_timeout *= qty;
  2569. /*
  2570. * Ensure at least a 1 second timeout for SPI as per
  2571. * 'mmc_set_data_timeout()'
  2572. */
  2573. if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
  2574. erase_timeout = 1000;
  2575. return erase_timeout;
  2576. }
  2577. static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
  2578. unsigned int arg,
  2579. unsigned int qty)
  2580. {
  2581. unsigned int erase_timeout;
  2582. if (card->ssr.erase_timeout) {
  2583. /* Erase timeout specified in SD Status Register (SSR) */
  2584. erase_timeout = card->ssr.erase_timeout * qty +
  2585. card->ssr.erase_offset;
  2586. } else {
  2587. /*
  2588. * Erase timeout not specified in SD Status Register (SSR) so
  2589. * use 250ms per write block.
  2590. */
  2591. erase_timeout = 250 * qty;
  2592. }
  2593. /* Must not be less than 1 second */
  2594. if (erase_timeout < 1000)
  2595. erase_timeout = 1000;
  2596. return erase_timeout;
  2597. }
  2598. static unsigned int mmc_erase_timeout(struct mmc_card *card,
  2599. unsigned int arg,
  2600. unsigned int qty)
  2601. {
  2602. if (mmc_card_sd(card))
  2603. return mmc_sd_erase_timeout(card, arg, qty);
  2604. else
  2605. return mmc_mmc_erase_timeout(card, arg, qty);
  2606. }
  2607. #ifdef CONFIG_MTK_EMMC_HW_CQ
  2608. static u32 mmc_get_erase_qty(struct mmc_card *card, u32 from, u32 to)
  2609. {
  2610. u32 qty = 0;
  2611. /*
  2612. * qty is used to calculate the erase timeout which depends on how many
  2613. * erase groups (or allocation units in SD terminology) are affected.
  2614. * We count erasing part of an erase group as one erase group.
  2615. * For SD, the allocation units are always a power of 2. For MMC, the
  2616. * erase group size is almost certainly also power of 2, but it does not
  2617. * seem to insist on that in the JEDEC standard, so we fall back to
  2618. * division in that case. SD may not specify an allocation unit size,
  2619. * in which case the timeout is based on the number of write blocks.
  2620. *
  2621. * Note that the timeout for secure trim 2 will only be correct if the
  2622. * number of erase groups specified is the same as the total of all
  2623. * preceding secure trim 1 commands. Since the power may have been
  2624. * lost since the secure trim 1 commands occurred, it is generally
  2625. * impossible to calculate the secure trim 2 timeout correctly.
  2626. */
  2627. if (card->erase_shift)
  2628. qty += ((to >> card->erase_shift) -
  2629. (from >> card->erase_shift)) + 1;
  2630. else if (mmc_card_sd(card))
  2631. qty += to - from + 1;
  2632. else
  2633. qty += ((to / card->erase_size) -
  2634. (from / card->erase_size)) + 1;
  2635. return qty;
  2636. }
  2637. static int mmc_cmdq_send_erase_cmd(struct mmc_cmdq_req *cmdq_req,
  2638. struct mmc_card *card, u32 opcode, u32 arg, u32 qty)
  2639. {
  2640. struct mmc_command *cmd = cmdq_req->mrq.cmd;
  2641. int err;
  2642. memset(cmd, 0, sizeof(struct mmc_command));
  2643. cmd->opcode = opcode;
  2644. cmd->arg = arg;
  2645. if (cmd->opcode == MMC_ERASE) {
  2646. cmd->flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
  2647. cmd->busy_timeout = mmc_erase_timeout(card, arg, qty);
  2648. } else {
  2649. cmd->flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
  2650. }
  2651. err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
  2652. if (err) {
  2653. pr_notice("%s: group start error %d, status %#x\n",
  2654. __func__, err, cmd->resp[0]);
  2655. return -EIO;
  2656. }
  2657. return 0;
  2658. }
  2659. static int mmc_cmdq_do_erase(struct mmc_cmdq_req *cmdq_req,
  2660. struct mmc_card *card, unsigned int from,
  2661. unsigned int to, unsigned int arg)
  2662. {
  2663. struct mmc_command *cmd = cmdq_req->mrq.cmd;
  2664. unsigned int qty = 0;
  2665. unsigned long timeout;
  2666. unsigned int fr, nr;
  2667. int err;
  2668. fr = from;
  2669. nr = to - from + 1;
  2670. qty = mmc_get_erase_qty(card, from, to);
  2671. if (!mmc_card_blockaddr(card)) {
  2672. from <<= 9;
  2673. to <<= 9;
  2674. }
  2675. err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_START,
  2676. from, qty);
  2677. if (err)
  2678. goto out;
  2679. err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_END,
  2680. to, qty);
  2681. if (err)
  2682. goto out;
  2683. err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE,
  2684. arg, qty);
  2685. if (err)
  2686. goto out;
  2687. timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
  2688. do {
  2689. memset(cmd, 0, sizeof(struct mmc_command));
  2690. cmd->opcode = MMC_SEND_STATUS;
  2691. cmd->arg = card->rca << 16;
  2692. cmd->flags = MMC_RSP_R1 | MMC_CMD_AC;
  2693. /* Do not retry else we can't see errors */
  2694. err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
  2695. if (err || (cmd->resp[0] & 0xFDF92000)) {
  2696. pr_notice("error %d requesting status %#x\n",
  2697. err, cmd->resp[0]);
  2698. err = -EIO;
  2699. goto out;
  2700. }
  2701. /* Timeout if the device never becomes ready for data and
  2702. * never leaves the program state.
  2703. */
  2704. if (time_after(jiffies, timeout)) {
  2705. pr_notice("%s: %s Card stuck in programming state!\n",
  2706. mmc_hostname(card->host), __func__);
  2707. err = -EIO;
  2708. goto out;
  2709. }
  2710. } while (!(cmd->resp[0] & R1_READY_FOR_DATA) ||
  2711. (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG));
  2712. out:
  2713. return err;
  2714. }
  2715. #endif
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {};
	unsigned int qty = 0, busy_timeout = 0;
	bool use_r1b_resp = false;
	unsigned long timeout;
	int err;

	mmc_retune_hold(card->host);

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case. SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands. Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;
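
	/*
	 * Worked example (illustrative numbers only): with erase_shift = 10
	 * (1024-sector erase groups), from = 1000 and to = 4999 give
	 * qty = (4999 >> 10) - (1000 >> 10) + 1 = 4 - 0 + 1 = 5, i.e. five
	 * erase groups are counted even though only part of the first and
	 * last groups is covered.
	 */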
	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	busy_timeout = mmc_erase_timeout(card, arg, qty);
	/*
	 * If the host controller supports busy signalling and the timeout for
	 * the erase operation does not exceed the max_busy_timeout, we should
	 * use R1B response. Otherwise we need to prevent the host from doing
	 * hw busy detection, which is done by converting to a R1 response
	 * instead.
	 */
	if (card->host->max_busy_timeout &&
	    busy_timeout > card->host->max_busy_timeout) {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout;
		use_r1b_resp = true;
	}
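
	/*
	 * For instance (hypothetical values), if mmc_erase_timeout() yields
	 * a busy_timeout of 9600 ms but the host advertises a
	 * max_busy_timeout of 4000 ms, the command is sent with an R1
	 * response and completion is detected by the CMD13 polling loop
	 * below rather than by hardware busy detection.
	 */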
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	/*
	 * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling shall be
	 * avoided.
	 */
	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		goto out;

	timeout = jiffies + msecs_to_jiffies(busy_timeout);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
		/* Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
			       mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	mmc_retune_release(card->host);
	return err;
}
static unsigned int mmc_align_erase_size(struct mmc_card *card,
					 unsigned int *from,
					 unsigned int *to,
					 unsigned int nr)
{
	unsigned int from_new = *from, nr_new = nr, rem;

	/*
	 * When the 'card->erase_size' is power of 2, we can use round_up/down()
	 * to align the erase size efficiently.
	 */
	if (is_power_of_2(card->erase_size)) {
		unsigned int temp = from_new;

		from_new = round_up(temp, card->erase_size);
		rem = from_new - temp;

		if (nr_new > rem)
			nr_new -= rem;
		else
			return 0;

		nr_new = round_down(nr_new, card->erase_size);
	} else {
		rem = from_new % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from_new += rem;
			if (nr_new > rem)
				nr_new -= rem;
			else
				return 0;
		}

		rem = nr_new % card->erase_size;
		if (rem)
			nr_new -= rem;
	}

	if (nr_new == 0)
		return 0;

	*to = from_new + nr_new;
	*from = from_new;

	return nr_new;
}
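
/*
 * Alignment example for mmc_align_erase_size() above (illustrative numbers
 * only): with erase_size = 1024, *from = 1000 and nr = 5000, the
 * power-of-2 path rounds the start up to 1024 (rem = 24), reduces nr to
 * 4976 and rounds it down to 4096, so the function returns 4096 with
 * *from = 1024 and *to = 5120.
 */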
#ifdef CONFIG_MTK_EMMC_HW_CQ
int mmc_erase_sanity_check(struct mmc_card *card, unsigned int from,
			   unsigned int nr, unsigned int arg)
{
	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	return 0;
}

int mmc_cmdq_erase(struct mmc_cmdq_req *cmdq_req,
		   struct mmc_card *card, unsigned int from, unsigned int nr,
		   unsigned int arg)
{
	unsigned int rem, to = from + nr;
	int ret;

	ret = mmc_erase_sanity_check(card, from, nr, arg);
	if (ret)
		return ret;

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_cmdq_do_erase(cmdq_req, card, from, to, arg);
}
EXPORT_SYMBOL(mmc_cmdq_erase);

void mmc_cmdq_up_rwsem(struct mmc_host *host)
{
	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;

	up_read(&ctx->err_rwsem);
}
EXPORT_SYMBOL(mmc_cmdq_up_rwsem);

int mmc_cmdq_down_rwsem(struct mmc_host *host, struct request *rq)
{
	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;

	down_read(&ctx->err_rwsem);
	if (rq && !(rq->rq_flags & RQF_QUEUED))
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(mmc_cmdq_down_rwsem);
#endif
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;
	int err;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG)
		nr = mmc_align_erase_size(card, &from, &to, nr);

	if (nr == 0)
		return 0;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	/*
	 * Special case where only one erase-group fits in the timeout budget:
	 * If the region crosses an erase-group boundary in this particular
	 * case, we will be trimming more than one erase-group, which does not
	 * fit in the timeout budget of the controller, so we need to split it
	 * and call mmc_do_erase() twice if necessary. This special case is
	 * identified by the card->eg_boundary flag.
	 */
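	/*
	 * Example of the split (illustrative numbers only): with
	 * erase_size = 1024, card->eg_boundary set, from = 1000 and
	 * nr = 1024 (so to = 2023), rem = 1024 - (1000 % 1024) = 24 and
	 * nr > rem, so sectors 1000..1023 are trimmed first and sectors
	 * 1024..2023 by the second mmc_do_erase() call.
	 */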
	rem = card->erase_size - (from % card->erase_size);
	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
		err = mmc_do_erase(card, from, from + rem - 1, arg);
		from += rem;
		if ((err) || (to <= from))
			return err;
	}

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
	unsigned int last_timeout = 0;
	unsigned int max_busy_timeout = host->max_busy_timeout ?
			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;

	if (card->erase_shift) {
		max_qty = UINT_MAX >> card->erase_shift;
		min_qty = card->pref_erase >> card->erase_shift;
	} else if (mmc_card_sd(card)) {
		max_qty = UINT_MAX;
		min_qty = card->pref_erase;
	} else {
		max_qty = UINT_MAX / card->erase_size;
		min_qty = card->pref_erase / card->erase_size;
	}

	/*
	 * We should not use 'host->max_busy_timeout' alone as the limitation
	 * when deciding the max discard sectors. We should set a balance value
	 * to improve the erase speed, while not incurring an excessively long
	 * timeout at the same time.
	 *
	 * Here we set 'card->pref_erase' as the minimal discard sectors no
	 * matter what size of 'host->max_busy_timeout', but if the
	 * 'host->max_busy_timeout' is large enough for more discard sectors,
	 * then we can continue to increase the max discard sectors until we
	 * get a balance value. In cases when the 'host->max_busy_timeout'
	 * isn't specified, use the default max erase timeout.
	 */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);

			if (qty + x > min_qty && timeout > max_busy_timeout)
				break;

			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	/*
	 * When specifying a sector range to trim, chances are we might cross
	 * an erase-group boundary even if the amount of sectors is less than
	 * one erase-group.
	 * If we can only fit one erase-group in the controller timeout budget,
	 * we have to care that erase-group boundaries are not crossed by a
	 * single trim operation. We flag that special case with "eg_boundary".
	 * In all other cases we can just decrement qty and pretend that we
	 * always touch (qty + 1) erase-groups as a simple optimization.
	 */
	if (qty == 1)
		card->eg_boundary = 1;
	else
		qty--;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty + 1;
	else
		max_discard = qty * card->erase_size;

	return max_discard;
}
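
/*
 * The search in mmc_do_calc_max_discard() above grows qty geometrically:
 * each pass of the inner loop doubles x and records in y the largest step
 * for which the erase timeout of (qty + x) groups still fits within
 * max_busy_timeout (the timeout limit is only enforced once qty + x
 * exceeds min_qty, so at least card->pref_erase worth of sectors is
 * always allowed); the outer loop then adds y to qty and retries until no
 * further step fits.
 */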
unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_busy_timeout)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change. In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_busy_timeout ?
		 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
bool mmc_card_is_blockaddr(struct mmc_card *card)
{
	return card ? mmc_card_blockaddr(card) : false;
}
EXPORT_SYMBOL(mmc_card_is_blockaddr);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {};

	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
	    mmc_card_hs400(card) || mmc_card_hs400es(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
		       bool is_rel_write)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_BLOCK_COUNT;
	cmd.arg = blockcount & 0x0000FFFF;
	if (is_rel_write)
		cmd.arg |= 1 << 31;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);
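
/*
 * Usage sketch (hypothetical values): mmc_set_blockcount(card, 8, true)
 * issues CMD23 with arg 0x80000008, where the low 16 bits carry the block
 * count and bit 31 marks the transfer as a reliable write.
 */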
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	mmc_pwrseq_reset(host);

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	host->ops->hw_reset(host);
}

#ifdef CONFIG_MTK_EMMC_HW_CQ
/*
 * mmc_cmdq_hw_reset: Helper API for performing a reset_all of the host
 * and reinitializing the card.
 * This must be called with mmc_claim_host acquired by the caller.
 */
int mmc_cmdq_hw_reset(struct mmc_host *host)
{
	if (!host->bus_ops->reset)
		return -EOPNOTSUPP;

	return host->bus_ops->reset(host);
}
EXPORT_SYMBOL(mmc_cmdq_hw_reset);

int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host)
{
	int err = 0;

	err = wait_event_interruptible(host->cmdq_ctx.queue_empty_wq,
				       (!host->cmdq_ctx.active_reqs));
	if (host->cmdq_ctx.active_reqs) {
		pr_notice("%s: %s: unexpected active requests (%lu)\n",
			  mmc_hostname(host), __func__,
			  host->cmdq_ctx.active_reqs);
		return -EPERM;
	}

	err = mmc_cmdq_halt(host, true);
	if (err) {
		pr_notice("%s: %s: mmc_cmdq_halt failed (%d)\n",
			  mmc_hostname(host), __func__, err);
		goto out;
	}

out:
	return err;
}
EXPORT_SYMBOL(mmc_cmdq_halt_on_empty_queue);
#endif
int mmc_hw_reset(struct mmc_host *host)
{
	int ret;

	if (!host->card)
		return -EINVAL;

	mmc_bus_get(host);
	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
		mmc_bus_put(host);
		return -EOPNOTSUPP;
	}

	ret = host->bus_ops->reset(host);
	mmc_bus_put(host);

	if (ret)
		pr_warn("%s: tried to reset card, got error %d\n",
			mmc_hostname(host), ret);

	return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

	pr_debug("%s: %s: trying to init card at %u Hz\n",
		 mmc_hostname(host), __func__, host->f_init);

	mmc_power_up(host, host->ocr_avail);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset card. Since we do not know
	 * if the card is being re-initialized, just send it. CMD52
	 * should be ignored by SD/eMMC cards.
	 * Skip it if we already know that we do not support SDIO commands
	 */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		sdio_reset(host);

	mmc_go_idle(host);

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		if (!mmc_attach_sdio(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		if (!mmc_attach_sd(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_MMC))
		if (!mmc_attach_mmc(host))
			return 0;

	mmc_power_off(host);
	return -EIO;
}
int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);

	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (refer to "SD Card Mechanical
	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
	 * detect work 200ms later for this case.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}

	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	if (!mmc_card_is_removable(host))
		return 0;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			_mmc_detect_change(host, 0, false);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
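
/*
 * mmc_rescan() below iterates over the freqs[] table, clamping each
 * attempt to host->f_min, and stops as soon as mmc_rescan_try_freq()
 * attaches a card or the table entry drops to or below host->f_min
 * (SDIO, then SD, then MMC are probed in that order by
 * mmc_rescan_try_freq()).
 */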
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if (!mmc_card_is_removable(host) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	if (host->trigger_card_event && host->ops->card_event) {
		mmc_claim_host(host);
		host->ops->card_event(host);
		mmc_release_host(host);
		host->trigger_card_event = false;
	}

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	mmc_claim_host(host);
	if (mmc_card_is_removable(host) && host->ops->get_cd &&
	    host->ops->get_cd(host) == 0) {
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	host->ios.power_mode = MMC_POWER_UNDEFINED;

	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
		mmc_claim_host(host);
		mmc_power_up(host, host->ocr_avail);
		mmc_release_host(host);
	}

	mmc_gpiod_request_cd_irq(host);
	_mmc_detect_change(host, 0, false);
}

void mmc_stop_host(struct mmc_host *host)
{
	if (host->slot.cd_irq >= 0) {
		if (host->slot.cd_wake_enabled)
			disable_irq_wake(host->slot.cd_irq);
		disable_irq(host->slot.cd_irq);
	}

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	mmc_claim_host(host);
	mmc_power_off(host);
	mmc_release_host(host);
}
int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

	pr_debug("%s: %s: powering down\n", mmc_hostname(host), __func__);

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

	pr_debug("%s: %s: powering up\n", mmc_hostname(host), __func__);

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host, host->card->ocr);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
#ifdef CONFIG_PM_SLEEP
/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in the PM notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
static int mmc_pm_notify(struct notifier_block *notify_block,
			 unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops)
			break;

		/* Validate prerequisites for suspend */
		if (host->bus_ops->pre_suspend)
			err = host->bus_ops->pre_suspend(host);
		if (!err)
			break;

		if (!mmc_card_is_removable(host)) {
			dev_warn(mmc_dev(host),
				 "pre_suspend failed for non-removable host: %d\n",
				 err);
			/* Avoid removing non-removable hosts */
			break;
		}

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		_mmc_detect_change(host, 0, false);
	}

	return 0;
}

void mmc_register_pm_notifier(struct mmc_host *host)
{
	host->pm_notify.notifier_call = mmc_pm_notify;
	register_pm_notifier(&host->pm_notify);
}

void mmc_unregister_pm_notifier(struct mmc_host *host)
{
	unregister_pm_notifier(&host->pm_notify);
}
#endif
/**
 * mmc_init_context_info() - init synchronization context
 * @host: mmc host
 *
 * Init struct context_info needed to implement asynchronous
 * request mechanism, used by mmc core, host driver and mmc requests
 * supplier.
 */
void mmc_init_context_info(struct mmc_host *host)
{
	host->context_info.is_new_req = false;
	host->context_info.is_done_rcv = false;
	host->context_info.is_waiting_last_req = false;
	init_waitqueue_head(&host->context_info.wait);
}

#ifdef CONFIG_MMC_EMBEDDED_SDIO
void mmc_set_embedded_sdio_data(struct mmc_host *host,
				struct sdio_cis *cis,
				struct sdio_cccr *cccr,
				struct sdio_embedded_func *funcs,
				int num_funcs)
{
	host->embedded_sdio_data.cis = cis;
	host->embedded_sdio_data.cccr = cccr;
	host->embedded_sdio_data.funcs = funcs;
	host->embedded_sdio_data.num_funcs = num_funcs;
}
EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
#endif
static int __init mmc_init(void)
{
	int ret;

	ret = mmc_register_bus();
	if (ret)
		return ret;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");