/*
 * linux/drivers/mmc/core/mmc.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"
#include "pwrseq.h"

#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
#include <linux/kthread.h>
#include <mt-plat/mtk_boot_common.h>
#endif

#define DEFAULT_CMD6_TIMEOUT_MS	500
#define MIN_CACHE_EN_TIMEOUT_MS	1600
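
/*
 * DEFAULT_CMD6_TIMEOUT_MS is the fallback timeout for CMD6 (SWITCH) when
 * EXT_CSD does not provide GENERIC_CMD6_TIME (see the generic_cmd6_time
 * default in mmc_decode_ext_csd() below). Judging by its name,
 * MIN_CACHE_EN_TIMEOUT_MS is a floor applied when enabling the device
 * cache; the code that uses it lies outside this excerpt.
 */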
static const unsigned int tran_exp[] = {
	10000, 100000, 1000000, 10000000,
	0, 0, 0, 0
};

static const unsigned char tran_mant[] = {
	0,  10, 12, 13, 15, 20, 25, 30,
	35, 40, 45, 50, 55, 60, 70, 80,
};

static const unsigned int taac_exp[] = {
	1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
};

static const unsigned int taac_mant[] = {
	0,  10, 12, 13, 15, 20, 25, 30,
	35, 40, 45, 50, 55, 60, 70, 80,
};
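
/*
 * UNSTUFF_BITS() extracts a bit field from the 128-bit CSD/CID response.
 * The response is stored as four 32-bit words with resp[0] holding bits
 * 127:96 and resp[3] holding bits 31:0; 'start' is the field's lowest bit
 * position within the 128-bit value and 'size' is its width in bits.
 * For example, UNSTUFF_BITS(resp, 120, 8) returns bits 127:120, which is
 * the CID manufacturer ID on CSD-structure v2+ devices.
 */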
#define UNSTUFF_BITS(resp,start,size) \
	({ \
		const int __size = size; \
		const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
		const int __off = 3 - ((start) / 32); \
		const int __shft = (start) & 31; \
		u32 __res; \
 \
		__res = resp[__off] >> __shft; \
		if (__size + __shft > 32) \
			__res |= resp[__off-1] << ((32 - __shft) % 32); \
		__res & __mask; \
	})

/*
 * Given the decoded CSD structure, decode the raw CID to our CID structure.
 */
static int mmc_decode_cid(struct mmc_card *card)
{
	u32 *resp = card->raw_cid;

	/*
	 * The selection of the format here is based upon published
	 * specs from sandisk and from what people have reported.
	 */
	switch (card->csd.mmca_vsn) {
	case 0: /* MMC v1.0 - v1.2 */
	case 1: /* MMC v1.4 */
		card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
		card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
		card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
		card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
		card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
		card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
		card->cid.month = UNSTUFF_BITS(resp, 12, 4);
		card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	case 2: /* MMC v2.0 - v2.2 */
	case 3: /* MMC v3.1 - v3.3 */
	case 4: /* MMC v4 */
		card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
		card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
		card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
		card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
		card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
		card->cid.month = UNSTUFF_BITS(resp, 12, 4);
		card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	default:
		pr_err("%s: card has unknown MMCA version %d\n",
			mmc_hostname(card->host), card->csd.mmca_vsn);
		return -EINVAL;
	}

	return 0;
}

static void mmc_set_erase_size(struct mmc_card *card)
{
	if (card->ext_csd.erase_group_def & 1)
		card->erase_size = card->ext_csd.hc_erase_size;
	else
		card->erase_size = card->csd.erase_size;

	mmc_init_erase(card);
}

static void mmc_set_wp_grp_size(struct mmc_card *card)
{
	if (card->ext_csd.erase_group_def & 1)
		card->wp_grp_size = card->ext_csd.hc_erase_size *
			card->ext_csd.raw_hc_erase_gap_size;
	else
		card->wp_grp_size = card->csd.erase_size *
			(card->csd.wp_grp_size + 1);
}

/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, a, b;
	u32 *resp = card->raw_csd;

	/*
	 * We only understand CSD structure v1.1 and v1.2.
	 * v1.2 has extra information in bits 15, 11 and 10.
	 * We also support eMMC v4.4 & v4.41.
	 */
	csd->structure = UNSTUFF_BITS(resp, 126, 2);
	if (csd->structure == 0) {
		pr_err("%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd->structure);
		return -EINVAL;
	}

	csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);

	m = UNSTUFF_BITS(resp, 115, 4);
	e = UNSTUFF_BITS(resp, 112, 3);
	csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10;
	csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100;

	m = UNSTUFF_BITS(resp, 99, 4);
	e = UNSTUFF_BITS(resp, 96, 3);
	csd->max_dtr = tran_exp[e] * tran_mant[m];
	csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
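
	/*
	 * Device capacity: C_SIZE (12 bits) and C_SIZE_MULT (3 bits) give
	 * (C_SIZE + 1) * 2^(C_SIZE_MULT + 2) blocks of 2^READ_BL_LEN bytes.
	 */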
	e = UNSTUFF_BITS(resp, 47, 3);
	m = UNSTUFF_BITS(resp, 62, 12);
	csd->capacity = (1 + m) << (e + 2);

	csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
	csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
	csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
	csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
	csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
	csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
	csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
	csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
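
	/*
	 * ERASE_GRP_SIZE and ERASE_GRP_MULT define the erase group as
	 * (ERASE_GRP_SIZE + 1) * (ERASE_GRP_MULT + 1) write blocks; the
	 * shift below converts that to 512-byte sectors, which is why the
	 * write block size must be at least 2^9 bytes.
	 */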
	if (csd->write_blkbits >= 9) {
		a = UNSTUFF_BITS(resp, 42, 5);
		b = UNSTUFF_BITS(resp, 37, 5);
		csd->erase_size = (a + 1) * (b + 1);
		csd->erase_size <<= csd->write_blkbits - 9;
		csd->wp_grp_size = UNSTUFF_BITS(resp, 32, 5);
	}

	return 0;
}

static void mmc_select_card_type(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u8 card_type = card->ext_csd.raw_card_type;
	u32 caps = host->caps, caps2 = host->caps2;
	unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
	unsigned int avail_type = 0;

	if (caps & MMC_CAP_MMC_HIGHSPEED &&
	    card_type & EXT_CSD_CARD_TYPE_HS_26) {
		hs_max_dtr = MMC_HIGH_26_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS_26;
	}

	if (caps & MMC_CAP_MMC_HIGHSPEED &&
	    card_type & EXT_CSD_CARD_TYPE_HS_52) {
		hs_max_dtr = MMC_HIGH_52_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS_52;
	}

	if (caps & (MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR) &&
	    card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
	}

	if (caps & MMC_CAP_1_2V_DDR &&
	    card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
	}

	if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
	    card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
	}

	if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
	    card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
	}
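
	/*
	 * HS400 runs at the same 200 MHz maximum clock as HS200 (the data is
	 * simply double data rate), so the HS400 cases below reuse
	 * hs200_max_dtr.
	 */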
	if (caps2 & MMC_CAP2_HS400_1_8V &&
	    card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
	}

	if (caps2 & MMC_CAP2_HS400_1_2V &&
	    card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
	}

	if ((caps2 & MMC_CAP2_HS400_ES) &&
	    card->ext_csd.strobe_support &&
	    (avail_type & EXT_CSD_CARD_TYPE_HS400))
		avail_type |= EXT_CSD_CARD_TYPE_HS400ES;

	card->ext_csd.hs_max_dtr = hs_max_dtr;
	card->ext_csd.hs200_max_dtr = hs200_max_dtr;
	card->mmc_avail_type = avail_type;
}

static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
{
	u8 hc_erase_grp_sz, hc_wp_grp_sz;

	/*
	 * Disable these attributes by default
	 */
	card->ext_csd.enhanced_area_offset = -EINVAL;
	card->ext_csd.enhanced_area_size = -EINVAL;

	/*
	 * Enhanced area feature support -- check whether the eMMC
	 * card has the Enhanced area enabled. If so, export enhanced
	 * area offset and size to user by adding sysfs interface.
	 */
	if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
	    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
		if (card->ext_csd.partition_setting_completed) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			/*
			 * calculate the enhanced data area offset, in bytes
			 */
			card->ext_csd.enhanced_area_offset =
				(((unsigned long long)ext_csd[139]) << 24) +
				(((unsigned long long)ext_csd[138]) << 16) +
				(((unsigned long long)ext_csd[137]) << 8) +
				(((unsigned long long)ext_csd[136]));
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			pr_warn("%s: defines enhanced area without partition setting complete\n",
				mmc_hostname(card->host));
		}
	}
}

static void mmc_part_add(struct mmc_card *card, u64 size,
			 unsigned int part_cfg, char *name, int idx, bool ro,
			 int area_type)
{
	card->part[card->nr_parts].size = size;
	card->part[card->nr_parts].part_cfg = part_cfg;
	sprintf(card->part[card->nr_parts].name, name, idx);
	card->part[card->nr_parts].force_ro = ro;
	card->part[card->nr_parts].area_type = area_type;
	card->nr_parts++;
}

static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
{
	int idx;
	u8 hc_erase_grp_sz, hc_wp_grp_sz;
	u64 part_size;

	/*
	 * General purpose partition feature support --
	 * If ext_csd has the size of general purpose partitions,
	 * set size, part_cfg, partition name in mmc_part.
	 */
	if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
	    EXT_CSD_PART_SUPPORT_PART_EN) {
		hc_erase_grp_sz =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		hc_wp_grp_sz =
			ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
			if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
			    !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
			    !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
				continue;
			if (card->ext_csd.partition_setting_completed == 0) {
				pr_warn("%s: has partition size defined without partition complete\n",
					mmc_hostname(card->host));
				break;
			}
			part_size =
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
				<< 16) +
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
				<< 8) +
				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
			part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
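			/*
			 * part_size is now a count of 512 KiB units (the
			 * GP_SIZE_MULT value times the high-capacity WP
			 * group size), so shift by 19 to pass bytes to
			 * mmc_part_add().
			 */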
			mmc_part_add(card, part_size << 19,
				EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
				"gp%d", idx, false,
				MMC_BLK_DATA_AREA_GP);
		}
	}
}

/* Minimum partition switch timeout in milliseconds */
#define MMC_MIN_PART_SWITCH_TIME	300

/*
 * Decode extended CSD.
 */
static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0, idx;
	u64 part_size;
	struct device_node *np;
	bool broken_hpi = false;

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			pr_err("%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
				card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	np = mmc_of_find_child_device(card->host, 0);
	if (np && of_device_is_compatible(np, "mmc-card"))
		broken_hpi = of_property_read_bool(np, "broken-hpi");
	of_node_put(np);

	/*
	 * The EXT_CSD format is meant to be forward compatible. As long
	 * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
	 * are authorized, see JEDEC JESD84-B50 section B.8.
	 */
	card->ext_csd.rev = ext_csd[EXT_CSD_REV];

	/* fixup device after ext_csd revision field is updated */
	mmc_fixup_device(card, mmc_ext_csd_fixups);

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}

	card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	mmc_select_card_type(card);

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		u8 sn_shift = ext_csd[EXT_CSD_SLEEP_NOTIFICATION_TIME];

		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
				1 << ext_csd[EXT_CSD_S_A_TIMEOUT];

		/* Sleep notification time in 10us units */
		if (sn_shift > 0 && sn_shift <= 0x17)
			card->ext_csd.sleep_notification_time =
				1 << ext_csd[EXT_CSD_SLEEP_NOTIFICATION_TIME];

		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		if (!(card->host->caps2 & MMC_CAP2_NMCARD) &&
		    ext_csd[EXT_CSD_BOOT_MULT] &&
		    mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	card->ext_csd.raw_partition_support =
		ext_csd[EXT_CSD_PARTITION_SUPPORT];
	card->ext_csd.raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
	if (card->ext_csd.rev >= 4) {
		if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
		    EXT_CSD_PART_SETTING_COMPLETED)
			card->ext_csd.partition_setting_completed = 1;
		else
			card->ext_csd.partition_setting_completed = 0;

		mmc_manage_enhanced_area(card, ext_csd);
		mmc_manage_gp_partitions(card, ext_csd);

		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];

		/*
		 * Note that the call to mmc_part_add above defaults to read
		 * only. If this default assumption is changed, the call must
		 * take into account the value of boot_locked below.
		 */
		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_wp_status = ext_csd[EXT_CSD_BOOT_WP_STATUS];
		card->ext_csd.boot_ro_lockable = true;

		/* Save power class values */
		card->ext_csd.raw_pwr_cl_52_195 =
			ext_csd[EXT_CSD_PWR_CL_52_195];
		card->ext_csd.raw_pwr_cl_26_195 =
			ext_csd[EXT_CSD_PWR_CL_26_195];
		card->ext_csd.raw_pwr_cl_52_360 =
			ext_csd[EXT_CSD_PWR_CL_52_360];
		card->ext_csd.raw_pwr_cl_26_360 =
			ext_csd[EXT_CSD_PWR_CL_26_360];
		card->ext_csd.raw_pwr_cl_200_195 =
			ext_csd[EXT_CSD_PWR_CL_200_195];
		card->ext_csd.raw_pwr_cl_200_360 =
			ext_csd[EXT_CSD_PWR_CL_200_360];
		card->ext_csd.raw_pwr_cl_ddr_52_195 =
			ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
		card->ext_csd.raw_pwr_cl_ddr_52_360 =
			ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
		card->ext_csd.raw_pwr_cl_ddr_200_360 =
			ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
	}

	if (card->ext_csd.rev >= 5) {
		/* Adjust production date as per JEDEC JESD84-B451 */
		if (card->cid.year < 2010)
			card->cid.year += 16;

		/* check whether the eMMC card supports BKOPS */
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
			card->ext_csd.bkops = 1;
			card->ext_csd.man_bkops_en =
				(ext_csd[EXT_CSD_BKOPS_EN] &
					EXT_CSD_MANUAL_BKOPS_MASK);
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (card->ext_csd.man_bkops_en)
				pr_debug("%s: MAN_BKOPS_EN bit is set\n",
					mmc_hostname(card->host));
			card->ext_csd.auto_bkops_en =
				(ext_csd[EXT_CSD_BKOPS_EN] &
					EXT_CSD_AUTO_BKOPS_MASK);
			if (card->ext_csd.auto_bkops_en)
				pr_debug("%s: AUTO_BKOPS_EN bit is set\n",
					mmc_hostname(card->host));
		}

		/* check whether the eMMC card supports HPI */
		if (!mmc_card_broken_hpi(card) &&
		    !broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];

		/*
		 * RPMB regions are defined in multiples of 128K.
		 */
		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
		if (!(card->host->caps2 & MMC_CAP2_NMCARD) &&
		    ext_csd[EXT_CSD_RPMB_MULT] &&
		    mmc_host_cmd23(card->host)) {
			mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
				EXT_CSD_PART_CONFIG_ACC_RPMB,
				"rpmb", 0, false,
				MMC_BLK_DATA_AREA_RPMB);
		}
	}
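
	/* ERASED_MEM_CONT reports whether erased memory reads back as 1s or 0s. */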
	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	/* eMMC v4.5 or later */
	card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
				((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
				(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}

		card->ext_csd.max_packed_writes =
			ext_csd[EXT_CSD_MAX_PACKED_WRITES];
		card->ext_csd.max_packed_reads =
			ext_csd[EXT_CSD_MAX_PACKED_READS];
	} else {
		card->ext_csd.data_sector_size = 512;
	}

	/*
	 * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined
	 * when accessing a specific field", so use it here if there is no
	 * PARTITION_SWITCH_TIME.
	 */
	if (!card->ext_csd.part_time)
		card->ext_csd.part_time = card->ext_csd.generic_cmd6_time;
	/* Some eMMC set the value too low so set a minimum */
	if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
		card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;

	/* eMMC v5 or later */
	if (!(card->host->caps2 & MMC_CAP2_NMCARD) &&
	    (card->ext_csd.rev >= 7)) {
		if ((ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) &&
		    !card->ext_csd.man_bkops_en) {
			card->ext_csd.auto_bkops = 1;
			card->ext_csd.auto_bkops_en =
				!!(ext_csd[EXT_CSD_BKOPS_EN] &
					EXT_CSD_AUTO_BKOPS_MASK);
			if (!card->ext_csd.auto_bkops_en)
				pr_info("%s: AUTO_BKOPS_EN bit is not set\n",
					mmc_hostname(card->host));
		}

		memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
		       MMC_FIRMWARE_LEN);
		card->ext_csd.ffu_capable =
			(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
			!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);

		card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
		card->ext_csd.device_life_time_est_typ_a =
			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
		card->ext_csd.device_life_time_est_typ_b =
			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
	}

#if defined(CONFIG_MTK_EMMC_CQ_SUPPORT) || defined(CONFIG_MTK_EMMC_HW_CQ)
	/* eMMC v5.1 or later */
	if (!(card->host->caps2 & MMC_CAP2_NMCARD) &&
	    card->ext_csd.rev >= 8) {
		card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
					     EXT_CSD_CMDQ_SUPPORTED;
		card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
					    EXT_CSD_CMDQ_DEPTH_MASK) + 1;
		/* Exclude inefficiently small queue depths */
		if (card->ext_csd.cmdq_depth <= 2) {
			card->ext_csd.cmdq_support = false;
			card->ext_csd.cmdq_depth = 2;
		}
		if (card->ext_csd.cmdq_support) {
			pr_debug("%s: Command Queue supported depth %u\n",
				 mmc_hostname(card->host),
				 card->ext_csd.cmdq_depth);
		}
	}
#endif

out:
	return err;
}

static int mmc_read_ext_csd(struct mmc_card *card)
{
	u8 *ext_csd;
	int err;

	if (!mmc_can_ext_csd(card))
		return 0;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err) {
		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			return err;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
				mmc_hostname(card->host));
			err = 0;
		}

		return err;
	}

	err = mmc_decode_ext_csd(card, ext_csd);
	kfree(ext_csd);
	return err;
}
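
/*
 * Sanity check after a bus width switch: re-read EXT_CSD at the new width
 * and verify that the read-only fields still match the values read earlier
 * in 1-bit mode. Used when the host cannot run the bus test commands
 * (see MMC_CAP_BUS_WIDTH_TEST in mmc_select_bus_width()).
 */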
static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
	u8 *bw_ext_csd;
	int err;

#if defined(CONFIG_MTK_EMMC_CQ_SUPPORT)
	/* add for emmc reset when error happen */
	/* return directly because compare fail seldom happens when reinit
	 * emmc
	 */
	if (emmc_resetting_when_cmdq)
		return 0;
#endif

	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	err = mmc_get_ext_csd(card, &bw_ext_csd);
	if (err)
		return err;

	/* only compare read only fields */
	err = !((card->ext_csd.raw_partition_support ==
			bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
		(card->ext_csd.raw_erased_mem_count ==
			bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
		(card->ext_csd.rev ==
			bw_ext_csd[EXT_CSD_REV]) &&
		(card->ext_csd.raw_ext_csd_structure ==
			bw_ext_csd[EXT_CSD_STRUCTURE]) &&
		(card->ext_csd.raw_card_type ==
			bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
		(card->ext_csd.raw_s_a_timeout ==
			bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
		(card->ext_csd.raw_hc_erase_gap_size ==
			bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		(card->ext_csd.raw_erase_timeout_mult ==
			bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
		(card->ext_csd.raw_hc_erase_grp_size ==
			bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		(card->ext_csd.raw_sec_trim_mult ==
			bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
		(card->ext_csd.raw_sec_erase_mult ==
			bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
		(card->ext_csd.raw_sec_feature_support ==
			bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
		(card->ext_csd.raw_trim_mult ==
			bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
		(card->ext_csd.raw_sectors[0] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
		(card->ext_csd.raw_sectors[1] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
		(card->ext_csd.raw_sectors[2] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
		(card->ext_csd.raw_sectors[3] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
		(card->ext_csd.raw_pwr_cl_52_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
		(card->ext_csd.raw_pwr_cl_26_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
		(card->ext_csd.raw_pwr_cl_52_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
		(card->ext_csd.raw_pwr_cl_26_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
		(card->ext_csd.raw_pwr_cl_200_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
		(card->ext_csd.raw_pwr_cl_200_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
		(card->ext_csd.raw_pwr_cl_ddr_52_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
		(card->ext_csd.raw_pwr_cl_ddr_52_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
		(card->ext_csd.raw_pwr_cl_ddr_200_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));

	if (err)
		err = -EINVAL;

	kfree(bw_ext_csd);
	return err;
}

MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
	card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
	card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(wp_grp_size, "%u\n", card->wp_grp_size << 9);
MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
	card->ext_csd.device_life_time_est_typ_a,
	card->ext_csd.device_life_time_est_typ_b);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
	card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);

static ssize_t mmc_fwrev_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	if (card->ext_csd.rev < 7) {
		return sprintf(buf, "0x%x\n", card->cid.fwrev);
	} else {
		return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
			       card->ext_csd.fwrev);
	}
}

static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);

static ssize_t mmc_dsr_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct mmc_card *card = mmc_dev_to_card(dev);
	struct mmc_host *host = card->host;

	if (card->csd.dsr_imp && host->dsr_req)
		return sprintf(buf, "0x%x\n", host->dsr);
	else
		/* return default DSR value */
		return sprintf(buf, "0x%x\n", 0x404);
}

static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);

static struct attribute *mmc_std_attrs[] = {
	&dev_attr_cid.attr,
	&dev_attr_csd.attr,
	&dev_attr_date.attr,
	&dev_attr_erase_size.attr,
	&dev_attr_preferred_erase_size.attr,
	&dev_attr_wp_grp_size.attr,
	&dev_attr_fwrev.attr,
	&dev_attr_ffu_capable.attr,
	&dev_attr_hwrev.attr,
	&dev_attr_manfid.attr,
	&dev_attr_name.attr,
	&dev_attr_oemid.attr,
	&dev_attr_prv.attr,
	&dev_attr_rev.attr,
	&dev_attr_pre_eol_info.attr,
	&dev_attr_life_time.attr,
	&dev_attr_serial.attr,
	&dev_attr_enhanced_area_offset.attr,
	&dev_attr_enhanced_area_size.attr,
	&dev_attr_raw_rpmb_size_mult.attr,
	&dev_attr_rel_sectors.attr,
	&dev_attr_ocr.attr,
	&dev_attr_dsr.attr,
	&dev_attr_cmdq_en.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mmc_std);

static struct device_type mmc_type = {
	.groups = mmc_std_groups,
};

/*
 * Select the PowerClass for the current bus width.
 * If power class is defined for 4/8 bit bus in the
 * extended CSD register, select it by executing the
 * mmc_switch command.
 */
static int __mmc_select_powerclass(struct mmc_card *card,
				   unsigned int bus_width)
{
	struct mmc_host *host = card->host;
	struct mmc_ext_csd *ext_csd = &card->ext_csd;
	unsigned int pwrclass_val = 0;
	int err = 0;

	switch (1 << host->ios.vdd) {
	case MMC_VDD_165_195:
		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_26_195;
		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_52_195 :
				ext_csd->raw_pwr_cl_ddr_52_195;
		else if (host->ios.clock <= MMC_HS200_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_200_195;
		break;
	case MMC_VDD_27_28:
	case MMC_VDD_28_29:
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
	case MMC_VDD_31_32:
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
	case MMC_VDD_34_35:
	case MMC_VDD_35_36:
		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_26_360;
		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_52_360 :
				ext_csd->raw_pwr_cl_ddr_52_360;
		else if (host->ios.clock <= MMC_HS200_MAX_DTR)
			pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_ddr_200_360 :
				ext_csd->raw_pwr_cl_200_360;
		break;
	default:
		pr_warn("%s: Voltage range not supported for power class\n",
			mmc_hostname(host));
		return -EINVAL;
	}
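
	/*
	 * Each PWR_CL_* byte packs two power classes: bits 7:4 apply to the
	 * 8-bit bus configuration and bits 3:0 to the 4-bit one.
	 */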
	if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
				EXT_CSD_PWR_CL_8BIT_SHIFT;
	else
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
				EXT_CSD_PWR_CL_4BIT_SHIFT;

	/* If the power class is different from the default value */
	if (pwrclass_val > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_CLASS,
				 pwrclass_val,
				 card->ext_csd.generic_cmd6_time);
	}

	return err;
}

static int mmc_select_powerclass(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 bus_width, ext_csd_bits;
	int err, ddr;

	/* Power class selection is supported for versions >= 4.0 */
	if (!mmc_can_ext_csd(card))
		return 0;

	bus_width = host->ios.bus_width;
	/* Power class values are defined only for 4/8 bit bus */
	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
	if (ddr)
		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
	else
		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
			EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;

	err = __mmc_select_powerclass(card, ext_csd_bits);
	if (err)
		pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
			mmc_hostname(host), 1 << bus_width, ddr);

	return err;
}

/*
 * Set the bus speed for the selected speed mode.
 */
static void mmc_set_bus_speed(struct mmc_card *card)
{
	unsigned int max_dtr = (unsigned int)-1;

	if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
	    max_dtr > card->ext_csd.hs200_max_dtr)
		max_dtr = card->ext_csd.hs200_max_dtr;
	else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
		max_dtr = card->ext_csd.hs_max_dtr;
	else if (max_dtr > card->csd.max_dtr)
		max_dtr = card->csd.max_dtr;

	mmc_set_clock(card->host, max_dtr);
}

/*
 * Select the bus width among 4-bit and 8-bit (SDR).
 * If the bus width is changed successfully, return the selected width value.
 * Zero is returned instead of error value if the wide width is not supported.
 */
static int mmc_select_bus_width(struct mmc_card *card)
{
	static unsigned ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	static unsigned bus_widths[] = {
		MMC_BUS_WIDTH_8,
		MMC_BUS_WIDTH_4,
	};
	struct mmc_host *host = card->host;
	unsigned idx, bus_width = 0;
	int err = 0;

	if (!mmc_can_ext_csd(card) ||
	    !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
		return 0;

	idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards don't have a configuration register to
	 * advertise the supported bus widths. So either the bus test command
	 * has to be run, or the EXT_CSD read at the current bus width has to
	 * be compared against the EXT_CSD values read earlier in 1-bit mode.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * If the host is capable of 8-bit transfers, first try to
		 * switch the device into 8-bit mode; if that mmc_switch
		 * fails, fall back to 4-bit mode. On success set the
		 * corresponding bus width on the host.
		 */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH,
				 ext_csd_bits[idx],
				 card->ext_csd.generic_cmd6_time);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(host, bus_width);

		/*
		 * If controller can't handle bus width test,
		 * compare ext_csd previously read in 1 bit mode
		 * against ext_csd at new bus width
		 */
		if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
			err = mmc_compare_ext_csds(card, bus_width);
		else
			err = mmc_bus_test(card, bus_width);

		if (!err) {
			err = bus_width;
			break;
		} else {
			pr_warn("%s: switch to bus width %d failed\n",
				mmc_hostname(host), 1 << bus_width);
		}
	}

	return err;
}

/*
 * Switch to the high-speed mode
 */
static int mmc_select_hs(struct mmc_card *card)
{
	int err;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			   card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
			   true, true, true);
	if (err)
		pr_warn("%s: switch to high-speed failed, err:%d\n",
			mmc_hostname(card->host), err);

	return err;
}

/*
 * Activate wide bus and DDR if supported.
 */
static int mmc_select_hs_ddr(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 bus_width, ext_csd_bits;
	int err = 0;

	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
		return 0;

	bus_width = host->ios.bus_width;
	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
		EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_BUS_WIDTH,
			   ext_csd_bits,
			   card->ext_csd.generic_cmd6_time,
			   MMC_TIMING_MMC_DDR52,
			   true, true, true);
	if (err) {
		pr_err("%s: switch to bus width %d ddr failed\n",
			mmc_hostname(host), 1 << bus_width);
		return err;
	}

	/*
	 * eMMC cards can support 3.3V to 1.2V i/o (vccq) signaling.
	 *
	 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
	 *
	 * 1.8V vccq at 3.3V core voltage (vcc) is not required
	 * in the JEDEC spec for DDR.
	 *
	 * Even though an (e)MMC card can support 3.3V to 1.2V vccq, not
	 * every host controller can, e.g. some SDHCI controllers connected
	 * to an eMMC device still need to use 1.8V vccq for DDR mode.
	 *
	 * So the sequence will be:
	 * if (host and device can both support 1.2v IO)
	 *	use 1.2v IO;
	 * else if (host and device can both support 1.8v IO)
	 *	use 1.8v IO;
	 * so if host and device can only support 3.3v IO, this is the
	 * last choice.
	 *
	 * WARNING: eMMC rules are NOT the same as SD DDR
	 */
	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
		if (!err)
			return 0;
	}

	if (host->caps2 & MMC_CAP2_NMCARD)
		err = 0;
	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V &&
		 host->caps & MMC_CAP_1_8V_DDR)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* make sure vccq is 3.3v after switching disaster */
	if (err)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);

	return err;
}
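
/*
 * Entry into HS400 is done from HS200 (after tuning): drop back to High
 * Speed timing, lower the clock, switch the bus to 8-bit DDR, then set
 * HS_TIMING to HS400 and raise the clock again, roughly following the
 * HS400 selection flow described in the JESD84 specification.
 */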
static int mmc_select_hs400(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_dtr;
	int err = 0;
	u8 val;

	/*
	 * HS400 mode requires 8-bit bus width
	 */
	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
	      host->ios.bus_width == MMC_BUS_WIDTH_8))
		return 0;

	/* Switch card to HS mode */
	val = EXT_CSD_TIMING_HS;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
			   true, false, true);
	if (err) {
		pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
			mmc_hostname(host), err);
		return err;
	}

	/* Set host controller to HS timing */
	mmc_set_timing(card->host, MMC_TIMING_MMC_HS);

	/* Reduce frequency to HS frequency */
	max_dtr = card->ext_csd.hs_max_dtr;
	mmc_set_clock(host, max_dtr);

	err = mmc_switch_status(card);
	if (err)
		goto out_err;

	/* Switch card to DDR */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8,
			 card->ext_csd.generic_cmd6_time);
	if (err) {
		pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
			mmc_hostname(host), err);
		return err;
	}

	/* Switch card to HS400 */
	val = EXT_CSD_TIMING_HS400 |
	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
			   true, false, true);
	if (err) {
		pr_err("%s: switch to hs400 failed, err:%d\n",
			mmc_hostname(host), err);
		return err;
	}

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(host, MMC_TIMING_MMC_HS400);
	mmc_set_bus_speed(card);

	err = mmc_switch_status(card);
	if (err)
		goto out_err;

	return 0;

out_err:
	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
	       __func__, err);
	return err;
}

int mmc_hs200_to_hs400(struct mmc_card *card)
{
	return mmc_select_hs400(card);
}

int mmc_hs400_to_hs200(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_dtr;
	int err;
	u8 val;

	/* Reduce frequency to HS */
	max_dtr = card->ext_csd.hs_max_dtr;
	mmc_set_clock(host, max_dtr);

	/* Switch HS400 to HS DDR */
	val = EXT_CSD_TIMING_HS;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   val, card->ext_csd.generic_cmd6_time, 0,
			   true, false, true);
	if (err)
		goto out_err;

	mmc_set_timing(host, MMC_TIMING_MMC_DDR52);

	err = mmc_switch_status(card);
	if (err)
		goto out_err;

	/* Switch HS DDR to HS */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			   EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
			   0, true, false, true);
	if (err)
		goto out_err;

	mmc_set_timing(host, MMC_TIMING_MMC_HS);

	err = mmc_switch_status(card);
	if (err)
		goto out_err;

	/* Switch HS to HS200 */
	val = EXT_CSD_TIMING_HS200 |
	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   val, card->ext_csd.generic_cmd6_time, 0,
			   true, false, true);
	if (err)
		goto out_err;

	mmc_set_timing(host, MMC_TIMING_MMC_HS200);

	/*
	 * For HS200, CRC errors are not a reliable way to know the switch
	 * failed. If there really is a problem, we would expect tuning will
	 * fail and the result ends up the same.
	 */
	err = __mmc_switch_status(card, false);
	if (err)
		goto out_err;

	mmc_set_bus_speed(card);

	return 0;

out_err:
	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
	       __func__, err);
	return err;
}

static void mmc_select_driver_type(struct mmc_card *card)
{
	int card_drv_type, drive_strength, drv_type;

	card_drv_type = card->ext_csd.raw_driver_strength |
			mmc_driver_type_mask(0);

	drive_strength = mmc_select_drive_strength(card,
						   card->ext_csd.hs200_max_dtr,
						   card_drv_type, &drv_type);

	card->drive_strength = drive_strength;

	if (drv_type)
		mmc_set_driver_type(card->host, drv_type);
}

static int mmc_select_hs400es(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = -EINVAL;
	u8 val;

	if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
		err = -ENOTSUPP;
		goto out_err;
	}

	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);

	if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* If it fails, try again during the next card power cycle */
	if (err)
		goto out_err;

	err = mmc_select_bus_width(card);
	if (err < 0)
		goto out_err;

	/* Switch card to HS mode */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			   card->ext_csd.generic_cmd6_time, 0,
			   true, false, true);
	if (err) {
		pr_err("%s: switch to hs for hs400es failed, err:%d\n",
			mmc_hostname(host), err);
		goto out_err;
	}

	mmc_set_timing(host, MMC_TIMING_MMC_HS);
	err = mmc_switch_status(card);
	if (err)
		goto out_err;

	mmc_set_clock(host, card->ext_csd.hs_max_dtr);

	/* Switch card to DDR with strobe bit */
	val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 val,
			 card->ext_csd.generic_cmd6_time);
	if (err) {
		pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
			mmc_hostname(host), err);
		goto out_err;
	}

	mmc_select_driver_type(card);

	/* Switch card to HS400 */
	val = EXT_CSD_TIMING_HS400 |
	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
			   true, false, true);
	if (err) {
		pr_err("%s: switch to hs400es failed, err:%d\n",
			mmc_hostname(host), err);
		goto out_err;
	}

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(host, MMC_TIMING_MMC_HS400);

	/* Have the controller enable the enhanced strobe function */
	host->ios.enhanced_strobe = true;
	if (host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	err = mmc_switch_status(card);
	if (err)
		goto out_err;

	return 0;

out_err:
	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
	       __func__, err);
	return err;
}

/*
 * For a device supporting HS200 mode, the following sequence
 * should be done before executing the tuning process.
 * 1. set the desired bus width (4-bit or 8-bit, 1-bit is not supported)
 * 2. switch to HS200 mode
 * 3. set the clock to > 52MHz and <= 200MHz
 */
static int mmc_select_hs200(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int old_timing, old_signal_voltage;
	int err = -EINVAL;
	u8 val;

	old_signal_voltage = host->ios.signal_voltage;
	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);

	if (host->caps2 & MMC_CAP2_NMCARD)
		err = 0;
	else if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* If it fails, try again during the next card power cycle */
	if (err)
		return err;

	mmc_select_driver_type(card);

	/*
	 * Set the bus width (4 or 8) with host's support and
	 * switch to HS200 mode if bus width is set successfully.
	 */
	err = mmc_select_bus_width(card);
	if (err > 0) {
		val = EXT_CSD_TIMING_HS200 |
		      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_HS_TIMING, val,
				   card->ext_csd.generic_cmd6_time, 0,
				   true, false, true);
		if (err)
			goto err;
		old_timing = host->ios.timing;
		mmc_set_timing(host, MMC_TIMING_MMC_HS200);

		/*
		 * For HS200, CRC errors are not a reliable way to know the
		 * switch failed. If there really is a problem, we would expect
		 * tuning will fail and the result ends up the same.
		 */
		err = __mmc_switch_status(card, false);

		/*
		 * mmc_select_timing() assumes timing has not changed if
		 * it is a switch error.
		 */
		if (err == -EBADMSG)
			mmc_set_timing(host, old_timing);
	}
err:
	if (err) {
		/* fall back to the old signal voltage; if that fails, report an error */
		if (mmc_set_signal_voltage(host, old_signal_voltage))
			err = -EIO;
		pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
		       __func__, err);
	}
	return err;
}

/*
 * Activate High Speed, HS200 or HS400ES mode if supported.
 */
static int mmc_select_timing(struct mmc_card *card)
{
	int err = 0;

	if (!mmc_can_ext_csd(card))
		goto bus_speed;

	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
		err = mmc_select_hs400es(card);
	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
		err = mmc_select_hs200(card);
	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_select_hs(card);

	if (err && err != -EBADMSG)
		return err;

bus_speed:
	/*
	 * Set the bus speed to the selected bus timing.
	 * If no timing was selected, backward-compatible mode is the default.
	 */
	mmc_set_bus_speed(card);
	return 0;
}

#if defined(CONFIG_MTK_EMMC_CQ_SUPPORT) || defined(CONFIG_MTK_EMMC_HW_CQ)
static int mmc_select_cmdq(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret = 0;

#if !defined(CONFIG_MTK_EMMC_CQ_SUPPORT) || defined(CONFIG_MTK_EMMC_HW_CQ)
	/*
	 * If the cqhci driver is enabled but the host does not
	 * support it, fail here.
	 */
	if (!(host->caps2 & MMC_CAP2_CQE)) {
		pr_notice("%s: host \"cqe\" capability missing\n",
			  mmc_hostname(host));
		ret = -EBADMSG;
		goto out;
	}
#endif

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_CMDQ_MODE_EN, 1,
			 card->ext_csd.generic_cmd6_time);
	if (ret)
		goto out;

	mmc_card_set_cmdq(card);
	card->ext_csd.cmdq_en = true;

#ifdef CONFIG_MTK_EMMC_HW_CQ
	if (host->caps2 & MMC_CAP2_CQE) {
		ret = host->cmdq_ops->enable(card->host);
		if (ret) {
			pr_notice("%s: failed (%d) enabling CMDQ on host\n",
				  mmc_hostname(host), ret);
			mmc_card_clr_cmdq(card);
			card->ext_csd.cmdq_en = false;
			ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_CMDQ_MODE_EN, 0,
					 card->ext_csd.generic_cmd6_time);
			if (ret)
				goto out;
		}
	}
#endif

out:
	pr_notice("%s: CMDQ enable %s\n",
		  mmc_hostname(host), ret ? "fail" : "done");
	return ret;
}
#endif

/*
 * Execute tuning sequence to seek the proper bus operating
 * conditions for HS200 and HS400, which sends CMD21 to the device.
 */
static int mmc_hs200_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	/*
	 * Timing should be adjusted to the HS400 target
	 * operation frequency for tuning process
	 */
	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
	    host->ios.bus_width == MMC_BUS_WIDTH_8)
		if (host->ops->prepare_hs400_tuning)
			host->ops->prepare_hs400_tuning(host, &host->ios);

	return mmc_execute_tuning(card);
}

/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err;
	u32 cid[4];
	u32 rocr;

	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting init */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state. We wait 1ms to give cards time to
	 * respond.
	 * mmc_go_idle is needed for eMMC that are asleep
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	err = mmc_send_cid(host, cid);
	if (err)
		goto err;

#ifdef CONFIG_MMC_FFU
	if (oldcard && (oldcard->state & MMC_STATE_FFUED)) {
		/*
		 * After FFU, some fields in the CID may change,
		 * so just copy the new CID into card->raw_cid.
		 */
		memcpy((void *)oldcard->raw_cid, (void *)cid, sizeof(cid));
		err = mmc_decode_cid(oldcard);
		if (err)
			goto free_card;
		card = oldcard;
		card->nr_parts = 0;
		oldcard = NULL;
	} else
#endif
	{
		if (oldcard) {
			if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
				err = -ENOENT;
				goto err;
			}

			card = oldcard;
		} else {
			/*
			 * Allocate card structure.
			 */
			card = mmc_alloc_card(host, &mmc_type);
			if (IS_ERR(card)) {
				err = PTR_ERR(card);
				goto err;
			}

			card->ocr = ocr;
			card->type = MMC_TYPE_MMC;
			card->rca = 1;
			memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
		}
	}

	/*
	 * Call the optional HC's init_card function to handle quirks.
	 */
	if (host->ops->init_card)
		host->ops->init_card(host, card);

	/*
	 * For native busses: set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * handling only for cards supporting DSR and hosts requesting
	 * DSR configuration
	 */
	if (card->csd.dsr_imp && host->dsr_req)
		mmc_set_dsr(host);

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/* Read extended CSD. */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;

		/*
		 * If doing byte addressing, check if required to do sector
		 * addressing. Handle the case of <2GB cards needing sector
		 * addressing. See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (rocr & BIT(30))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

#if 0	/* temporarily disabled */
	/* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
	if (card->ext_csd.rev >= 3) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable the enhanced area offset & size;
			 * we will try to enable ERASE_GROUP_DEF again
			 * during the next re-init.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * ERASE_GRP_DEF was enabled successfully.
			 * This affects the erase size, so the erase
			 * size needs to be reset here.
			 */
			mmc_set_erase_size(card);
		}
	}
#endif

	mmc_set_wp_grp_size(card);

	/*
	 * Ensure eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/*
	 * Enable power_off_notification byte in the ext_csd register
	 */
	if (card->ext_csd.rev >= 6) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 EXT_CSD_POWER_ON,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * The err can be -EBADMSG or 0,
		 * so check for success and update the flag
		 */
		if (!err)
			card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
	}

	/*
	 * Select timing interface
	 */
	err = mmc_select_timing(card);
	if (err)
		goto free_card;

	if (mmc_card_hs200(card)) {
		err = mmc_hs200_tuning(card);
		if (err)
			goto free_card;

		err = mmc_select_hs400(card);
		if (err)
			goto free_card;
	} else if (!mmc_card_hs400es(card)) {
		/* Select the desired bus width optionally */
		err = mmc_select_bus_width(card);
		if (err > 0 && mmc_card_hs(card)) {
			err = mmc_select_hs_ddr(card);
			if (err)
				goto free_card;
		}
	}

	/*
	 * Choose the power class with selected bus interface
	 */
	mmc_select_powerclass(card);

	/*
	 * Enable auto BKOPS if the eMMC card supports it.
	 * AUTO_BKOPS_EN is bit 1 of EXT_CSD byte 163 and is
	 * multiple-time programmable.
	 */
	if (card->ext_csd.auto_bkops && !card->ext_csd.man_bkops_en) {
		if (!card->ext_csd.auto_bkops_en) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BKOPS_EN, EXT_CSD_AUTO_BKOPS_MASK,
					 card->ext_csd.generic_cmd6_time);
			if (err && err != -EBADMSG)
				goto free_card;
			if (err) {
				pr_notice("%s: Enabling AutoBKOPS failed\n",
					  mmc_hostname(card->host));
				card->ext_csd.auto_bkops_en = 0;
				err = 0;
			} else {
				card->ext_csd.auto_bkops_en = 1;
			}
		}
	}

	/*
	 * Enable HPI feature (if supported)
	 */
	if (card->ext_csd.hpi) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HPI_MGMT, 1,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warn("%s: Enabling HPI failed\n",
				mmc_hostname(card->host));
			card->ext_csd.hpi_en = 0;
			err = 0;
		} else {
			card->ext_csd.hpi_en = 1;
		}
	}

	/*
	 * If the cache size is higher than 0, this indicates the existence of
	 * a cache and it can be turned on. Note that some eMMCs from Micron
	 * have been reported to need an ~800 ms timeout when enabling the
	 * cache after sudden power failure tests. Let's extend the timeout to
	 * a minimum of MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
	 */
	if (card->ext_csd.cache_size > 0) {
		unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;

		timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_CACHE_CTRL, 1, timeout_ms);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * Only if there was no error has the cache been turned on
		 * successfully.
		 */
		if (err) {
			pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
				mmc_hostname(card->host), err);
			card->ext_csd.cache_ctrl = 0;
			err = 0;
		} else {
			card->ext_csd.cache_ctrl = 1;
		}
	}

	/*
	 * In some cases (e.g. RPMB or mmc_test), the Command Queue must be
	 * disabled for a time, so a flag is needed to indicate to re-enable the
	 * Command Queue.
	 */
	card->reenable_cmdq = card->ext_csd.cmdq_en;

	if (!oldcard)
		host->card = card;

#if defined(CONFIG_MTK_EMMC_CQ_SUPPORT) || defined(CONFIG_MTK_EMMC_HW_CQ)
	/*
	 * Enable Command Queue if supported. Note that Packed Commands cannot
	 * be used with Command Queue.
	 */
	card->ext_csd.cmdq_en = false;
	if (card->ext_csd.cmdq_support) {
		err = mmc_select_cmdq(card);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_notice("%s: Enabling CMDQ failed\n",
				  mmc_hostname(card->host));
			card->ext_csd.cmdq_support = false;
			card->ext_csd.cmdq_depth = 2;
			err = 0;
		}
	}
#endif

	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:
	return err;
}
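
/*
 * Re-run card initialisation against the existing card, used after a
 * field firmware update (FFU) so the updated device state is re-read.
 */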
#ifdef CONFIG_MMC_FFU
int mmc_reinit_oldcard(struct mmc_host *host)
{
	return mmc_init_card(host, host->card->ocr, host->card);
}
#endif
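
/*
 * Turn the eMMC volatile cache on or off via EXT_CSD CACHE_CTRL and keep
 * the cached cache_ctrl flag in sync with the card.
 */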
static int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	unsigned int timeout;
	int err = 0;

	if (card && mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable) {
			timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_CACHE_CTRL, enable, timeout);
			if (err)
				pr_err("%s: cache %s error %d\n",
				       mmc_hostname(card->host),
				       enable ? "on" : "off",
				       err);
			else
				card->ext_csd.cache_ctrl = enable;
		}
	}

	return err;
}

static int mmc_can_sleep(struct mmc_card *card)
{
	return (card && card->ext_csd.rev >= 3);
}
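
/*
 * Put the card into Sleep state: send the optional SLEEP_NOTIFICATION,
 * deselect the card and then issue CMD5 (SLEEP_AWAKE) with the sleep bit set.
 */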
static int mmc_sleep(struct mmc_host *host)
{
	struct mmc_command cmd = {};
	struct mmc_card *card = host->card;
	unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
	unsigned int sn_timeout_ms =
		DIV_ROUND_UP(card->ext_csd.sleep_notification_time, 100);
	int err;

	/* Re-tuning can't be done once the card is deselected */
	mmc_retune_hold(host);

	/* Send SLEEP_NOTIFICATION if the eMMC revision is v5.0 or later */
	if (card->ext_csd.rev >= 7 && !(card->quirks & MMC_QUIRK_DISABLE_SNO)) {
		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_POWER_OFF_NOTIFICATION,
				   EXT_CSD_SLEEP_NOTIFICATION, sn_timeout_ms, 0,
				   true, false, false);
		if (err)
			pr_err("%s: Sleep Notification timed out %u\n",
			       mmc_hostname(card->host), sn_timeout_ms);
	}

	err = mmc_deselect_cards(host);
	if (err)
		goto out_release;

	cmd.opcode = MMC_SLEEP_AWAKE;
	cmd.arg = card->rca << 16;
	cmd.arg |= 1 << 15;

	/*
	 * If the max_busy_timeout of the host is specified, validate it against
	 * the sleep cmd timeout. A failure means we need to prevent the host
	 * from doing hw busy detection, which is done by converting to a R1
	 * response instead of a R1B.
	 */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	}

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		goto out_release;

	/*
	 * If the host does not wait while the card signals busy, then we will
	 * have to wait the sleep/awake timeout. Note, we cannot use the
	 * SEND_STATUS command to poll the status because that command (and most
	 * others) is invalid while the card sleeps.
	 */
	if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
		mmc_delay(timeout_ms);

out_release:
	mmc_retune_release(host);
	return err;
}

#ifdef CONFIG_MMC_MTK_PRO
static int mmc_awake(struct mmc_host *host)
{
	struct mmc_command cmd = {0};
	struct mmc_card *card = host->card;
	int err;

	cmd.opcode = MMC_SLEEP_AWAKE;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	err = mmc_select_card(host->card);

	return err;
}
#endif

static int mmc_can_poweroff_notify(const struct mmc_card *card)
{
	return card &&
		mmc_card_mmc(card) &&
		(card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
}

static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
{
	unsigned int timeout = card->ext_csd.generic_cmd6_time;
	int err;

	/* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
	if (notify_type == EXT_CSD_POWER_OFF_LONG)
		timeout = card->ext_csd.power_off_longtime;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_POWER_OFF_NOTIFICATION,
			   notify_type, timeout, 0, true, false, false);
	if (err)
		pr_err("%s: Power Off Notification timed out, %u\n",
		       mmc_hostname(card->host), timeout);

	/* Disable the power off notification after the switch operation. */
	card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;

	return err;
}

/*
 * Host is being removed. Free up the current card.
 */
static void mmc_remove(struct mmc_host *host)
{
	mmc_remove_card(host->card);
	host->card = NULL;
}

/*
 * Card detection - card is alive.
 */
static int mmc_alive(struct mmc_host *host)
{
	return mmc_send_status(host->card, NULL);
}

/*
 * Card detection callback from host.
 */
static void mmc_detect(struct mmc_host *host)
{
	int err;

	mmc_get_card(host->card);

	/*
	 * Just check if our card has been removed.
	 */
	err = _mmc_detect_card_removed(host);

	mmc_put_card(host->card);

	if (err) {
		mmc_remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
	}
}
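
/*
 * Suspend the card: stop any BKOPS, flush or disable the cache, then either
 * send a power-off notification, put the card to sleep or simply deselect it,
 * and finally power the host off.
 */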
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
	int err = 0;
	unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
					EXT_CSD_POWER_OFF_LONG;
#ifdef CONFIG_MTK_EMMC_HW_CQ
	int ret;
#endif

	mmc_claim_host(host);

	if (mmc_card_suspended(host->card))
		goto out;

#ifdef CONFIG_MTK_EMMC_HW_CQ
	if (host->card->cqe_init) {
		WARN_ON(host->cmdq_ctx.active_reqs); /* bug */

		err = mmc_cmdq_halt(host, true);
		if (err) {
			pr_notice("%s: halt: failed: %d\n", __func__, err);
			goto out;
		}
		host->cmdq_ops->disable(host, true);
	}
#endif

	if (mmc_card_doing_bkops(host->card)) {
		err = mmc_stop_bkops(host->card);
		if (err)
			goto out_err;
	}

	/* Turn off the cache if the eMMC revision is before v5.0 */
	if (host->card->ext_csd.rev < 7)
		err = mmc_cache_ctrl(host, 0);
	else
		err = mmc_flush_cache(host->card);
	if (err)
		goto out_err;

	if (mmc_can_poweroff_notify(host->card) &&
	    ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
		err = mmc_poweroff_notify(host->card, notify_type);
	else if (mmc_can_sleep(host->card)) {
		memcpy(&host->cached_ios, &host->ios, sizeof(host->ios));
		err = mmc_sleep(host);
	} else if (!mmc_host_is_spi(host))
		err = mmc_deselect_cards(host);

#ifdef CONFIG_MTK_EMMC_HW_CQ
	if (err)
		goto out_err;
#endif

	if (!err) {
		mmc_power_off(host);
		mmc_card_set_suspended(host->card);
	}

#ifdef CONFIG_MTK_EMMC_HW_CQ
	goto out;

out_err:
	/*
	 * In case of an error, put the controller back into CMDQ mode and
	 * unhalt it. We expect cmdq enable and unhalt not to return errors,
	 * since they only program a few registers.
	 */
	if (host->card->cqe_init) {
		ret = host->cmdq_ops->enable(host);
		if (ret)
			pr_notice("%s: %s: enabling CMDQ mode failed (%d)\n",
				  mmc_hostname(host), __func__, ret);
		mmc_cmdq_halt(host, false);
	}
out:
	/* Kick the CMDQ thread to process any requests that came in while suspending */
	if (host->card->cqe_init)
		wake_up(&host->cmdq_ctx.wait);
#else
out_err:
out:
#endif
	mmc_release_host(host);
	return err;
}

/*
 * Suspend callback
 */
static int mmc_suspend(struct mmc_host *host)
{
	int err;

	err = _mmc_suspend(host, true);
	if (!err) {
		pm_runtime_disable(&host->card->dev);
		pm_runtime_set_suspended(&host->card->dev);
	}

	return err;
}

/*
 * This function tries to determine if the same card is still present
 * and, if so, restore all state to it.
 */
static int _mmc_resume(struct mmc_host *host)
{
	int err = 0;

	mmc_claim_host(host);

	if (!mmc_card_suspended(host->card))
		goto out;

	mmc_power_up(host, host->card->ocr);
#ifdef CONFIG_MMC_MTK_PRO
	if (mmc_can_sleep(host->card)) {
		err = mmc_awake(host);
		if (err) {
			pr_err("%s: %s: awake failed (%d)\n",
			       mmc_hostname(host), __func__, err);
			goto out;
		}
		memcpy(&host->ios, &host->cached_ios, sizeof(host->ios));
		host->ops->set_ios(host, &host->ios);
	} else
		err = mmc_init_card(host, host->card->ocr, host->card);
#else
	err = mmc_init_card(host, host->card->ocr, host->card);
#endif

	/* Turn on the cache if the eMMC revision is before v5.0 */
	if (!err && host->card->ext_csd.rev < 7)
		err = mmc_cache_ctrl(host, 1);

#ifdef CONFIG_MTK_EMMC_HW_CQ
	if (host->card->cqe_init &&
	    host->caps2 & MMC_CAP2_CQE) {
		/* enable cqhci */
		host->cmdq_ops->enable(host);
		/* un-halt once enabled */
		if (mmc_host_halt(host) &&
		    mmc_cmdq_halt(host, false))
			pr_notice("%s: %s: cmdq unhalt failed\n",
				  mmc_hostname(host), __func__);
	}
#endif

out:
	mmc_card_clr_suspended(host->card);
	mmc_release_host(host);

	return err;
}

/*
 * Shutdown callback
 */
static int mmc_shutdown(struct mmc_host *host)
{
	int err = 0;

	/*
	 * In a specific case for poweroff notify, we need to resume the card
	 * before we can shut it down properly.
	 */
	if (mmc_can_poweroff_notify(host->card) &&
	    !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
		err = _mmc_resume(host);

	if (!err)
		err = _mmc_suspend(host, false);

	return err;
}

/*
 * Callback for resume.
 */
static int mmc_resume(struct mmc_host *host)
{
	pm_runtime_enable(&host->card->dev);
	return 0;
}

/*
 * Callback for runtime_suspend.
 */
static int mmc_runtime_suspend(struct mmc_host *host)
{
	int err;

	if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
		return 0;

	err = _mmc_suspend(host, true);
	if (err)
		pr_err("%s: error %d doing aggressive suspend\n",
		       mmc_hostname(host), err);

	return err;
}

/*
 * Callback for runtime_resume.
 */
static int mmc_runtime_resume(struct mmc_host *host)
{
	int err;

	err = _mmc_resume(host);
	if (err && err != -ENOMEDIUM)
		pr_err("%s: error %d doing runtime resume\n",
		       mmc_hostname(host), err);

	return 0;
}
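
/*
 * A hardware reset via the RST_n pin is only usable if the card has the
 * RST_n function enabled in EXT_CSD.
 */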
static int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n_function;

	rst_n_function = card->ext_csd.rst_n_function;
	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
		return 0;
	return 1;
}

static int mmc_reset(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	/*
	 * In the case of recovery, we can't expect flushing the cache to work
	 * always, but we have a go and ignore errors.
	 */
	mmc_flush_cache(host->card);

	if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
	     mmc_can_reset(card)) {
		/* If the card accepts the RST_n signal, send it. */
		mmc_set_clock(host, host->f_init);
		host->ops->hw_reset(host);
		/* Set initial state and call mmc_set_ios */
		mmc_set_initial_state(host);
	} else {
		/* Do a brute force power cycle */
		/* mmc_power_cycle(host, card->ocr); */
		/* mmc_pwrseq_reset(host); */

		/*
		 * Instead, power cycle by only setting the initial state,
		 * in order to keep the power-on write protection.
		 */
		mmc_set_clock(host, host->f_init);
#ifdef CONFIG_MTK_EMMC_HW_CQ
		/* reset here; the host driver will check MMC_CAP_HW_RESET */
		if (host->ops->hw_reset)
			host->ops->hw_reset(host);
#endif
		mmc_set_initial_state(host);
	}
	return mmc_init_card(host, card->ocr, card);
}

static const struct mmc_bus_ops mmc_ops = {
	.remove = mmc_remove,
	.detect = mmc_detect,
	.suspend = mmc_suspend,
	.resume = mmc_resume,
	.runtime_suspend = mmc_runtime_suspend,
	.runtime_resume = mmc_runtime_resume,
	.alive = mmc_alive,
	.shutdown = mmc_shutdown,
	.reset = mmc_reset,
};

/*
 * Starting point for MMC card init.
 */
int mmc_attach_mmc(struct mmc_host *host)
{
	int err;
	u32 ocr = 0, rocr;

	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting attach */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	err = mmc_send_op_cond(host, 0, &ocr);
	if (err)
		return err;

	mmc_attach_bus(host, &mmc_ops);
	if (host->ocr_avail_mmc)
		host->ocr_avail = host->ocr_avail_mmc;

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	rocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!rocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, rocr, NULL);
	if (err)
		goto err;

	mmc_release_host(host);
	err = mmc_add_card(host->card);
	if (err)
		goto remove_card;

	mmc_claim_host(host);
	return 0;

remove_card:
	mmc_remove_card(host->card);
	mmc_claim_host(host);
	host->card = NULL;
err:
	mmc_detach_bus(host);

	pr_err("%s: error %d whilst initialising MMC card\n",
	       mmc_hostname(host), err);

	return err;
}