/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>

#include "mmci.h"
#include "mmci_qcom_dml.h"

#define DRIVER_NAME "mmci-pl18x"
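/*
 * Fallback maximum operating frequency in Hz. It can be overridden via the
 * "fmax" module parameter declared at the bottom of this file, and is only
 * consulted by mmci_probe() when no other f_max is supplied (see below).
 */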
static unsigned int fmax = 515633;
/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @clkreg_8bit_bus_enable: enable value for 8 bit bus
 * @clkreg_neg_edge_enable: enable value for inverted data/cmd output
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *	      is asserted (likewise for RX)
 * @data_cmd_enable: enable value for data commands
 * @st_sdio: enable ST specific SDIO logic
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @datactrl_mask_ddrmode: DDR mode mask in datactrl register
 * @blksz_datactrl16: true if block size is at b16..b30 position in datactrl
 *		      register
 * @blksz_datactrl4: true if block size is at b4..b16 position in datactrl
 *		     register
 * @datactrl_mask_sdio: SDIO enable mask in datactrl register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @f_max: maximum clock frequency supported by the controller
 * @signal_direction: input/output direction of bus signals can be indicated
 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
 * @busy_detect: true if busy detection on DAT0 is supported
 * @pwrreg_nopower: bits in MMCIPOWER don't control the external power supply
 * @explicit_mclk_control: enable explicit mclk control in driver
 * @qcom_fifo: enables qcom specific fifo pio read logic
 * @qcom_dml: enables qcom specific dma glue for dma transfers
 * @reversed_irq_handling: handle data irq before cmd irq
 */
struct variant_data {
	unsigned int clkreg;
	unsigned int clkreg_enable;
	unsigned int clkreg_8bit_bus_enable;
	unsigned int clkreg_neg_edge_enable;
	unsigned int datalength_bits;
	unsigned int fifosize;
	unsigned int fifohalfsize;
	unsigned int data_cmd_enable;
	unsigned int datactrl_mask_ddrmode;
	unsigned int datactrl_mask_sdio;
	bool st_sdio;
	bool st_clkdiv;
	bool blksz_datactrl16;
	bool blksz_datactrl4;
	u32 pwrreg_powerup;
	u32 f_max;
	bool signal_direction;
	bool pwrreg_clkgate;
	bool busy_detect;
	bool pwrreg_nopower;
	bool explicit_mclk_control;
	bool qcom_fifo;
	bool qcom_dml;
	bool reversed_irq_handling;
};
static struct variant_data variant_arm = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
	.datalength_bits = 16,
	.pwrreg_powerup = MCI_PWR_UP,
	.f_max = 100000000,
	.reversed_irq_handling = true,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize = 128 * 4,
	.fifohalfsize = 64 * 4,
	.datalength_bits = 16,
	.pwrreg_powerup = MCI_PWR_UP,
	.f_max = 100000000,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize = 128 * 4,
	.fifohalfsize = 64 * 4,
	.clkreg_enable = MCI_ARM_HWFCEN,
	.datalength_bits = 16,
	.pwrreg_powerup = MCI_PWR_UP,
	.f_max = 100000000,
};

static struct variant_data variant_u300 = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg_enable = MCI_ST_U300_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.datalength_bits = 16,
	.datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
	.st_sdio = true,
	.pwrreg_powerup = MCI_PWR_ON,
	.f_max = 100000000,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.pwrreg_nopower = true,
};

static struct variant_data variant_nomadik = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.datalength_bits = 24,
	.datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
	.st_sdio = true,
	.st_clkdiv = true,
	.pwrreg_powerup = MCI_PWR_ON,
	.f_max = 100000000,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.pwrreg_nopower = true,
};

static struct variant_data variant_ux500 = {
	.fifosize = 30 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
	.datalength_bits = 24,
	.datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
	.st_sdio = true,
	.st_clkdiv = true,
	.pwrreg_powerup = MCI_PWR_ON,
	.f_max = 100000000,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.busy_detect = true,
	.pwrreg_nopower = true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize = 30 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
	.datactrl_mask_ddrmode = MCI_ST_DPSM_DDRMODE,
	.datalength_bits = 24,
	.datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
	.st_sdio = true,
	.st_clkdiv = true,
	.blksz_datactrl16 = true,
	.pwrreg_powerup = MCI_PWR_ON,
	.f_max = 100000000,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.busy_detect = true,
	.pwrreg_nopower = true,
};

static struct variant_data variant_qcom = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_QCOM_CLK_FLOWENA |
			 MCI_QCOM_CLK_SELECT_IN_FBCLK,
	.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
	.datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
	.data_cmd_enable = MCI_QCOM_CSPM_DATCMD,
	.blksz_datactrl4 = true,
	.datalength_bits = 24,
	.pwrreg_powerup = MCI_PWR_UP,
	.f_max = 208000000,
	.explicit_mclk_control = true,
	.qcom_fifo = true,
	.qcom_dml = true,
};
static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	return busy;
}

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	return 0;
}

static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}
/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep ST Micro busy mode if enabled */
	datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (variant->explicit_mclk_control) {
			host->cclk = host->mclk;
		} else if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
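			/*
			 * Worked example, assuming hypothetical rates of
			 * mclk = 100 MHz and desired = 400 kHz:
			 * clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248,
			 * cclk = 100000000 / (248 + 2) = 400 kHz.
			 */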
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
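			/*
			 * Same hypothetical rates as above: clkdiv =
			 * 100000000 / (2 * 400000) - 1 = 124, so cclk =
			 * 100000000 / (2 * (124 + 1)) = 400 kHz.
			 */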
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= variant->clkreg_8bit_bus_enable;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		clk |= variant->clkreg_neg_edge_enable;

	mmci_write_clkreg(host, clk);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);
}
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;
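	/*
	 * With a single IRQ line (host->singleirq), the PIO status bits
	 * normally routed to IRQ1 via MMCIMASK1 must also be unmasked in
	 * MMCIMASK0 so they raise the one shared interrupt; mmci_irq()
	 * then dispatches them to mmci_pio_irq() by checking MMCIMASK1.
	 */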
	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
	const char *rxname, *txname;
	struct variant_data *variant = host->variant;

	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

	/* initialize pre request cookie */
	host->next_data.cookie = 1;
	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally; however, if it is
	 * specified but cannot be located, DMA will be disabled.
	 */
	if (host->dma_rx_channel && !host->dma_tx_channel)
		host->dma_tx_channel = host->dma_rx_channel;

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	if (variant->qcom_dml && host->dma_rx_channel && host->dma_tx_channel)
		if (dml_hw_init(host, host->mmc->parent->of_node))
			variant->qcom_dml = false;
}
/*
 * This is used in the probe()/remove() paths and when giving up on a
 * buggy DMA controller, so inline it so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
	host->data->host_cookie = 0;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
}

static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_data_error(host);
		if (!data->error)
			data->error = -EIO;
	}

	if (!data->host_cookie)
		mmci_dma_unmap(host, data);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_current = NULL;
	host->dma_desc_current = NULL;
}

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
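	/*
	 * Note: both the src_* and dst_* halves are filled in above; the
	 * dmaengine core only honours the half matching conf.direction,
	 * which is chosen per transfer just below.
	 */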
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		flags |= DMA_PREP_INTERRUPT;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, flags);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}

static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;
	/* No job was prepared, so do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	if (host->variant->qcom_dml)
		dml_start_xfer(host, data);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);
	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire the next DMA request. When that happens, the DATAEND
	 * interrupt is raised and mmci_data_irq() finalizes the DMA job.
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;
	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	BUG_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;
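	/*
	 * host_cookie is a non-zero token marking this request's data as
	 * pre-prepared (0 means "not prepared"). The increment below wraps
	 * back to 1 if the signed counter overflows, so the token can never
	 * be confused with the "unprepared" value.
	 */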
	if (!mmci_dma_prep_next(host, data))
		data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_host_next *next = &host->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = host->dma_rx_channel;
		else
			chan = host->dma_tx_channel;
		dmaengine_terminate_all(chan);

		if (host->dma_desc_current == next->dma_desc)
			host->dma_desc_current = NULL;

		if (host->dma_current == next->dma_chan)
			host->dma_current = NULL;

		next->dma_desc = NULL;
		next->dma_chan = NULL;
		data->host_cookie = 0;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_finalize(struct mmci_host *host,
				     struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, NSEC_PER_SEC);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else if (variant->blksz_datactrl4)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
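	/*
	 * For example, with 512-byte blocks: the default encoding programs
	 * log2(512) = 9 into bits [7:4], while the blksz_datactrl16 and
	 * blksz_datactrl4 variants program the raw 512 at bit 16 and bit 4
	 * respectively.
	 */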
	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		u32 clk;

		datactrl |= variant->datactrl_mask_sdio;

		/*
		 * The ST Micro variant for SDIO small write transfers
		 * needs to have clock H/W flow control disabled,
		 * otherwise the transfer will not start. The threshold
		 * depends on the rate of MCLK.
		 */
		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
		    (host->size < 8 ||
		     (host->size <= 8 && host->mclk > 50000000)))
			clk = host->clk_reg & ~variant->clkreg_enable;
		else
			clk = host->clk_reg | variant->clkreg_enable;

		mmci_write_clkreg(host, clk);
	}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		datactrl |= variant->datactrl_mask_ddrmode;
	/*
	 * Attempt to use DMA operation mode; if this
	 * fails, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		mmci_reg_delay(host);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		c |= host->variant->data_cmd_enable;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* Make sure we have data to handle */
	if (!data)
		return;

	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc, busy_resp;

	if (!cmd)
		return;

	sbc = (cmd == host->mrq->sbc);
	busy_resp = host->variant->busy_detect && (cmd->flags & MMC_RSP_BUSY);

	if (!((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
		MCI_CMDSENT|MCI_CMDRESPEND)))
		return;

	/* Check if we need to wait for busy completion. */
	if (host->busy_status && (status & MCI_ST_CARDBUSY))
		return;

	/* Enable busy completion if needed and supported. */
	if (!host->busy_status && busy_resp &&
	    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
	    (readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) {
		writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND,
		       base + MMCIMASK0);
		host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
		return;
	}

	/* At busy completion, mask the IRQ and complete the request. */
	if (host->busy_status) {
		writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND,
		       base + MMCIMASK0);
		host->busy_status = 0;
	}

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host)) {
				mmci_dma_data_error(host);
				mmci_dma_unmap(host, host->data);
			}
			mmci_stop_data(host);
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
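	/*
	 * MMCIFIFOCNT holds the number of words still to be transferred,
	 * so the data already received is the remaining byte count minus
	 * FIFOCNT * 4.
	 */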
	return remain - (readl(host->base + MMCIFIFOCNT) << 2);
}

static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
	/*
	 * On Qcom SDCC4, only 8 words are used in each burst, so only 8
	 * addresses from the fifo range should be used.
	 */
	if (status & MCI_RXFIFOHALFFULL)
		return host->variant->fifohalfsize;
	else if (status & MCI_RXDATAAVLBL)
		return 4;

	return 0;
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status = readl(host->base + MMCISTATUS);
	int host_remain = host->size;

	do {
		int count = host->get_rx_fifocnt(host, status, host_remain);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);
		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 to the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		/*
		 * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
		 * enabled) since the HW seems to be triggering the IRQ on both
		 * edges while monitoring DAT0 for busy completion.
		 */
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/* Don't poll for busy completion in irq context. */
		if (host->busy_status)
			status &= ~MCI_ST_CARDBUSY;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON, so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->pwr_reg_add;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for
			 * something else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	if (host->variant->explicit_mclk_control &&
	    ios->clock != host->clock_cache) {
		ret = clk_set_rate(host->clk, ios->clock);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Error setting clock rate (%d)\n", ret);
		else
			host->mclk = clk_get_rate(host->clk);
	}
	host->clock_cache = ios->clock;

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
}

static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
	}

	return ret;
}
static struct mmc_host_ops mmci_ops = {
	.request = mmci_request,
	.pre_req = mmci_pre_request,
	.post_req = mmci_post_request,
	.set_ios = mmci_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};

static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	int ret = mmc_of_parse(mmc);

	if (ret)
		return ret;

	if (of_get_property(np, "st,sig-dir-dat0", NULL))
		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
	if (of_get_property(np, "st,sig-dir-dat2", NULL))
		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
	if (of_get_property(np, "st,sig-dir-dat31", NULL))
		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
	if (of_get_property(np, "st,sig-dir-dat74", NULL))
		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
	if (of_get_property(np, "st,sig-dir-cmd", NULL))
		host->pwr_reg_add |= MCI_ST_CMDDIREN;
	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
		host->pwr_reg_add |= MCI_ST_FBCLKEN;

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	return 0;
}

static int mmci_probe(struct amba_device *dev,
		      const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmci_of_parse(np, mmc);
	if (ret)
		goto host_free;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	if (variant->qcom_fifo)
		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
	else
		host->get_rx_fifocnt = mmci_get_rx_fifocnt;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz, so we try to adjust
	 * the clock down to this (if possible). The cap is taken from the
	 * variant's f_max, since the Qcom variant allows up to 208 MHz.
	 */
	if (host->mclk > variant->f_max) {
		ret = clk_set_rate(host->clk, variant->f_max);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}

	host->phybase = dev->res.start;
	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto clk_disable;
	}
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations, which means that the minimum divider
	 * differs too. On Qualcomm-like controllers, use the rate closest
	 * to 100 kHz that the clock can provide.
	 */
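	/*
	 * With the 8-bit divider field capped at 255, the ST equation
	 * f = mclk / (clkdiv + 2) gives a minimum of mclk / 257, while the
	 * ARM equation f = mclk / (2 * (clkdiv + 1)) gives mclk / 512,
	 * matching the divisors used below.
	 */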
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else if (variant->explicit_mclk_control)
		mmc->f_min = clk_round_rate(host->clk, 100000);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If no maximum operating frequency is supplied, fall back to use
	 * the module parameter, which has a (low) default value in case it
	 * is not specified. Either value must not exceed the clock rate into
	 * the block, of course.
	 */
	if (mmc->f_max)
		mmc->f_max = variant->explicit_mclk_control ?
				min(variant->f_max, mmc->f_max) :
				min(host->mclk, mmc->f_max);
	else
		mmc->f_max = variant->explicit_mclk_control ?
				fmax : min(host->mclk, fmax);

	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	/* Get regulators and the supported OCR mask */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto clk_disable;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	/* DT takes precedence over platform data. */
	if (!np) {
		if (!plat->cd_invert)
			mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
	}

	/* We support these capabilities. */
	mmc->caps |= MMC_CAP_CMD23;

	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		mmc->max_busy_timeout = 0;
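		/*
		 * The MMC core only enforces max_busy_timeout when it is
		 * non-zero, so 0 here advertises no particular limit.
		 */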
	}

	mmc->ops = &mmci_ops;

	/* We support these PM capabilities. */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;
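	/* i.e. max_req_size / max_blk_size, with max_blk_size = 2^11 above */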
	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);
	/*
	 * If:
	 * - not using DT but using a descriptor table, or
	 * - using a table of descriptors ALONGSIDE DT,
	 * look up these descriptors named "cd" and "wp" right here; fail
	 * silently if these do not exist and proceed to try platform data.
	 */
	if (!np) {
		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
		if (ret < 0) {
			if (ret == -EPROBE_DEFER)
				goto clk_disable;
			else if (gpio_is_valid(plat->gpio_cd)) {
				ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
				if (ret)
					goto clk_disable;
			}
		}

		ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
		if (ret < 0) {
			if (ret == -EPROBE_DEFER)
				goto clk_disable;
			else if (gpio_is_valid(plat->gpio_wp)) {
				ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
				if (ret)
					goto clk_disable;
			}
		}
	}

	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
			       DRIVER_NAME " (cmd)", host);
	if (ret)
		goto clk_disable;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
				       IRQF_SHARED, DRIVER_NAME " (pio)", host);
		if (ret)
			goto clk_disable;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);

	mmc_add_host(mmc);

	pm_runtime_put(&dev->dev);
	return 0;

clk_disable:
	clk_disable_unprepare(host->clk);
host_free:
	mmc_free_host(mmc);
	return ret;
}
static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return 0;
}

#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_restore(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		pinctrl_pm_select_sleep_state(dev);
		mmci_save(host);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
		mmci_restore(host);
		pinctrl_pm_select_default_state(dev);
	}

	return 0;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};

static struct amba_id mmci_ids[] = {
	{
		.id = 0x00041180,
		.mask = 0xff0fffff,
		.data = &variant_arm,
	},
	{
		.id = 0x01041180,
		.mask = 0xff0fffff,
		.data = &variant_arm_extended_fifo,
	},
	{
		.id = 0x02041180,
		.mask = 0xff0fffff,
		.data = &variant_arm_extended_fifo_hwfc,
	},
	{
		.id = 0x00041181,
		.mask = 0x000fffff,
		.data = &variant_arm,
	},
	/* ST Micro variants */
	{
		.id = 0x00180180,
		.mask = 0x00ffffff,
		.data = &variant_u300,
	},
	{
		.id = 0x10180180,
		.mask = 0xf0ffffff,
		.data = &variant_nomadik,
	},
	{
		.id = 0x00280180,
		.mask = 0x00ffffff,
		.data = &variant_nomadik,
	},
	{
		.id = 0x00480180,
		.mask = 0xf0ffffff,
		.data = &variant_ux500,
	},
	{
		.id = 0x10480180,
		.mask = 0xf0ffffff,
		.data = &variant_ux500v2,
	},
	/* Qualcomm variants */
	{
		.id = 0x00051180,
		.mask = 0x000fffff,
		.data = &variant_qcom,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv = {
		.name = DRIVER_NAME,
		.pm = &mmci_dev_pm_ops,
	},
	.probe = mmci_probe,
	.remove = mmci_remove,
	.id_table = mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");