/*
 * WM8505/WM8650 SD/MMC Host Controller
 *
 * Copyright (C) 2010 Tony Prisk
 * Copyright (C) 2008 WonderMedia Technologies, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/gpio.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/byteorder.h>

#define DRIVER_NAME "wmt-sdhc"

/* MMC/SD controller registers */
#define SDMMC_CTLR		0x00
#define SDMMC_CMD		0x01
#define SDMMC_RSPTYPE		0x02
#define SDMMC_ARG		0x04
#define SDMMC_BUSMODE		0x08
#define SDMMC_BLKLEN		0x0C
#define SDMMC_BLKCNT		0x0E
#define SDMMC_RSP		0x10
#define SDMMC_CBCR		0x20
#define SDMMC_INTMASK0		0x24
#define SDMMC_INTMASK1		0x25
#define SDMMC_STS0		0x28
#define SDMMC_STS1		0x29
#define SDMMC_STS2		0x2A
#define SDMMC_STS3		0x2B
#define SDMMC_RSPTIMEOUT	0x2C
#define SDMMC_CLK		0x30	/* VT8500 only */
#define SDMMC_EXTCTRL		0x34
#define SDMMC_SBLKLEN		0x38
#define SDMMC_DMATIMEOUT	0x3C

/* SDMMC_CTLR bit fields */
#define CTLR_CMD_START		0x01
#define CTLR_CMD_WRITE		0x04
#define CTLR_FIFO_RESET		0x08

/* SDMMC_BUSMODE bit fields */
#define BM_SPI_MODE		0x01
#define BM_FOURBIT_MODE		0x02
#define BM_EIGHTBIT_MODE	0x04
#define BM_SD_OFF		0x10
#define BM_SPI_CS		0x20
#define BM_SD_POWER		0x40
#define BM_SOFT_RESET		0x80

/* SDMMC_BLKLEN bit fields */
#define BLKL_CRCERR_ABORT	0x0800
#define BLKL_CD_POL_HIGH	0x1000
#define BLKL_GPI_CD		0x2000
#define BLKL_DATA3_CD		0x4000
#define BLKL_INT_ENABLE		0x8000

/* SDMMC_INTMASK0 bit fields */
#define INT0_MBLK_TRAN_DONE_INT_EN	0x10
#define INT0_BLK_TRAN_DONE_INT_EN	0x20
#define INT0_CD_INT_EN			0x40
#define INT0_DI_INT_EN			0x80

/* SDMMC_INTMASK1 bit fields */
#define INT1_CMD_RES_TRAN_DONE_INT_EN	0x02
#define INT1_CMD_RES_TOUT_INT_EN	0x04
#define INT1_MBLK_AUTO_STOP_INT_EN	0x08
#define INT1_DATA_TOUT_INT_EN		0x10
#define INT1_RESCRC_ERR_INT_EN		0x20
#define INT1_RCRC_ERR_INT_EN		0x40
#define INT1_WCRC_ERR_INT_EN		0x80

/* SDMMC_STS0 bit fields */
#define STS0_WRITE_PROTECT	0x02
#define STS0_CD_DATA3		0x04
#define STS0_CD_GPI		0x08
#define STS0_MBLK_DONE		0x10
#define STS0_BLK_DONE		0x20
#define STS0_CARD_DETECT	0x40
#define STS0_DEVICE_INS		0x80

/* SDMMC_STS1 bit fields */
#define STS1_SDIO_INT		0x01
#define STS1_CMDRSP_DONE	0x02
#define STS1_RSP_TIMEOUT	0x04
#define STS1_AUTOSTOP_DONE	0x08
#define STS1_DATA_TIMEOUT	0x10
#define STS1_RSP_CRC_ERR	0x20
#define STS1_RCRC_ERR		0x40
#define STS1_WCRC_ERR		0x80

/* SDMMC_STS2 bit fields */
#define STS2_CMD_RES_BUSY	0x10
#define STS2_DATARSP_BUSY	0x20
#define STS2_DIS_FORCECLK	0x80

/* SDMMC_EXTCTRL bit fields */
#define EXT_EIGHTBIT		0x04

/* MMC/SD DMA Controller Registers */
#define SDDMA_GCR		0x100
#define SDDMA_IER		0x104
#define SDDMA_ISR		0x108
#define SDDMA_DESPR		0x10C
#define SDDMA_RBR		0x110
#define SDDMA_DAR		0x114
#define SDDMA_BAR		0x118
#define SDDMA_CPR		0x11C
#define SDDMA_CCR		0x120

/* SDDMA_GCR bit fields */
#define DMA_GCR_DMA_EN		0x00000001
#define DMA_GCR_SOFT_RESET	0x00000100

/* SDDMA_IER bit fields */
#define DMA_IER_INT_EN		0x00000001

/* SDDMA_ISR bit fields */
#define DMA_ISR_INT_STS		0x00000001

/* SDDMA_RBR bit fields */
#define DMA_RBR_FORMAT		0x40000000
#define DMA_RBR_END		0x80000000

/* SDDMA_CCR bit fields */
#define DMA_CCR_RUN		0x00000080
#define DMA_CCR_IF_TO_PERIPHERAL	0x00000000
#define DMA_CCR_PERIPHERAL_TO_IF	0x00400000

/* SDDMA_CCR event status */
#define DMA_CCR_EVT_NO_STATUS	0x00000000
#define DMA_CCR_EVT_UNDERRUN	0x00000001
#define DMA_CCR_EVT_OVERRUN	0x00000002
#define DMA_CCR_EVT_DESP_READ	0x00000003
#define DMA_CCR_EVT_DATA_RW	0x00000004
#define DMA_CCR_EVT_EARLY_END	0x00000005
#define DMA_CCR_EVT_SUCCESS	0x0000000F

#define PDMA_READ		0x00
#define PDMA_WRITE		0x01

#define WMT_SD_POWER_OFF	0
#define WMT_SD_POWER_ON		1

struct wmt_dma_descriptor {
	u32 flags;
	u32 data_buffer_addr;
	u32 branch_addr;
	u32 reserved1;
};

struct wmt_mci_caps {
	unsigned int f_min;
	unsigned int f_max;
	u32 ocr_avail;
	u32 caps;
	u32 max_seg_size;
	u32 max_segs;
	u32 max_blk_size;
};

struct wmt_mci_priv {
	struct mmc_host *mmc;
	void __iomem *sdmmc_base;

	int irq_regular;
	int irq_dma;

	void *dma_desc_buffer;
	dma_addr_t dma_desc_device_addr;

	struct completion cmdcomp;
	struct completion datacomp;

	struct completion *comp_cmd;
	struct completion *comp_dma;

	struct mmc_request *req;
	struct mmc_command *cmd;

	struct clk *clk_sdmmc;
	struct device *dev;

	u8 power_inverted;
	u8 cd_inverted;
};
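
/*
 * BM_SD_OFF in the bus-mode register appears to power the slot down, so
 * power is on while that bit is clear.  Boards wired the other way round
 * set the "sdon-inverted" devicetree property, which probe() turns into
 * priv->power_inverted to flip the polarity here.
 */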
static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable)
{
	u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);

	if (enable ^ priv->power_inverted)
		reg_tmp &= ~BM_SD_OFF;
	else
		reg_tmp |= BM_SD_OFF;

	writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE);
}
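
/*
 * The 16 response bytes live in the register window starting at SDMMC_RSP.
 * The helper below packs them into the four 32-bit words of cmd->resp[],
 * reading from offset 1 upwards and wrapping the final byte back to offset
 * 0, then passing each word through cpu_to_be32() so the MMC core sees the
 * bits in the order it expects.
 */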
static void wmt_mci_read_response(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	int idx1, idx2;
	u8 tmp_resp;
	u32 response;

	priv = mmc_priv(mmc);

	for (idx1 = 0; idx1 < 4; idx1++) {
		response = 0;
		for (idx2 = 0; idx2 < 4; idx2++) {
			if ((idx1 == 3) && (idx2 == 3))
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP);
			else
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP +
						 (idx1 * 4) + idx2 + 1);
			response |= (tmp_resp << (idx2 * 8));
		}
		priv->cmd->resp[idx1] = cpu_to_be32(response);
	}
}

static void wmt_mci_start_command(struct wmt_mci_priv *priv)
{
	u32 reg_tmp;

	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR);
}

static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype,
				u32 arg, u8 rsptype)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* write command, arg, resptype registers */
	writeb(command, priv->sdmmc_base + SDMMC_CMD);
	writel(arg, priv->sdmmc_base + SDMMC_ARG);
	writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* ensure clock enabled - VT3465 */
	wmt_set_sd_power(priv, WMT_SD_POWER_ON);

	/* clear status bits */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS2);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS3);

	/* set command type */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb((reg_tmp & 0x0F) | (cmdtype << 4),
	       priv->sdmmc_base + SDMMC_CTLR);

	return 0;
}

static void wmt_mci_disable_dma(struct wmt_mci_priv *priv)
{
	writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR);
	writel(0, priv->sdmmc_base + SDDMA_IER);
}
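
/*
 * Called once both the command and DMA halves of a data request have
 * signalled completion.  It unmaps the scatter-gather list and, for
 * multi-block transfers, issues the stop command stored in data->stop;
 * the final mmc_request_done() for that case happens in the regular
 * interrupt handler when the stop command completes.
 */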
static void wmt_complete_data_request(struct wmt_mci_priv *priv)
{
	struct mmc_request *req;
	req = priv->req;

	req->data->bytes_xfered = req->data->blksz * req->data->blocks;

	/* unmap the DMA pages used for write data */
	if (req->data->flags & MMC_DATA_WRITE)
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_TO_DEVICE);
	else
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_FROM_DEVICE);

	/* Check if the DMA ISR returned a data error */
	if ((req->cmd->error) || (req->data->error))
		mmc_request_done(priv->mmc, req);
	else {
		wmt_mci_read_response(priv->mmc);
		if (!req->data->stop) {
			/* single-block read/write requests end here */
			mmc_request_done(priv->mmc, req);
		} else {
			/*
			 * we change the priv->cmd variable so the response is
			 * stored in the stop struct rather than the original
			 * calling command struct
			 */
			priv->comp_cmd = &priv->cmdcomp;
			init_completion(priv->comp_cmd);
			priv->cmd = req->data->stop;
			wmt_mci_send_command(priv->mmc, req->data->stop->opcode,
					     7, req->data->stop->arg, 9);
			wmt_mci_start_command(priv);
		}
	}
}
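
/*
 * DMA interrupt: fires when the PDMA engine finishes (or aborts) the
 * descriptor chain.  Anything other than DMA_CCR_EVT_SUCCESS in the low
 * nibble of SDDMA_CCR is treated as a data error.  The request is only
 * finished here if the command-side interrupt has already completed;
 * otherwise the regular ISR finishes it.
 */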
static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;
	int status;

	priv = (struct wmt_mci_priv *)data;

	status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F;
	if (status != DMA_CCR_EVT_SUCCESS) {
		dev_err(priv->dev, "DMA Error: Status = %d\n", status);
		priv->req->data->error = -ETIMEDOUT;
		complete(priv->comp_dma);
		return IRQ_HANDLED;
	}

	priv->req->data->error = 0;

	wmt_mci_disable_dma(priv);

	complete(priv->comp_dma);

	if (priv->comp_cmd) {
		if (completion_done(priv->comp_cmd)) {
			/*
			 * if the command (regular) interrupt has already
			 * completed, finish off the request otherwise we wait
			 * for the command interrupt and finish from there.
			 */
			wmt_complete_data_request(priv);
		}
	}

	return IRQ_HANDLED;
}
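
/*
 * Regular (command/status) interrupt.  It handles three cases: card
 * insertion/removal (which also aborts any request in flight), completion
 * or timeout of non-data and stop commands, and the command half of data
 * requests, where whichever of the two interrupts arrives last calls
 * wmt_complete_data_request().
 */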
static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;
	u32 status0;
	u32 status1;
	u32 status2;
	u32 reg_tmp;
	int cmd_done;

	priv = (struct wmt_mci_priv *)data;
	cmd_done = 0;
	status0 = readb(priv->sdmmc_base + SDMMC_STS0);
	status1 = readb(priv->sdmmc_base + SDMMC_STS1);
	status2 = readb(priv->sdmmc_base + SDMMC_STS2);

	/* Check for card insertion */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
	if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) {
		mmc_detect_change(priv->mmc, 0);
		if (priv->cmd)
			priv->cmd->error = -ETIMEDOUT;
		if (priv->comp_cmd)
			complete(priv->comp_cmd);
		if (priv->comp_dma) {
			wmt_mci_disable_dma(priv);
			complete(priv->comp_dma);
		}
		writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0);
		return IRQ_HANDLED;
	}

	if ((!priv->req->data) ||
	    ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) {
		/* handle non-data & stop_transmission requests */
		if (status1 & STS1_CMDRSP_DONE) {
			priv->cmd->error = 0;
			cmd_done = 1;
		} else if ((status1 & STS1_RSP_TIMEOUT) ||
			   (status1 & STS1_DATA_TIMEOUT)) {
			priv->cmd->error = -ETIMEDOUT;
			cmd_done = 1;
		}

		if (cmd_done) {
			priv->comp_cmd = NULL;

			if (!priv->cmd->error)
				wmt_mci_read_response(priv->mmc);

			priv->cmd = NULL;

			mmc_request_done(priv->mmc, priv->req);
		}
	} else {
		/* handle data requests */
		if (status1 & STS1_CMDRSP_DONE) {
			if (priv->cmd)
				priv->cmd->error = 0;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
		}

		if ((status1 & STS1_RSP_TIMEOUT) ||
		    (status1 & STS1_DATA_TIMEOUT)) {
			if (priv->cmd)
				priv->cmd->error = -ETIMEDOUT;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
			if (priv->comp_dma) {
				wmt_mci_disable_dma(priv);
				complete(priv->comp_dma);
			}
		}

		if (priv->comp_dma) {
			/*
			 * If the dma interrupt has already completed, finish
			 * off the request; otherwise we wait for the DMA
			 * interrupt and finish from there.
			 */
			if (completion_done(priv->comp_dma))
				wmt_complete_data_request(priv);
		}
	}

	writeb(status0, priv->sdmmc_base + SDMMC_STS0);
	writeb(status1, priv->sdmmc_base + SDMMC_STS1);
	writeb(status2, priv->sdmmc_base + SDMMC_STS2);

	return IRQ_HANDLED;
}

static void wmt_reset_hardware(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* reset controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* enable GPI pin to detect card */
	writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN);

	/* clear interrupt status */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* setup interrupts */
	writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base +
	       SDMMC_INTMASK0);
	writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN |
	       INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1);

	/* set the DMA timeout */
	writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT);

	/* auto clock freezing enable */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2);
	writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2);

	/* set a default clock speed of 400 kHz */
	clk_set_rate(priv->clk_sdmmc, 400000);
}

static int wmt_dma_init(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;

	priv = mmc_priv(mmc);

	writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR);
	writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR);
	if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0)
		return 0;
	else
		return 1;
}
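
/*
 * Each PDMA descriptor occupies 16 bytes: a flags word carrying the
 * transfer length (the literals 0x40000000 and 0x80000000 used below
 * correspond to DMA_RBR_FORMAT and DMA_RBR_END, the latter marking the
 * last descriptor in the chain), the data buffer address, the address of
 * the next descriptor, and a reserved word.
 */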
static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc,
		u16 req_count, u32 buffer_addr, u32 branch_addr, int end)
{
	desc->flags = 0x40000000 | req_count;
	if (end)
		desc->flags |= 0x80000000;
	desc->data_buffer_addr = buffer_addr;
	desc->branch_addr = branch_addr;
}

static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* Enable DMA Interrupts */
	writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER);

	/* Write DMA Descriptor Pointer Register */
	writel(descaddr, priv->sdmmc_base + SDDMA_DESPR);

	writel(0x00, priv->sdmmc_base + SDDMA_CCR);

	if (dir == PDMA_WRITE) {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base +
		       SDDMA_CCR);
	} else {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base +
		       SDDMA_CCR);
	}
}

static void wmt_dma_start(struct wmt_mci_priv *priv)
{
	u32 reg_tmp;

	reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
	writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR);
}
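
/*
 * Request entry point.  Non-data commands are simply issued and finished
 * from the regular ISR.  Data commands map the scatter-gather list, build
 * one 16-byte PDMA descriptor per block in the coherent dma_desc_buffer
 * (chained through branch_addr), program the DMA direction, and then kick
 * off the DMA engine and the command together.  Completion is coordinated
 * between the two interrupt handlers via comp_cmd and comp_dma.
 */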
static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct wmt_mci_priv *priv;
	struct wmt_dma_descriptor *desc;
	u8 command;
	u8 cmdtype;
	u32 arg;
	u8 rsptype;
	u32 reg_tmp;
	struct scatterlist *sg;
	int i;
	int sg_cnt;
	int offset;
	u32 dma_address;
	int desc_cnt;

	priv = mmc_priv(mmc);
	priv->req = req;

	/*
	 * Use the cmd variable to pass a pointer to the resp[] structure
	 * This is required on multi-block requests to pass the pointer to the
	 * stop command
	 */
	priv->cmd = req->cmd;

	command = req->cmd->opcode;
	arg = req->cmd->arg;
	rsptype = mmc_resp_type(req->cmd);
	cmdtype = 0;

	/* rsptype=7 only valid for SPI commands - should be =2 for SD */
	if (rsptype == 7)
		rsptype = 2;
	/* rsptype=21 is R1B, convert for controller */
	if (rsptype == 21)
		rsptype = 9;

	if (!req->data) {
		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
		wmt_mci_start_command(priv);
		/* completion is now handled in the regular_isr() */
	}
	if (req->data) {
		priv->comp_cmd = &priv->cmdcomp;
		init_completion(priv->comp_cmd);

		wmt_dma_init(mmc);

		/* set controller data length */
		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew((reg_tmp & 0xF800) | (req->data->blksz - 1),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		/* set controller block count */
		writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT);

		desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer;

		if (req->data->flags & MMC_DATA_WRITE) {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_TO_DEVICE);
			cmdtype = 1;
			if (req->data->blocks > 1)
				cmdtype = 3;
		} else {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_FROM_DEVICE);
			cmdtype = 2;
			if (req->data->blocks > 1)
				cmdtype = 4;
		}

		dma_address = priv->dma_desc_device_addr + 16;
		desc_cnt = 0;

		for_each_sg(req->data->sg, sg, sg_cnt, i) {
			offset = 0;
			while (offset < sg_dma_len(sg)) {
				wmt_dma_init_descriptor(desc, req->data->blksz,
						sg_dma_address(sg) + offset,
						dma_address, 0);
				desc++;
				desc_cnt++;
				offset += req->data->blksz;
				dma_address += 16;
				if (desc_cnt == req->data->blocks)
					break;
			}
		}
		desc--;
		desc->flags |= 0x80000000;

		if (req->data->flags & MMC_DATA_WRITE)
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_WRITE);
		else
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_READ);

		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);

		priv->comp_dma = &priv->datacomp;
		init_completion(priv->comp_dma);

		wmt_dma_start(priv);
		wmt_mci_start_command(priv);
	}
}

static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wmt_mci_priv *priv;
	u32 busmode, extctrl;

	priv = mmc_priv(mmc);

	if (ios->power_mode == MMC_POWER_UP) {
		wmt_reset_hardware(mmc);

		wmt_set_sd_power(priv, WMT_SD_POWER_ON);
	}
	if (ios->power_mode == MMC_POWER_OFF)
		wmt_set_sd_power(priv, WMT_SD_POWER_OFF);

	if (ios->clock != 0)
		clk_set_rate(priv->clk_sdmmc, ios->clock);

	busmode = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	extctrl = readb(priv->sdmmc_base + SDMMC_EXTCTRL);

	busmode &= ~(BM_EIGHTBIT_MODE | BM_FOURBIT_MODE);
	extctrl &= ~EXT_EIGHTBIT;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		busmode |= BM_EIGHTBIT_MODE;
		extctrl |= EXT_EIGHTBIT;
		break;
	case MMC_BUS_WIDTH_4:
		busmode |= BM_FOURBIT_MODE;
		break;
	case MMC_BUS_WIDTH_1:
		break;
	}

	writeb(busmode, priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(extctrl, priv->sdmmc_base + SDMMC_EXTCTRL);
}

static int wmt_mci_get_ro(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);

	return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT);
}
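
/*
 * Card presence is sampled from the GPI bit in SDMMC_STS0.  With
 * cd_inverted clear, a clear GPI bit is reported as "card present"; the
 * "cd-inverted" devicetree property flips that polarity for slots wired
 * the other way round.
 */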
static int wmt_mci_get_cd(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);
	u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3;

	return !(cd ^ priv->cd_inverted);
}

static struct mmc_host_ops wmt_mci_ops = {
	.request = wmt_mci_request,
	.set_ios = wmt_mci_set_ios,
	.get_ro = wmt_mci_get_ro,
	.get_cd = wmt_mci_get_cd,
};

/* Controller capabilities */
static struct wmt_mci_caps wm8505_caps = {
	.f_min = 390425,
	.f_max = 50000000,
	.ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34,
	.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |
		MMC_CAP_SD_HIGHSPEED,
	.max_seg_size = 65024,
	.max_segs = 128,
	.max_blk_size = 2048,
};

static const struct of_device_id wmt_mci_dt_ids[] = {
	{ .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps },
	{ /* Sentinel */ },
};
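
/*
 * Probe: match the per-SoC capability table, map the register window,
 * request the two interrupt lines (command/status and DMA), allocate a
 * coherent buffer big enough for one 16-byte descriptor per block of the
 * largest request, grab and enable the SDMMC clock, then reset the
 * controller and register the host with the MMC core.
 */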
static int wmt_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
		of_match_device(wmt_mci_dt_ids, &pdev->dev);
	const struct wmt_mci_caps *wmt_caps;
	int ret;
	int regular_irq, dma_irq;

	if (!of_id || !of_id->data) {
		dev_err(&pdev->dev, "Controller capabilities data missing\n");
		return -EFAULT;
	}

	wmt_caps = of_id->data;

	if (!np) {
		dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
		return -EFAULT;
	}

	regular_irq = irq_of_parse_and_map(np, 0);
	dma_irq = irq_of_parse_and_map(np, 1);

	if (!regular_irq || !dma_irq) {
		dev_err(&pdev->dev, "Getting IRQs failed!\n");
		ret = -ENXIO;
		goto fail1;
	}

	mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to allocate mmc_host\n");
		ret = -ENOMEM;
		goto fail1;
	}

	mmc->ops = &wmt_mci_ops;
	mmc->f_min = wmt_caps->f_min;
	mmc->f_max = wmt_caps->f_max;
	mmc->ocr_avail = wmt_caps->ocr_avail;
	mmc->caps = wmt_caps->caps;

	mmc->max_seg_size = wmt_caps->max_seg_size;
	mmc->max_segs = wmt_caps->max_segs;
	mmc->max_blk_size = wmt_caps->max_blk_size;

	mmc->max_req_size = (16 * 512 * mmc->max_segs);
	mmc->max_blk_count = mmc->max_req_size / 512;

	priv = mmc_priv(mmc);
	priv->mmc = mmc;
	priv->dev = &pdev->dev;

	priv->power_inverted = 0;
	priv->cd_inverted = 0;

	if (of_get_property(np, "sdon-inverted", NULL))
		priv->power_inverted = 1;
	if (of_get_property(np, "cd-inverted", NULL))
		priv->cd_inverted = 1;

	priv->sdmmc_base = of_iomap(np, 0);
	if (!priv->sdmmc_base) {
		dev_err(&pdev->dev, "Failed to map IO space\n");
		ret = -ENOMEM;
		goto fail2;
	}

	priv->irq_regular = regular_irq;
	priv->irq_dma = dma_irq;

	ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv);
	if (ret) {
		dev_err(&pdev->dev, "Register regular IRQ fail\n");
		goto fail3;
	}

	ret = request_irq(dma_irq, wmt_mci_dma_isr, 0, "sdmmc", priv);
	if (ret) {
		dev_err(&pdev->dev, "Register DMA IRQ fail\n");
		goto fail4;
	}

	/* alloc some DMA buffers for descriptors/transfers */
	priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev,
						   mmc->max_blk_count * 16,
						   &priv->dma_desc_device_addr,
						   GFP_KERNEL);
	if (!priv->dma_desc_buffer) {
		dev_err(&pdev->dev, "DMA alloc fail\n");
		ret = -EPERM;
		goto fail5;
	}

	platform_set_drvdata(pdev, mmc);

	priv->clk_sdmmc = of_clk_get(np, 0);
	if (IS_ERR(priv->clk_sdmmc)) {
		dev_err(&pdev->dev, "Error getting clock\n");
		ret = PTR_ERR(priv->clk_sdmmc);
		goto fail5;
	}

	clk_prepare_enable(priv->clk_sdmmc);

	/* configure the controller to a known 'ready' state */
	wmt_reset_hardware(mmc);

	mmc_add_host(mmc);

	dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");

	return 0;
fail5:
	free_irq(dma_irq, priv);
fail4:
	free_irq(regular_irq, priv);
fail3:
	iounmap(priv->sdmmc_base);
fail2:
	mmc_free_host(mmc);
fail1:
	return ret;
}

static int wmt_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	struct resource *res;
	u32 reg_tmp;

	mmc = platform_get_drvdata(pdev);
	priv = mmc_priv(mmc);

	/* reset SD controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writel(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
	writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* release the dma buffers */
	dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16,
			  priv->dma_desc_buffer, priv->dma_desc_device_addr);

	mmc_remove_host(mmc);

	free_irq(priv->irq_regular, priv);
	free_irq(priv->irq_dma, priv);

	iounmap(priv->sdmmc_base);

	clk_disable_unprepare(priv->clk_sdmmc);
	clk_put(priv->clk_sdmmc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	mmc_free_host(mmc);

	dev_info(&pdev->dev, "WMT MCI device removed\n");

	return 0;
}
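
/*
 * Power management: suspend soft-resets the controller, disables the
 * card-detect machinery in SDMMC_BLKLEN and gates the SDMMC clock;
 * resume re-enables the clock, resets the controller again and restores
 * the card-detect and device-insertion interrupt enables.
 */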
#ifdef CONFIG_PM
static int wmt_mci_suspend(struct device *dev)
{
	u32 reg_tmp;
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct wmt_mci_priv *priv;

	if (!mmc)
		return 0;

	priv = mmc_priv(mmc);
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
	       SDMMC_BUSMODE);

	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
	writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);

	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	clk_disable(priv->clk_sdmmc);
	return 0;
}

static int wmt_mci_resume(struct device *dev)
{
	u32 reg_tmp;
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct wmt_mci_priv *priv;

	if (mmc) {
		priv = mmc_priv(mmc);
		clk_enable(priv->clk_sdmmc);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
		writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base +
		       SDMMC_INTMASK0);
	}

	return 0;
}

static const struct dev_pm_ops wmt_mci_pm = {
	.suspend	= wmt_mci_suspend,
	.resume		= wmt_mci_resume,
};

#define wmt_mci_pm_ops (&wmt_mci_pm)

#else	/* !CONFIG_PM */

#define wmt_mci_pm_ops NULL

#endif

static struct platform_driver wmt_mci_driver = {
	.probe = wmt_mci_probe,
	.remove = wmt_mci_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = wmt_mci_pm_ops,
		.of_match_table = wmt_mci_dt_ids,
	},
};

module_platform_driver(wmt_mci_driver);

MODULE_DESCRIPTION("Wondermedia MMC/SD Driver");
MODULE_AUTHOR("Tony Prisk");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids);