mxs-dma.c

/*
 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * Refer to drivers/dma/imx-sdma.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/fsl/mxs-dma.h>
#include <asm/irq.h>
#include <mach/mxs.h>
#include <mach/common.h>

#include "dmaengine.h"

/*
 * NOTE: The term "PIO" throughout the mxs-dma implementation means the
 * PIO mode of mxs apbh-dma and apbx-dma.  In this mode, the DMA engine
 * itself programs the control registers of peripheral devices as part
 * of a command chain, without CPU involvement.
 */
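/*
 * For example (an illustrative sketch, not taken from this driver): a
 * NAND client such as gpmi-nand can place peripheral register values in
 * ccw->pio_words[] and set PIO_NUM accordingly, so the DMA engine loads
 * the peripheral's registers as one step of the command chain.
 */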
#define MXS_DMA_APBH		0
#define MXS_DMA_APBX		1
#define dma_is_apbh()		(mxs_dma->dev_id == MXS_DMA_APBH)

#define APBH_VERSION_LATEST	3
#define apbh_is_old()		(mxs_dma->version < APBH_VERSION_LATEST)

#define HW_APBHX_CTRL0				0x000
#define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN		(1 << 28)
#define BP_APBH_CTRL0_RESET_CHANNEL		16
#define HW_APBHX_CTRL1				0x010
#define HW_APBHX_CTRL2				0x020
#define HW_APBHX_CHANNEL_CTRL			0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL	16
#define HW_APBH_VERSION				(cpu_is_mx23() ? 0x3f0 : 0x800)
#define HW_APBX_VERSION				0x800
#define BP_APBHX_VERSION_MAJOR			24
#define HW_APBHX_CHn_NXTCMDAR(n) \
	(((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(n) \
	(((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70)
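/*
 * Worked example of the 0x70-byte per-channel register stride: on a
 * new-style block, channel 2's NXTCMDAR register sits at
 * 0x110 + 2 * 0x70 = 0x1f0, and its SEMA register at
 * 0x140 + 2 * 0x70 = 0x220.
 */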
/*
 * ccw bits definitions
 *
 * COMMAND:		0..1	(2)
 * CHAIN:		2	(1)
 * IRQ:			3	(1)
 * NAND_LOCK:		4	(1) - not implemented
 * NAND_WAIT4READY:	5	(1) - not implemented
 * DEC_SEM:		6	(1)
 * WAIT4END:		7	(1)
 * HALT_ON_TERMINATE:	8	(1)
 * TERMINATE_FLUSH:	9	(1)
 * RESERVED:		10..11	(2)
 * PIO_NUM:		12..15	(4)
 */
#define BP_CCW_COMMAND		0
#define BM_CCW_COMMAND		(3 << 0)
#define CCW_CHAIN		(1 << 2)
#define CCW_IRQ			(1 << 3)
#define CCW_DEC_SEM		(1 << 6)
#define CCW_WAIT4END		(1 << 7)
#define CCW_HALT_ON_TERM	(1 << 8)
#define CCW_TERM_FLUSH		(1 << 9)
#define BP_CCW_PIO_NUM		12
#define BM_CCW_PIO_NUM		(0xf << 12)

#define BF_CCW(value, field)	(((value) << BP_CCW_##field) & BM_CCW_##field)
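/*
 * BF_CCW() shifts a value to a field's bit position and masks it to the
 * field's width.  For example, BF_CCW(3, PIO_NUM) expands to
 * ((3 << 12) & (0xf << 12)) == 0x3000.
 */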
#define MXS_DMA_CMD_NO_XFER	0
#define MXS_DMA_CMD_WRITE	1
#define MXS_DMA_CMD_READ	2
#define MXS_DMA_CMD_DMA_SENSE	3	/* not implemented */

struct mxs_dma_ccw {
	u32		next;		/* phys address of the next ccw */
	u16		bits;		/* command and flag bits, see above */
	u16		xfer_bytes;	/* transfer length in bytes */
#define MAX_XFER_BYTES	0xff00
	u32		bufaddr;	/* phys address of the data buffer */
#define MXS_PIO_WORDS	16
	u32		pio_words[MXS_PIO_WORDS];
};

#define NUM_CCW	(int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))
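/*
 * Each channel gets one page of ccws.  sizeof(struct mxs_dma_ccw) is
 * 4 + 2 + 2 + 4 + 16 * 4 = 76 bytes, so assuming 4 KiB pages, NUM_CCW
 * works out to 4096 / 76 = 53 command words per channel.
 */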
struct mxs_dma_chan {
	struct mxs_dma_engine		*mxs_dma;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	struct tasklet_struct		tasklet;
	int				chan_irq;
	struct mxs_dma_ccw		*ccw;
	dma_addr_t			ccw_phys;
	int				desc_count;
	enum dma_status			status;
	unsigned int			flags;
#define MXS_DMA_SG_LOOP			(1 << 0)
};

#define MXS_DMA_CHANNELS		16
#define MXS_DMA_CHANNELS_MASK		0xffff

struct mxs_dma_engine {
	int				dev_id;
	unsigned int			version;
	void __iomem			*base;
	struct clk			*clk;
	struct dma_device		dma_device;
	struct device_dma_parameters	dma_parms;
	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
};
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	if (dma_is_apbh() && apbh_is_old())
		writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
	else
		writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
}

static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* set cmd_addr up */
	writel(mxs_chan->ccw_phys,
		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));

	/* write 1 to SEMA to kick off the channel */
	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
}

static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
	mxs_chan->status = DMA_SUCCESS;
}

static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* freeze the channel */
	if (dma_is_apbh() && apbh_is_old())
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);

	mxs_chan->status = DMA_PAUSED;
}

static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* unfreeze the channel */
	if (dma_is_apbh() && apbh_is_old())
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR);

	mxs_chan->status = DMA_IN_PROGRESS;
}

static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mxs_dma_chan, chan);
}

static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	return dma_cookie_assign(tx);
}

static void mxs_dma_tasklet(unsigned long data)
{
	struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;

	if (mxs_chan->desc.callback)
		mxs_chan->desc.callback(mxs_chan->desc.callback_param);
}
static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
	struct mxs_dma_engine *mxs_dma = dev_id;
	u32 stat1, stat2;

	/* completion status */
	stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
	stat1 &= MXS_DMA_CHANNELS_MASK;
	writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR);

	/* error status */
	stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
	writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR);

	/*
	 * A termination error that arrives together with a completion on
	 * the same channel is not treated as an error.  In other words,
	 * only two cases need handling here: (1) a bus error, or (2) a
	 * termination error with no completion.
	 */
	stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
		(~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */
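	/*
	 * Worked example (reading CTRL2's upper half as the error type,
	 * 1 = bus error, 0 = early termination, as the masking above
	 * implies): if channel 3 terminates early but still completes,
	 * stat1 has bit 3 set and stat2 has bit 3 set with bit 19 clear.
	 * Term (1) is 0 because bit 19 is clear; term (2) is 0 because
	 * ~stat1 clears bit 3.  The channel is therefore not flagged as
	 * an error.
	 */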
	/* combine error and completion status for checking */
	stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;

	while (stat1) {
		int channel = fls(stat1) - 1;
		struct mxs_dma_chan *mxs_chan =
			&mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];

		if (channel >= MXS_DMA_CHANNELS) {
			dev_dbg(mxs_dma->dma_device.dev,
				"%s: error in channel %d\n", __func__,
				channel - MXS_DMA_CHANNELS);
			mxs_chan->status = DMA_ERROR;
			mxs_dma_reset_chan(mxs_chan);
		} else {
			if (mxs_chan->flags & MXS_DMA_SG_LOOP)
				mxs_chan->status = DMA_IN_PROGRESS;
			else
				mxs_chan->status = DMA_SUCCESS;
		}

		stat1 &= ~(1 << channel);

		if (mxs_chan->status == DMA_SUCCESS)
			dma_cookie_complete(&mxs_chan->desc);

		/* schedule tasklet on this channel */
		tasklet_schedule(&mxs_chan->tasklet);
	}

	return IRQ_HANDLED;
}
static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_data *data = chan->private;
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int ret;

	if (!data)
		return -EINVAL;

	mxs_chan->chan_irq = data->chan_irq;

	mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
				&mxs_chan->ccw_phys, GFP_KERNEL);
	if (!mxs_chan->ccw) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	memset(mxs_chan->ccw, 0, PAGE_SIZE);

	if (mxs_chan->chan_irq != NO_IRQ) {
		ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
					0, "mxs-dma", mxs_dma);
		if (ret)
			goto err_irq;
	}

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		goto err_clk;

	mxs_dma_reset_chan(mxs_chan);

	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;

	/* the descriptor is ready */
	async_tx_ack(&mxs_chan->desc);

	return 0;

err_clk:
	free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
	dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
	return ret;
}

static void mxs_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	mxs_dma_disable_chan(mxs_chan);

	free_irq(mxs_chan->chan_irq, mxs_dma);

	dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);

	clk_disable_unprepare(mxs_dma->clk);
}
/*
 * How to use the flags for ->device_prep_slave_sg() :
 * [1] If there is only one DMA command in the DMA chain, the code should be:
 *            ......
 *            ->device_prep_slave_sg(DMA_CTRL_ACK);
 *            ......
 * [2] If there are two DMA commands in the DMA chain, the code should be:
 *            ......
 *            ->device_prep_slave_sg(0);
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *            ......
 * [3] If there are more than two DMA commands in the DMA chain, the code
 *     should be:
 *            ......
 *            ->device_prep_slave_sg(0);                                // First
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
 *            ......
 */
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	struct mxs_dma_ccw *ccw;
	struct scatterlist *sg;
	int i, j;
	u32 *pio;
	bool append = flags & DMA_PREP_INTERRUPT;
	int idx = append ? mxs_chan->desc_count : 0;

	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
		return NULL;

	if (sg_len + (append ? idx : 0) > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
			"maximum number of sg exceeded: %d > %d\n",
			sg_len, NUM_CCW);
		goto err_out;
	}

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags = 0;

	/*
	 * If the sg is prepared with the append flag set, it is chained
	 * onto the last sg that was prepared on this channel.
	 */
	if (append) {
		BUG_ON(idx < 1);
		ccw = &mxs_chan->ccw[idx - 1];
		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
		ccw->bits |= CCW_CHAIN;
		ccw->bits &= ~CCW_IRQ;
		ccw->bits &= ~CCW_DEC_SEM;
	} else {
		idx = 0;
	}

	if (direction == DMA_TRANS_NONE) {
		ccw = &mxs_chan->ccw[idx++];

		pio = (u32 *) sgl;
		for (j = 0; j < sg_len; j++)
			ccw->pio_words[j] = *pio++;

		ccw->bits = 0;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_DEC_SEM;
		if (flags & DMA_CTRL_ACK)
			ccw->bits |= CCW_WAIT4END;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
	} else {
		for_each_sg(sgl, sg, sg_len, i) {
			if (sg->length > MAX_XFER_BYTES) {
				dev_err(mxs_dma->dma_device.dev,
					"maximum bytes for sg entry exceeded: %d > %d\n",
					sg->length, MAX_XFER_BYTES);
				goto err_out;
			}

			ccw = &mxs_chan->ccw[idx++];

			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
			ccw->bufaddr = sg->dma_address;
			ccw->xfer_bytes = sg->length;

			ccw->bits = 0;
			ccw->bits |= CCW_CHAIN;
			ccw->bits |= CCW_HALT_ON_TERM;
			ccw->bits |= CCW_TERM_FLUSH;
			ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
					MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
					COMMAND);

			if (i + 1 == sg_len) {
				ccw->bits &= ~CCW_CHAIN;
				ccw->bits |= CCW_IRQ;
				ccw->bits |= CCW_DEC_SEM;
				if (flags & DMA_CTRL_ACK)
					ccw->bits |= CCW_WAIT4END;
			}
		}
	}
	mxs_chan->desc_count = idx;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}
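/*
 * A minimal sketch of the DMA_TRANS_NONE (PIO) path above, modeled on
 * how a client driver might load three peripheral registers in one
 * command.  The pio[] values and the chan variable are illustrative
 * assumptions, not taken from this file:
 *
 *	u32 pio[3] = { ctrl0_val, compare_val, eccctrl_val };
 *
 *	desc = chan->device->device_prep_slave_sg(chan,
 *			(struct scatterlist *) pio, 3,
 *			DMA_TRANS_NONE, DMA_CTRL_ACK, NULL);
 *
 * In this mode the sgl argument is reinterpreted as an array of u32
 * PIO words, which is why the cast is required.
 */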
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int num_periods = buf_len / period_len;
	int i = 0, buf = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS)
		return NULL;

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags |= MXS_DMA_SG_LOOP;

	if (num_periods > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
			"maximum number of sg exceeded: %d > %d\n",
			num_periods, NUM_CCW);
		goto err_out;
	}

	if (period_len > MAX_XFER_BYTES) {
		dev_err(mxs_dma->dma_device.dev,
			"maximum period size exceeded: %zu > %d\n",
			period_len, MAX_XFER_BYTES);
		goto err_out;
	}

	while (buf < buf_len) {
		struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];

		if (i + 1 == num_periods)
			ccw->next = mxs_chan->ccw_phys;
		else
			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);

		ccw->bufaddr = dma_addr;
		ccw->xfer_bytes = period_len;

		ccw->bits = 0;
		ccw->bits |= CCW_CHAIN;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
				MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);

		dma_addr += period_len;
		buf += period_len;

		i++;
	}
	mxs_chan->desc_count = i;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}
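/*
 * A minimal sketch of how an audio-style client could drive the cyclic
 * path above.  The channel, buffer, and period values are illustrative
 * assumptions, not taken from this file:
 *
 *	desc = chan->device->device_prep_dma_cyclic(chan, buf_phys,
 *			4 * period_bytes, period_bytes,
 *			DMA_MEM_TO_DEV, NULL);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * The last ccw points back at the first, so the transfer loops until
 * the channel is terminated; each period raises an IRQ and the channel
 * status stays DMA_IN_PROGRESS.
 */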
static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	int ret = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		mxs_dma_reset_chan(mxs_chan);
		mxs_dma_disable_chan(mxs_chan);
		break;
	case DMA_PAUSE:
		mxs_dma_pause_chan(mxs_chan);
		break;
	case DMA_RESUME:
		mxs_dma_resume_chan(mxs_chan);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	dma_cookie_t last_used;

	last_used = chan->cookie;
	dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);

	return mxs_chan->status;
}
static void mxs_dma_issue_pending(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);

	mxs_dma_enable_chan(mxs_chan);
}

static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
	int ret;

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		return ret;

	ret = mxs_reset_block(mxs_dma->base);
	if (ret)
		goto err_out;

	/* only major version matters */
	mxs_dma->version = readl(mxs_dma->base +
				((mxs_dma->dev_id == MXS_DMA_APBX) ?
				HW_APBX_VERSION : HW_APBH_VERSION)) >>
				BP_APBHX_VERSION_MAJOR;

	/* enable apbh burst */
	if (dma_is_apbh()) {
		writel(BM_APBH_CTRL0_APB_BURST_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
		writel(BM_APBH_CTRL0_APB_BURST8_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
	}

	/*
	 * Enable the IRQ for all channels; the enable bits live in the
	 * upper half of CTRL1, above the per-channel completion bits.
	 */
	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
		mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);

err_out:
	clk_disable_unprepare(mxs_dma->clk);
	return ret;
}
static int __init mxs_dma_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id_entry =
				platform_get_device_id(pdev);
	struct mxs_dma_engine *mxs_dma;
	struct resource *iores;
	int ret, i;

	mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL);
	if (!mxs_dma)
		return -ENOMEM;

	mxs_dma->dev_id = id_entry->driver_data;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!request_mem_region(iores->start, resource_size(iores),
				pdev->name)) {
		ret = -EBUSY;
		goto err_request_region;
	}

	mxs_dma->base = ioremap(iores->start, resource_size(iores));
	if (!mxs_dma->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	mxs_dma->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(mxs_dma->clk)) {
		ret = PTR_ERR(mxs_dma->clk);
		goto err_clk;
	}

	dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);

	INIT_LIST_HEAD(&mxs_dma->dma_device.channels);

	/* Initialize channel parameters */
	for (i = 0; i < MXS_DMA_CHANNELS; i++) {
		struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];

		mxs_chan->mxs_dma = mxs_dma;
		mxs_chan->chan.device = &mxs_dma->dma_device;
		dma_cookie_init(&mxs_chan->chan);

		tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
			     (unsigned long) mxs_chan);

		/* Add the channel to mxs_chan list */
		list_add_tail(&mxs_chan->chan.device_node,
			&mxs_dma->dma_device.channels);
	}

	ret = mxs_dma_init(mxs_dma);
	if (ret)
		goto err_init;

	mxs_dma->dma_device.dev = &pdev->dev;

	/*
	 * mxs_dma can transfer at most MAX_XFER_BYTES (0xff00, i.e.
	 * 65280 bytes) per sg entry.
	 */
	mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
	dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);

	mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
	mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
	mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
	mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
	mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
	mxs_dma->dma_device.device_control = mxs_dma_control;
	mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;

	ret = dma_async_device_register(&mxs_dma->dma_device);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev, "unable to register\n");
		goto err_init;
	}

	dev_info(mxs_dma->dma_device.dev, "initialized\n");

	return 0;

err_init:
	clk_put(mxs_dma->clk);
err_clk:
	iounmap(mxs_dma->base);
err_ioremap:
	release_mem_region(iores->start, resource_size(iores));
err_request_region:
	kfree(mxs_dma);
	return ret;
}
static struct platform_device_id mxs_dma_type[] = {
	{
		.name = "mxs-dma-apbh",
		.driver_data = MXS_DMA_APBH,
	}, {
		.name = "mxs-dma-apbx",
		.driver_data = MXS_DMA_APBX,
	}, {
		/* end of list */
	}
};

static struct platform_driver mxs_dma_driver = {
	.driver		= {
		.name	= "mxs-dma",
	},
	.id_table	= mxs_dma_type,
};

static int __init mxs_dma_module_init(void)
{
	return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
}
subsys_initcall(mxs_dma_module_init);