/*
 * DMA driver for Altera mSGDMA IP core
 *
 * Copyright (C) 2017 Stefan Roese <sr@denx.de>
 *
 * Based on drivers/dma/xilinx/zynqmp_dma.c, which is:
 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define MSGDMA_MAX_TRANS_LEN		U32_MAX
#define MSGDMA_DESC_NUM			1024

/**
 * struct msgdma_extended_desc - implements an extended descriptor
 * @read_addr_lo: data buffer source address low bits
 * @write_addr_lo: data buffer destination address low bits
 * @len: the number of bytes to transfer per descriptor
 * @burst_seq_num: bit 31:24 write burst
 *		   bit 23:16 read burst
 *		   bit 15:00 sequence number
 * @stride: bit 31:16 write stride
 *	    bit 15:00 read stride
 * @read_addr_hi: data buffer source address high bits
 * @write_addr_hi: data buffer destination address high bits
 * @control: characteristics of the transfer
 */
struct msgdma_extended_desc {
	u32 read_addr_lo;
	u32 write_addr_lo;
	u32 len;
	u32 burst_seq_num;
	u32 stride;
	u32 read_addr_hi;
	u32 write_addr_hi;
	u32 control;
};

/* mSGDMA descriptor control field bit definitions */
#define MSGDMA_DESC_CTL_SET_CH(x)	((x) & 0xff)
#define MSGDMA_DESC_CTL_GEN_SOP		BIT(8)
#define MSGDMA_DESC_CTL_GEN_EOP		BIT(9)
#define MSGDMA_DESC_CTL_PARK_READS	BIT(10)
#define MSGDMA_DESC_CTL_PARK_WRITES	BIT(11)
#define MSGDMA_DESC_CTL_END_ON_EOP	BIT(12)
#define MSGDMA_DESC_CTL_END_ON_LEN	BIT(13)
#define MSGDMA_DESC_CTL_TR_COMP_IRQ	BIT(14)
#define MSGDMA_DESC_CTL_EARLY_IRQ	BIT(15)
#define MSGDMA_DESC_CTL_TR_ERR_IRQ	GENMASK(23, 16)
#define MSGDMA_DESC_CTL_EARLY_DONE	BIT(24)

/*
 * Writing "1" to the "go" bit commits the entire descriptor into the
 * descriptor FIFO(s)
 */
#define MSGDMA_DESC_CTL_GO		BIT(31)

/* Tx buffer control flags */
#define MSGDMA_DESC_CTL_TX_FIRST	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_MIDDLE	(MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_LAST		(MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_SINGLE	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_RX_SINGLE	(MSGDMA_DESC_CTL_END_ON_EOP |	\
					 MSGDMA_DESC_CTL_END_ON_LEN |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_EARLY_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)
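
/*
 * Illustration only (this driver builds its control words in
 * msgdma_desc_config() instead): for a Tx frame split across three
 * descriptors, the composite flags above would be applied so that only
 * the first descriptor generates SOP and only the last one generates
 * EOP and the completion IRQ:
 *
 *	desc[0].control = MSGDMA_DESC_CTL_TX_FIRST;
 *	desc[1].control = MSGDMA_DESC_CTL_TX_MIDDLE;
 *	desc[2].control = MSGDMA_DESC_CTL_TX_LAST;
 */
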
/* mSGDMA extended descriptor stride definitions */
#define MSGDMA_DESC_STRIDE_RD		0x00000001
#define MSGDMA_DESC_STRIDE_WR		0x00010000
#define MSGDMA_DESC_STRIDE_RW		0x00010001

/* mSGDMA dispatcher control and status register map */
#define MSGDMA_CSR_STATUS		0x00	/* Read / Clear */
#define MSGDMA_CSR_CONTROL		0x04	/* Read / Write */
#define MSGDMA_CSR_RW_FILL_LEVEL	0x08	/* 31:16 - write fill level */
						/* 15:00 - read fill level */
#define MSGDMA_CSR_RESP_FILL_LEVEL	0x0c	/* response FIFO fill level */
#define MSGDMA_CSR_RW_SEQ_NUM		0x10	/* 31:16 - write seq number */
						/* 15:00 - read seq number */

/* mSGDMA CSR status register bit definitions */
#define MSGDMA_CSR_STAT_BUSY			BIT(0)
#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY		BIT(1)
#define MSGDMA_CSR_STAT_DESC_BUF_FULL		BIT(2)
#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY		BIT(3)
#define MSGDMA_CSR_STAT_RESP_BUF_FULL		BIT(4)
#define MSGDMA_CSR_STAT_STOPPED			BIT(5)
#define MSGDMA_CSR_STAT_RESETTING		BIT(6)
#define MSGDMA_CSR_STAT_STOPPED_ON_ERR		BIT(7)
#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY	BIT(8)
#define MSGDMA_CSR_STAT_IRQ			BIT(9)
#define MSGDMA_CSR_STAT_MASK			GENMASK(9, 0)
#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ	GENMASK(8, 0)

#define DESC_EMPTY	(MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
			 MSGDMA_CSR_STAT_RESP_BUF_EMPTY)

/* mSGDMA CSR control register bit definitions */
#define MSGDMA_CSR_CTL_STOP		BIT(0)
#define MSGDMA_CSR_CTL_RESET		BIT(1)
#define MSGDMA_CSR_CTL_STOP_ON_ERR	BIT(2)
#define MSGDMA_CSR_CTL_STOP_ON_EARLY	BIT(3)
#define MSGDMA_CSR_CTL_GLOBAL_INTR	BIT(4)
#define MSGDMA_CSR_CTL_STOP_DESCS	BIT(5)

/* mSGDMA CSR fill level bits */
#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v)		(((v) & 0xffff0000) >> 16)
#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v)		((v) & 0x0000ffff)
#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v)	((v) & 0x0000ffff)

#define MSGDMA_CSR_SEQ_NUM_GET(v)		(((v) & 0xffff0000) >> 16)
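
/*
 * For instance, the number of pending responses could be read as
 * (illustrative; note that msgdma_tasklet() below reads the register
 * without applying the mask):
 *
 *	u32 level = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
 *	u32 count = MSGDMA_CSR_RESP_FILL_LEVEL_GET(level);
 */
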
/* mSGDMA response register map */
#define MSGDMA_RESP_BYTES_TRANSFERRED	0x00
#define MSGDMA_RESP_STATUS		0x04

/* mSGDMA response register bit definitions */
#define MSGDMA_RESP_EARLY_TERM	BIT(8)
#define MSGDMA_RESP_ERR_MASK	0xff

/**
 * struct msgdma_sw_desc - implements a sw descriptor
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @node: node within the free, pending, active or done descriptor list
 * @tx_list: list of additional descriptors chained to this transaction
 */
struct msgdma_sw_desc {
	struct dma_async_tx_descriptor async_tx;
	struct msgdma_extended_desc hw_desc;
	struct list_head node;
	struct list_head tx_list;
};

/*
 * struct msgdma_device - DMA device structure
 */
struct msgdma_device {
	spinlock_t lock;
	struct device *dev;
	struct tasklet_struct irq_tasklet;
	struct list_head pending_list;
	struct list_head free_list;
	struct list_head active_list;
	struct list_head done_list;
	u32 desc_free_cnt;
	bool idle;

	struct dma_device dmadev;
	struct dma_chan dmachan;
	dma_addr_t hw_desq;
	struct msgdma_sw_desc *sw_desq;
	unsigned int npendings;

	struct dma_slave_config slave_cfg;

	int irq;

	/* mSGDMA controller */
	void __iomem *csr;

	/* mSGDMA descriptors */
	void __iomem *desc;

	/* mSGDMA response */
	void __iomem *resp;
};

#define to_mdev(chan)	container_of(chan, struct msgdma_device, dmachan)
#define tx_to_desc(tx)	container_of(tx, struct msgdma_sw_desc, async_tx)

/**
 * msgdma_get_descriptor - Get the sw descriptor from the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Return: The sw descriptor
 */
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
	list_del(&desc->node);
	spin_unlock_irqrestore(&mdev->lock, flags);

	INIT_LIST_HEAD(&desc->tx_list);

	return desc;
}

/**
 * msgdma_free_descriptor - Free descriptor
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_free_descriptor(struct msgdma_device *mdev,
				   struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *child, *next;

	mdev->desc_free_cnt++;
	list_add_tail(&desc->node, &mdev->free_list);
	list_for_each_entry_safe(child, next, &desc->tx_list, node) {
		mdev->desc_free_cnt++;
		list_move_tail(&child->node, &mdev->free_list);
	}
}

/**
 * msgdma_free_desc_list - Free descriptors list
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @list: List to parse and delete the descriptor
 */
static void msgdma_free_desc_list(struct msgdma_device *mdev,
				  struct list_head *list)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, list, node)
		msgdma_free_descriptor(mdev, desc);
}

/**
 * msgdma_desc_config - Configure the descriptor
 * @desc: Hw descriptor pointer
 * @dst: Destination buffer address
 * @src: Source buffer address
 * @len: Transfer length
 * @stride: Read/write stride value to set
 */
static void msgdma_desc_config(struct msgdma_extended_desc *desc,
			       dma_addr_t dst, dma_addr_t src, size_t len,
			       u32 stride)
{
	/* Set lower 32bits of src & dst addresses in the descriptor */
	desc->read_addr_lo = lower_32_bits(src);
	desc->write_addr_lo = lower_32_bits(dst);

	/* Set upper 32bits of src & dst addresses in the descriptor */
	desc->read_addr_hi = upper_32_bits(src);
	desc->write_addr_hi = upper_32_bits(dst);

	desc->len = len;
	desc->stride = stride;
	desc->burst_seq_num = 0;	/* 0 will result in max burst length */

	/*
	 * Don't set interrupt on xfer end yet, this will be done later
	 * for the "last" descriptor
	 */
	desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
		MSGDMA_DESC_CTL_END_ON_LEN;
}

/**
 * msgdma_desc_config_eod - Mark the descriptor as end descriptor
 * @desc: Hw descriptor pointer
 */
static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
{
	desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
}

/**
 * msgdma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct msgdma_device *mdev = to_mdev(tx->chan);
	struct msgdma_sw_desc *new;
	dma_cookie_t cookie;
	unsigned long flags;

	new = tx_to_desc(tx);
	spin_lock_irqsave(&mdev->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&new->node, &mdev->pending_list);
	spin_unlock_irqrestore(&mdev->lock, flags);

	return cookie;
}

/**
 * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
		   dma_addr_t dma_src, size_t len, ulong flags)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *new, *first = NULL;
	struct msgdma_extended_desc *desc;
	size_t copy;
	u32 desc_cnt;
	unsigned long irqflags;

	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	do {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
		desc = &new->hw_desc;
		msgdma_desc_config(desc, dma_dst, dma_src, copy,
				   MSGDMA_DESC_STRIDE_RW);
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
	} while (len);

	msgdma_desc_config_eod(desc);
	async_tx_ack(&first->async_tx);
	first->async_tx.flags = flags;

	return &first->async_tx;
}
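
/*
 * Example (hypothetical dmaengine client code, not part of this driver):
 * a consumer reaches msgdma_prep_memcpy() through the generic dmaengine
 * API roughly as sketched below, assuming "chan" was obtained via
 * dma_request_chan() and "src"/"dst" are DMA-mapped addresses:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(tx);		(calls msgdma_tx_submit())
 *	dma_async_issue_pending(chan);		(calls msgdma_issue_pending())
 */
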
/**
 * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
 * @dchan: DMA channel
 * @sgl: Memory-side scatterlist (source for MEM_TO_DEV, destination
 *	 for DEV_TO_MEM)
 * @sg_len: Number of entries in the scatterlist
 * @dir: DMA transfer direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		     unsigned int sg_len, enum dma_transfer_direction dir,
		     unsigned long flags, void *context)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct dma_slave_config *cfg = &mdev->slave_cfg;
	struct msgdma_sw_desc *new, *first = NULL;
	void *desc = NULL;
	size_t len, avail;
	dma_addr_t dma_dst, dma_src;
	u32 desc_cnt = 0, i;
	struct scatterlist *sg;
	u32 stride;
	unsigned long irqflags;

	for_each_sg(sgl, sg, sg_len, i)
		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	avail = sg_dma_len(sgl);

	/* Run until we are out of scatterlist entries */
	while (true) {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		desc = &new->hw_desc;
		len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);

		if (dir == DMA_MEM_TO_DEV) {
			dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			dma_dst = cfg->dst_addr;
			stride = MSGDMA_DESC_STRIDE_RD;
		} else {
			dma_src = cfg->src_addr;
			dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			stride = MSGDMA_DESC_STRIDE_WR;
		}
		msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
		avail -= len;

		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);

		/* Fetch the next scatterlist entry */
		if (avail == 0) {
			if (sg_len == 0)
				break;
			sgl = sg_next(sgl);
			if (sgl == NULL)
				break;
			sg_len--;
			avail = sg_dma_len(sgl);
		}
	}

	msgdma_desc_config_eod(desc);
	first->async_tx.flags = flags;

	return &first->async_tx;
}
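
/*
 * Example (hypothetical dmaengine client code, not part of this driver):
 * msgdma_prep_slave_sg() relies on the peripheral address configured
 * through msgdma_dma_config() below, so a MEM_TO_DEV consumer would
 * first issue a slave config; "fifo_phys_addr", "chan", "sgl" and
 * "sg_len" are assumptions supplied by the caller:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */
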
static int msgdma_dma_config(struct dma_chan *dchan,
			     struct dma_slave_config *config)
{
	struct msgdma_device *mdev = to_mdev(dchan);

	memcpy(&mdev->slave_cfg, config, sizeof(*config));

	return 0;
}

static void msgdma_reset(struct msgdma_device *mdev)
{
	u32 val;
	int ret;

	/* Reset mSGDMA */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
	iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

	ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
				 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
				 1, 10000);
	if (ret)
		dev_err(mdev->dev, "DMA channel did not reset\n");

	/* Clear all status bits */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

	/* Enable the DMA controller including interrupts */
	iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
		  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

	mdev->idle = true;
}

static void msgdma_copy_one(struct msgdma_device *mdev,
			    struct msgdma_sw_desc *desc)
{
	void __iomem *hw_desc = mdev->desc;

	/*
	 * Check whether the DESC FIFO is full. If it is, wait for at
	 * least one entry to become free again
	 */
	while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
	       MSGDMA_CSR_STAT_DESC_BUF_FULL)
		mdelay(1);

	/*
	 * The descriptor needs to get copied into the descriptor FIFO
	 * of the DMA controller. The descriptor will get flushed to the
	 * FIFO, once the last word (control word) is written. Since we
	 * are not 100% sure that memcpy() writes all words in the
	 * "correct" order (address from low to high) on all architectures,
	 * we make sure this control word is written last by coding it
	 * separately and adding some write barriers here.
	 */
	memcpy((void __force *)hw_desc, &desc->hw_desc,
	       sizeof(desc->hw_desc) - sizeof(u32));

	/* Write control word last to flush this descriptor into the FIFO */
	mdev->idle = false;
	wmb();
	iowrite32(desc->hw_desc.control, hw_desc +
		  offsetof(struct msgdma_extended_desc, control));
	wmb();
}

/**
 * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
				     struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *sdesc, *next;

	msgdma_copy_one(mdev, desc);

	list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
		msgdma_copy_one(mdev, sdesc);
}

/**
 * msgdma_start_transfer - Initiate the new transfer
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_start_transfer(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	if (!mdev->idle)
		return;

	desc = list_first_entry_or_null(&mdev->pending_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;

	list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
	msgdma_copy_desc_to_fifo(mdev, desc);
}

/**
 * msgdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void msgdma_issue_pending(struct dma_chan *chan)
{
	struct msgdma_device *mdev = to_mdev(chan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_start_transfer(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		list_del(&desc->node);

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock(&mdev->lock);
			callback(callback_param);
			spin_lock(&mdev->lock);
		}

		/* Run any dependencies, then free the descriptor */
		msgdma_free_descriptor(mdev, desc);
	}
}

/**
 * msgdma_complete_descriptor - Mark the active descriptor as complete
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_complete_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	desc = list_first_entry_or_null(&mdev->active_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;
	list_del(&desc->node);
	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &mdev->done_list);
}

/**
 * msgdma_free_descriptors - Free channel descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_free_descriptors(struct msgdma_device *mdev)
{
	msgdma_free_desc_list(mdev, &mdev->active_list);
	msgdma_free_desc_list(mdev, &mdev->pending_list);
	msgdma_free_desc_list(mdev, &mdev->done_list);
}

/**
 * msgdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_free_descriptors(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
	kfree(mdev->sw_desq);
}

/**
 * msgdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success and failure value on error
 */
static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *desc;
	int i;

	mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
	if (!mdev->sw_desq)
		return -ENOMEM;

	mdev->idle = true;
	mdev->desc_free_cnt = MSGDMA_DESC_NUM;

	INIT_LIST_HEAD(&mdev->free_list);

	for (i = 0; i < MSGDMA_DESC_NUM; i++) {
		desc = mdev->sw_desq + i;
		dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
		desc->async_tx.tx_submit = msgdma_tx_submit;
		list_add_tail(&desc->node, &mdev->free_list);
	}

	return MSGDMA_DESC_NUM;
}

/**
 * msgdma_tasklet - Schedule completion tasklet
 * @data: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_tasklet(unsigned long data)
{
	struct msgdma_device *mdev = (struct msgdma_device *)data;
	u32 count;
	u32 __maybe_unused size;
	u32 __maybe_unused status;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);

	/* Read number of responses that are available */
	count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
	dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
		__func__, __LINE__, count);

	while (count--) {
		/*
		 * Read both longwords to purge this response from the FIFO.
		 * On Avalon-MM implementations, size and status do not
		 * have any real values, like transferred bytes or error
		 * bits. So we need to just drop these values.
		 */
		size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
		status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);

		msgdma_complete_descriptor(mdev);
		msgdma_chan_desc_cleanup(mdev);
	}

	spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_irq_handler - Altera mSGDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Altera mSGDMA device structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t msgdma_irq_handler(int irq, void *data)
{
	struct msgdma_device *mdev = data;
	u32 status;

	status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
	if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
		/* Start next transfer if the DMA controller is idle */
		spin_lock(&mdev->lock);
		mdev->idle = true;
		msgdma_start_transfer(mdev);
		spin_unlock(&mdev->lock);
	}

	tasklet_schedule(&mdev->irq_tasklet);

	/* Clear interrupt in mSGDMA controller */
	iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

	return IRQ_HANDLED;
}

/**
 * msgdma_dev_remove - Device remove function
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_dev_remove(struct msgdma_device *mdev)
{
	if (!mdev)
		return;

	devm_free_irq(mdev->dev, mdev->irq, mdev);
	tasklet_kill(&mdev->irq_tasklet);
	list_del(&mdev->dmachan.device_node);
}

static int request_and_map(struct platform_device *pdev, const char *name,
			   struct resource **res, void __iomem **ptr)
{
	struct resource *region;
	struct device *device = &pdev->dev;

	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (*res == NULL) {
		dev_err(device, "resource %s not defined\n", name);
		return -ENODEV;
	}

	region = devm_request_mem_region(device, (*res)->start,
					 resource_size(*res),
					 dev_name(device));
	if (region == NULL) {
		dev_err(device, "unable to request %s\n", name);
		return -EBUSY;
	}

	*ptr = devm_ioremap_nocache(device, region->start,
				    resource_size(region));
	if (*ptr == NULL) {
		dev_err(device, "ioremap_nocache of %s failed!", name);
		return -ENOMEM;
	}

	return 0;
}
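
/*
 * Note: on kernels that provide it, the body of request_and_map() could
 * likely be collapsed into a single call such as
 *
 *	*ptr = devm_platform_ioremap_resource_byname(pdev, name);
 *
 * which requests and maps a named MEM resource in one step; this driver
 * open-codes the equivalent sequence.
 */
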
/**
 * msgdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int msgdma_probe(struct platform_device *pdev)
{
	struct msgdma_device *mdev;
	struct dma_device *dma_dev;
	struct resource *dma_res;
	int ret;

	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
	if (!mdev)
		return -ENOMEM;

	mdev->dev = &pdev->dev;

	/* Map CSR space */
	ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
	if (ret)
		return ret;

	/* Map (extended) descriptor space */
	ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
	if (ret)
		return ret;

	/* Map response space */
	ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, mdev);

	/* Get interrupt nr from platform data */
	mdev->irq = platform_get_irq(pdev, 0);
	if (mdev->irq < 0)
		return -ENXIO;

	ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
			       0, dev_name(&pdev->dev), mdev);
	if (ret)
		return ret;

	tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev);

	dma_cookie_init(&mdev->dmachan);

	spin_lock_init(&mdev->lock);

	INIT_LIST_HEAD(&mdev->active_list);
	INIT_LIST_HEAD(&mdev->pending_list);
	INIT_LIST_HEAD(&mdev->done_list);
	INIT_LIST_HEAD(&mdev->free_list);

	dma_dev = &mdev->dmadev;

	/* Set DMA capabilities */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
		BIT(DMA_MEM_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Init DMA link list */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* Set base routines */
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = msgdma_issue_pending;
	dma_dev->dev = &pdev->dev;

	dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
	dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
	dma_dev->device_config = msgdma_dma_config;

	dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = msgdma_free_chan_resources;

	mdev->dmachan.device = dma_dev;
	list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);

	/* Set DMA mask to 64 bits */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			goto fail;
	}

	msgdma_reset(mdev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto fail;

	dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");

	return 0;

fail:
	msgdma_dev_remove(mdev);

	return ret;
}

/**
 * msgdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int msgdma_remove(struct platform_device *pdev)
{
	struct msgdma_device *mdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&mdev->dmadev);
	msgdma_dev_remove(mdev);

	dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");

	return 0;
}

static struct platform_driver msgdma_driver = {
	.driver = {
		.name = "altera-msgdma",
	},
	.probe = msgdma_probe,
	.remove = msgdma_remove,
};

module_platform_driver(msgdma_driver);

MODULE_ALIAS("platform:altera-msgdma");
MODULE_DESCRIPTION("Altera mSGDMA driver");
MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
MODULE_LICENSE("GPL");