/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"

#include "tsi721.h"

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
		 "Number of DMA descriptors per channel (default: 128)");

static unsigned int dma_txqueue_sz = 16;
module_param(dma_txqueue_sz, uint, S_IRUGO);
MODULE_PARM_DESC(dma_txqueue_sz,
		 "DMA Transactions Queue Size (default: 16)");

static u8 dma_sel = 0x7f;
module_param(dma_sel, byte, S_IRUGO);
MODULE_PARM_DESC(dma_sel,
		 "DMA Channel Selection Mask (default: 0x7f = all)");

static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}

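/*
 * tsi721_bdma_ch_init - allocate and program one BDMA channel: the hardware
 * buffer descriptor ring (bd_num entries plus one DTYPE3 link descriptor
 * that wraps the ring back to its base), the descriptor status FIFO, and,
 * in MSI-X mode, the channel's two interrupt vectors.
 */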
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64		*sts_ptr;
	dma_addr_t	bd_phys;
	dma_addr_t	sts_phys;
	int		sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_zalloc_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_ATOMIC);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d descriptors @ %p (phys = %pad)",
		  bdma_chan->id, bd_ptr, &bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_ATOMIC);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
		  bdma_chan->id, sts_ptr, &sts_phys, sts_size);

	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-DONE",
				  bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-INT",
				  bdma_chan->id);
			free_irq(
				priv->msix[TSI721_VECT_DMA0_DONE +
					    bdma_chan->id].vector,
				(void *)bdma_chan);
		}

err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}

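/*
 * tsi721_bdma_ch_free - release everything allocated by
 * tsi721_bdma_ch_init(). Fails with -EFAULT if the channel is still running.
 */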
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	if (bdma_chan->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}

static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
	}
}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

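/*
 * tsi721_bdma_handler - top-half entry for BDMA channel events. Masks the
 * channel's interrupts and defers all processing to tsi721_dma_tasklet(),
 * which re-enables them when it is done.
 */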
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start non-idle channel",
			bdma_chan->id);
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start DMA with no BDs ready %d",
			bdma_chan->id, task_pid_nr(current));
		return;
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
		  bdma_chan->id, bdma_chan->wr_count_next,
		  task_pid_nr(current));

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}

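/*
 * tsi721_desc_fill_init - start a new DTYPE1 hardware descriptor: encodes
 * the destination ID, request type, 66-bit RapidIO address and the local
 * buffer address of the first SG entry. The byte count is finalized later
 * by tsi721_desc_fill_end() once contiguous SG entries have been merged.
 */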
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (bd_ptr == NULL)
		return -EINVAL;

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
				      (desc->rtype << 19) | desc->destid);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
				     (sys_size << 26));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
					(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}

static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
	if (bd_ptr == NULL)
		return -EINVAL;

	/* Update DMA descriptor */
	if (interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

	return 0;
}

static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}

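/*
 * tsi721_clr_stat - scan the descriptor status FIFO from the current read
 * pointer, zero every consumed entry (eight u64 words each), and report the
 * new read pointer back to the hardware via the DSRP register.
 */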
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}

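/*
 * tsi721_submit_sg - translate the SG list of a transaction into hardware
 * buffer descriptors. Contiguous SG entries are merged into one descriptor;
 * if the ring fills up before the list is exhausted, the remaining entries
 * are left in the transaction descriptor for a later pass.
 */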
/* Must be called with the channel spinlock held */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;
	struct device *ch_dev = &dchan->dev->device;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
			bdma_chan->id);
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;
	bcount = 0;
	sys_size = dma_to_mport(dchan->device)->sys_size;

	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
		  bdma_chan->id, rd_idx, idx);

	for_each_sg(desc->sg, sg, desc->sg_len, i) {

		tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
			bdma_chan->id, i, desc->sg_len,
			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
				bdma_chan->id, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d",
				  bdma_chan->id, bcount);
		}

		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d HW descriptor ring is full @ %d",
				  bdma_chan->id, i);
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			tsi_err(ch_dev, "Failed to build desc: err=%d", err);
			break;
		}

		tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
			  bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d last desc final len: %d",
				  bdma_chan->id, bcount);
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}

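/*
 * tsi721_advance_work - if the channel is idle, pick the next transaction
 * (either the one passed in or the head of the pending queue), program its
 * descriptors and kick the hardware. Called with the channel lock held.
 */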
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
				struct tsi721_tx_desc *desc)
{
	int err;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	if (!tsi721_dma_is_idle(bdma_chan))
		return;

	/*
	 * If there is no data transfer in progress, fetch new descriptor from
	 * the pending queue.
	 */
	if (desc == NULL && bdma_chan->active_tx == NULL &&
					!list_empty(&bdma_chan->queue)) {
		desc = list_first_entry(&bdma_chan->queue,
					struct tsi721_tx_desc, desc_node);
		list_del_init((&desc->desc_node));
		bdma_chan->active_tx = desc;
	}

	if (desc) {
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				"DMAC%d ERR: tsi721_submit_sg failed with err=%d",
				bdma_chan->id, err);
		}
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
		  bdma_chan->id);
}

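/*
 * tsi721_dma_tasklet - bottom half for channel interrupts. Handles three
 * event classes: channel errors (re-initializes the channel if it reached
 * the ABORT state), status FIFO overflow, and transfer completion (runs the
 * client callback and advances to the next queued transaction).
 */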
static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x",
		  bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		int i = 10000;
		struct tsi721_tx_desc *desc;

		desc = bdma_chan->active_tx;
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d_STS = 0x%x did=%d raddr=0x%llx",
			bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr);

		/* Re-initialize DMA channel if possible */

		if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0)
			goto err_out;

		tsi721_clr_stat(bdma_chan);

		spin_lock(&bdma_chan->lock);

		/* Put DMA channel into init state */
		iowrite32(TSI721_DMAC_CTL_INIT,
			  bdma_chan->regs + TSI721_DMAC_CTL);
		do {
			udelay(1);
			dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
			i--;
		} while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i);

		if (dmac_sts & TSI721_DMAC_STS_ABORT) {
			tsi_err(&bdma_chan->dchan.dev->device,
				"Failed to re-initiate DMAC%d", bdma_chan->id);
			spin_unlock(&bdma_chan->lock);
			goto err_out;
		}

		/* Setup DMA descriptor pointers */
		iowrite32(((u64)bdma_chan->bd_phys >> 32),
			bdma_chan->regs + TSI721_DMAC_DPTRH);
		iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK),
			bdma_chan->regs + TSI721_DMAC_DPTRL);

		/* Setup descriptor status FIFO */
		iowrite32(((u64)bdma_chan->sts_phys >> 32),
			bdma_chan->regs + TSI721_DMAC_DSBH);
		iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK),
			bdma_chan->regs + TSI721_DMAC_DSBL);
		iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size),
			bdma_chan->regs + TSI721_DMAC_DSSZ);

		/* Clear interrupt bits */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);

		ioread32(bdma_chan->regs + TSI721_DMAC_INT);

		bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
		bdma_chan->sts_rdptr = 0;
		udelay(10);

		desc = bdma_chan->active_tx;
		desc->status = DMA_ERROR;
		dma_cookie_complete(&desc->txd);
		list_add(&desc->desc_node, &bdma_chan->free_list);
		bdma_chan->active_tx = NULL;
		if (bdma_chan->active)
			tsi721_advance_work(bdma_chan, NULL);
		spin_unlock(&bdma_chan->lock);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d descriptor status FIFO is full",
			bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		struct tsi721_tx_desc *desc;

		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		desc = bdma_chan->active_tx;

		if (desc->sg_len == 0) {
			dma_async_tx_callback callback = NULL;
			void *param = NULL;

			desc->status = DMA_COMPLETE;
			dma_cookie_complete(&desc->txd);
			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
				callback = desc->txd.callback;
				param = desc->txd.callback_param;
			}
			list_add(&desc->desc_node, &bdma_chan->free_list);
			bdma_chan->active_tx = NULL;
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan, NULL);
			spin_unlock(&bdma_chan->lock);
			if (callback)
				callback(param);
		} else {
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan,
						    bdma_chan->active_tx);
			spin_unlock(&bdma_chan->lock);
		}
	}
err_out:
	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

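/*
 * tsi721_tx_submit - dmaengine tx_submit hook. Assigns a cookie, moves the
 * transaction onto the channel's pending queue and tries to start it
 * immediately if the channel is idle.
 */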
static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	/* Check if the descriptor is detached from any lists */
	if (!list_empty(&desc->desc_node)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d wrong state of descriptor %p",
			bdma_chan->id, txd);
		return -EIO;
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!bdma_chan->active) {
		spin_unlock_bh(&bdma_chan->lock);
		return -ENODEV;
	}

	cookie = dma_cookie_assign(txd);
	desc->status = DMA_IN_PROGRESS;
	list_add_tail(&desc->desc_node, &bdma_chan->queue);
	tsi721_advance_work(bdma_chan, NULL);

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}

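/*
 * tsi721_alloc_chan_resources - dmaengine hook: bring up the hardware ring
 * via tsi721_bdma_ch_init() and pre-allocate dma_txqueue_sz logical
 * transaction descriptors on the channel's free list.
 */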
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc = NULL;
	int i;

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (bdma_chan->bd_base)
		return dma_txqueue_sz;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
			bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
			GFP_ATOMIC);
	if (!desc) {
		tsi_err(&dchan->dev->device,
			"DMAC%d Failed to allocate logical descriptors",
			bdma_chan->id);
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < dma_txqueue_sz; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return dma_txqueue_sz;
}

static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);
}

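/*
 * tsi721_free_chan_resources - dmaengine hook: quiesce the channel (mask
 * interrupts, wait for in-flight handlers, kill the tasklet) and release
 * both the logical descriptors and the hardware ring.
 */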
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (bdma_chan->bd_base == NULL)
		return;

	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;
	tsi721_sync_dma_irq(bdma_chan);
	tasklet_kill(&bdma_chan->tasklet);
	INIT_LIST_HEAD(&bdma_chan->free_list);
	kfree(bdma_chan->tx_desc);
	tsi721_bdma_ch_free(bdma_chan);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	enum dma_status	status;

	spin_lock_bh(&bdma_chan->lock);
	status = dma_cookie_status(dchan, cookie, txstate);
	spin_unlock_bh(&bdma_chan->lock);
	return status;
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	spin_lock_bh(&bdma_chan->lock);
	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
		tsi721_advance_work(bdma_chan, NULL);
	}
	spin_unlock_bh(&bdma_chan->lock);
}

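/*
 * tsi721_prep_rio_sg - dmaengine prep_slave_sg hook. The opaque context
 * (tinfo) carries a struct rio_dma_ext with the RapidIO target: destination
 * ID, upper address bits and the NREAD/NWRITE request type. Returns a free
 * transaction descriptor or an ERR_PTR.
 */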
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc;
	struct rio_dma_ext *rext = tinfo;
	enum dma_rtype rtype;
	struct dma_async_tx_descriptor *txd = NULL;

	if (!sgl || !sg_len) {
		tsi_err(&dchan->dev->device, "DMAC%d No SG list",
			bdma_chan->id);
		return ERR_PTR(-EINVAL);
	}

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
		  (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		tsi_err(&dchan->dev->device,
			"DMAC%d Unsupported DMA direction option",
			bdma_chan->id);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!list_empty(&bdma_chan->free_list)) {
		desc = list_first_entry(&bdma_chan->free_list,
				struct tsi721_tx_desc, desc_node);
		list_del_init(&desc->desc_node);
		desc->destid = rext->destid;
		desc->rio_addr = rext->rio_addr;
		desc->rio_addr_u = 0;
		desc->rtype = rtype;
		desc->sg_len	= sg_len;
		desc->sg	= sgl;
		txd		= &desc->txd;
		txd->flags	= flags;
	}

	spin_unlock_bh(&bdma_chan->lock);

	if (!txd) {
		tsi_debug(DMA, &dchan->dev->device,
			  "DMAC%d free TXD is not available", bdma_chan->id);
		return ERR_PTR(-EBUSY);
	}

	return txd;
}

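/*
 * tsi721_terminate_all - dmaengine hook: mark the channel inactive, wait
 * for the hardware to go idle, then fail the active transaction and every
 * queued one through tsi721_dma_tx_err().
 */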
static int tsi721_terminate_all(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	LIST_HEAD(list);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	spin_lock_bh(&bdma_chan->lock);

	bdma_chan->active = false;

	while (!tsi721_dma_is_idle(bdma_chan)) {

		udelay(5);
#if (0)
		/* make sure to stop the transfer */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		do {
			dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
#endif
	}

	if (bdma_chan->active_tx)
		list_add(&bdma_chan->active_tx->desc_node, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_tx_err(bdma_chan, desc);

	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}

static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan)
{
	if (!bdma_chan->active)
		return;
	spin_lock_bh(&bdma_chan->lock);
	if (!tsi721_dma_is_idle(bdma_chan)) {
		int timeout = 100000;

		/* stop the transfer in progress */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		while (!tsi721_dma_is_idle(bdma_chan) && --timeout)
			udelay(1);
	}

	spin_unlock_bh(&bdma_chan->lock);
}

void tsi721_dma_stop_all(struct tsi721_device *priv)
{
	int i;

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i)))
			tsi721_dma_stop(&priv->bdma[i]);
	}
}

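/*
 * tsi721_register_dma - set up the dmaengine side of the mport: one DMA
 * channel per enabled BDMA block (the maintenance channel and channels
 * masked out by dma_sel are skipped), then register the device with the
 * dmaengine core.
 */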
int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = 0;
	int err;
	struct rio_mport *mport = &priv->mport;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
			continue;

		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;

		spin_lock_init(&bdma_chan->lock);

		bdma_chan->active_tx = NULL;
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
		nr_channels++;
	}

	mport->dma.chancnt = nr_channels;
	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_terminate_all = tsi721_terminate_all;

	err = dma_async_device_register(&mport->dma);
	if (err)
		tsi_err(&priv->pdev->dev, "Failed to register DMA device");

	return err;
}

void tsi721_unregister_dma(struct tsi721_device *priv)
{
	struct rio_mport *mport = &priv->mport;
	struct dma_chan *chan, *_c;
	struct tsi721_bdma_chan *bdma_chan;

	tsi721_dma_stop_all(priv);
	dma_async_device_unregister(&mport->dma);

	list_for_each_entry_safe(chan, _c, &mport->dma.channels,
				 device_node) {
		bdma_chan = to_tsi721_chan(chan);
		if (bdma_chan->active) {
			tsi721_bdma_interrupt_enable(bdma_chan, 0);
			bdma_chan->active = false;
			tsi721_sync_dma_irq(bdma_chan);
			tasklet_kill(&bdma_chan->tasklet);
			INIT_LIST_HEAD(&bdma_chan->free_list);
			kfree(bdma_chan->tx_desc);
			tsi721_bdma_ch_free(bdma_chan);
		}

		list_del(&chan->device_node);
	}
}