/*
 * drivers/media/video/omap24xxcam-dma.c
 *
 * Copyright (C) 2004 MontaVista Software, Inc.
 * Copyright (C) 2004 Texas Instruments.
 * Copyright (C) 2007 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@nokia.com>
 *
 * Based on code from Andy Lowe <source@mvista.com> and
 * David Cohen <david.cohen@indt.org.br>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/scatterlist.h>

#include "omap24xxcam.h"

/*
 *
 * DMA hardware.
 *
 */

/* Ack all interrupts on CSR and IRQSTATUS_L0 */
static void omap24xxcam_dmahw_ack_all(unsigned long base)
{
        u32 csr;
        int i;

        for (i = 0; i < NUM_CAMDMA_CHANNELS; ++i) {
                csr = omap24xxcam_reg_in(base, CAMDMA_CSR(i));
                /* ack interrupt in CSR */
                omap24xxcam_reg_out(base, CAMDMA_CSR(i), csr);
        }
        omap24xxcam_reg_out(base, CAMDMA_IRQSTATUS_L0, 0xf);
}

/* Ack dmach on CSR and IRQSTATUS_L0 */
static u32 omap24xxcam_dmahw_ack_ch(unsigned long base, int dmach)
{
        u32 csr;

        csr = omap24xxcam_reg_in(base, CAMDMA_CSR(dmach));
        /* ack interrupt in CSR */
        omap24xxcam_reg_out(base, CAMDMA_CSR(dmach), csr);
        /* ack interrupt in IRQSTATUS */
        omap24xxcam_reg_out(base, CAMDMA_IRQSTATUS_L0, (1 << dmach));

        return csr;
}

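/* Return non-zero if channel dmach still has its enable bit set,
 * i.e. a transfer is in progress on it.
 */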
static int omap24xxcam_dmahw_running(unsigned long base, int dmach)
{
        return omap24xxcam_reg_in(base, CAMDMA_CCR(dmach)) & CAMDMA_CCR_ENABLE;
}

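/* Program channel dmach for a single camera-synchronised transfer of
 * len bytes from the camera to the bus address start.  Block-complete,
 * drop and error interrupts are enabled in CICR, but the channel itself
 * is left disabled; it is started later, either explicitly with
 * omap24xxcam_dmahw_transfer_start() or by chaining.
 */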
static void omap24xxcam_dmahw_transfer_setup(unsigned long base, int dmach,
                                             dma_addr_t start, u32 len)
{
        omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
                            CAMDMA_CCR_SEL_SRC_DST_SYNC
                            | CAMDMA_CCR_BS
                            | CAMDMA_CCR_DST_AMODE_POST_INC
                            | CAMDMA_CCR_SRC_AMODE_POST_INC
                            | CAMDMA_CCR_FS
                            | CAMDMA_CCR_WR_ACTIVE
                            | CAMDMA_CCR_RD_ACTIVE
                            | CAMDMA_CCR_SYNCHRO_CAMERA);
        omap24xxcam_reg_out(base, CAMDMA_CLNK_CTRL(dmach), 0);
        omap24xxcam_reg_out(base, CAMDMA_CEN(dmach), len);
        omap24xxcam_reg_out(base, CAMDMA_CFN(dmach), 1);
        omap24xxcam_reg_out(base, CAMDMA_CSDP(dmach),
                            CAMDMA_CSDP_WRITE_MODE_POSTED
                            | CAMDMA_CSDP_DST_BURST_EN_32
                            | CAMDMA_CSDP_DST_PACKED
                            | CAMDMA_CSDP_SRC_BURST_EN_32
                            | CAMDMA_CSDP_SRC_PACKED
                            | CAMDMA_CSDP_DATA_TYPE_8BITS);
        omap24xxcam_reg_out(base, CAMDMA_CSSA(dmach), 0);
        omap24xxcam_reg_out(base, CAMDMA_CDSA(dmach), start);
        omap24xxcam_reg_out(base, CAMDMA_CSEI(dmach), 0);
        omap24xxcam_reg_out(base, CAMDMA_CSFI(dmach), DMA_THRESHOLD);
        omap24xxcam_reg_out(base, CAMDMA_CDEI(dmach), 0);
        omap24xxcam_reg_out(base, CAMDMA_CDFI(dmach), 0);
        omap24xxcam_reg_out(base, CAMDMA_CSR(dmach),
                            CAMDMA_CSR_MISALIGNED_ERR
                            | CAMDMA_CSR_SECURE_ERR
                            | CAMDMA_CSR_TRANS_ERR
                            | CAMDMA_CSR_BLOCK
                            | CAMDMA_CSR_DROP);
        omap24xxcam_reg_out(base, CAMDMA_CICR(dmach),
                            CAMDMA_CICR_MISALIGNED_ERR_IE
                            | CAMDMA_CICR_SECURE_ERR_IE
                            | CAMDMA_CICR_TRANS_ERR_IE
                            | CAMDMA_CICR_BLOCK_IE
                            | CAMDMA_CICR_DROP_IE);
}

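/* Start the transfer previously configured with
 * omap24xxcam_dmahw_transfer_setup() by rewriting CCR with
 * CAMDMA_CCR_ENABLE set.
 */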
static void omap24xxcam_dmahw_transfer_start(unsigned long base, int dmach)
{
        omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
                            CAMDMA_CCR_SEL_SRC_DST_SYNC
                            | CAMDMA_CCR_BS
                            | CAMDMA_CCR_DST_AMODE_POST_INC
                            | CAMDMA_CCR_SRC_AMODE_POST_INC
                            | CAMDMA_CCR_ENABLE
                            | CAMDMA_CCR_FS
                            | CAMDMA_CCR_SYNCHRO_CAMERA);
}

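/* Link the channel set up before dmach to dmach, so that dmach starts
 * automatically when that transfer completes.  The trailing loop scans
 * the outstanding channels, oldest first; if none of them is still
 * enabled by the time dmach is reached, the link was programmed too
 * late and dmach is started by hand instead.
 */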
static void omap24xxcam_dmahw_transfer_chain(unsigned long base, int dmach,
                                             int free_dmach)
{
        int prev_dmach, ch;

        if (dmach == 0)
                prev_dmach = NUM_CAMDMA_CHANNELS - 1;
        else
                prev_dmach = dmach - 1;
        omap24xxcam_reg_out(base, CAMDMA_CLNK_CTRL(prev_dmach),
                            CAMDMA_CLNK_CTRL_ENABLE_LNK | dmach);
        /* Did we chain the DMA transfer before the previous one
         * finished?
         */
        ch = (dmach + free_dmach) % NUM_CAMDMA_CHANNELS;
        while (!(omap24xxcam_reg_in(base, CAMDMA_CCR(ch))
                 & CAMDMA_CCR_ENABLE)) {
                if (ch == dmach) {
                        /* The previous transfer has ended and this one
                         * hasn't started, so we must not have chained
                         * to the previous one in time.  We'll have to
                         * start it now.
                         */
                        omap24xxcam_dmahw_transfer_start(base, dmach);
                        break;
                } else
                        ch = (ch + 1) % NUM_CAMDMA_CHANNELS;
        }
}

/* Abort all chained DMA transfers. After all transfers have been
 * aborted and the DMA controller is idle, the completion routines for
 * any aborted transfers will be called in sequence. The DMA
 * controller may not be idle after this routine completes, because
 * the completion routines might start new transfers.
 */
static void omap24xxcam_dmahw_abort_ch(unsigned long base, int dmach)
{
        /* mask all interrupts from this channel */
        omap24xxcam_reg_out(base, CAMDMA_CICR(dmach), 0);

        /* unlink this channel */
        omap24xxcam_reg_merge(base, CAMDMA_CLNK_CTRL(dmach), 0,
                              CAMDMA_CLNK_CTRL_ENABLE_LNK);

        /* disable this channel */
        omap24xxcam_reg_merge(base, CAMDMA_CCR(dmach), 0, CAMDMA_CCR_ENABLE);
}

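/* One-time controller setup: force-standby/force-idle plus autoidle in
 * OCP_SYSCONFIG, a maximum per-channel FIFO depth of 0x10 in GCR, and
 * the channel interrupts unmasked on IRQ line 0 (IRQENABLE_L0 = 0xf).
 */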
static void omap24xxcam_dmahw_init(unsigned long base)
{
        omap24xxcam_reg_out(base, CAMDMA_OCP_SYSCONFIG,
                            CAMDMA_OCP_SYSCONFIG_MIDLEMODE_FSTANDBY
                            | CAMDMA_OCP_SYSCONFIG_SIDLEMODE_FIDLE
                            | CAMDMA_OCP_SYSCONFIG_AUTOIDLE);
        omap24xxcam_reg_merge(base, CAMDMA_GCR, 0x10,
                              CAMDMA_GCR_MAX_CHANNEL_FIFO_DEPTH);
        omap24xxcam_reg_out(base, CAMDMA_IRQENABLE_L0, 0xf);
}

/*
 *
 * Individual DMA channel handling.
 *
 */

/* Start a DMA transfer from the camera to memory.
 * Returns zero if the transfer was successfully started, or non-zero if all
 * DMA channels are already in use or starting is currently inhibited.
 */
static int omap24xxcam_dma_start(struct omap24xxcam_dma *dma, dma_addr_t start,
                                 u32 len, dma_callback_t callback, void *arg)
{
        unsigned long flags;
        int dmach;

        spin_lock_irqsave(&dma->lock, flags);

        if (!dma->free_dmach || atomic_read(&dma->dma_stop)) {
                spin_unlock_irqrestore(&dma->lock, flags);
                return -EBUSY;
        }

        dmach = dma->next_dmach;

        dma->ch_state[dmach].callback = callback;
        dma->ch_state[dmach].arg = arg;

        omap24xxcam_dmahw_transfer_setup(dma->base, dmach, start, len);

        /* We're ready to start the DMA transfer. */
        if (dma->free_dmach < NUM_CAMDMA_CHANNELS) {
                /* A transfer is already in progress, so try to chain to it. */
                omap24xxcam_dmahw_transfer_chain(dma->base, dmach,
                                                 dma->free_dmach);
        } else {
                /* No transfer is in progress, so we'll just start this one
                 * now.
                 */
                omap24xxcam_dmahw_transfer_start(dma->base, dmach);
        }

        dma->next_dmach = (dma->next_dmach + 1) % NUM_CAMDMA_CHANNELS;
        dma->free_dmach--;

        spin_unlock_irqrestore(&dma->lock, flags);

        return 0;
}

/* Abort all chained DMA transfers. After all transfers have been
 * aborted and the DMA controller is idle, the completion routines for
 * any aborted transfers will be called in sequence. The DMA
 * controller may not be idle after this routine completes, because
 * the completion routines might start new transfers.
 */
static void omap24xxcam_dma_abort(struct omap24xxcam_dma *dma, u32 csr)
{
        unsigned long flags;
        int dmach, i, free_dmach;
        dma_callback_t callback;
        void *arg;

        spin_lock_irqsave(&dma->lock, flags);

        /* stop any DMA transfers in progress */
        dmach = (dma->next_dmach + dma->free_dmach) % NUM_CAMDMA_CHANNELS;
        for (i = 0; i < NUM_CAMDMA_CHANNELS; i++) {
                omap24xxcam_dmahw_abort_ch(dma->base, dmach);
                dmach = (dmach + 1) % NUM_CAMDMA_CHANNELS;
        }

        /* We have to be careful here because the callback routine
         * might start a new DMA transfer, and we only want to abort
         * transfers that were started before this routine was called.
         */
        free_dmach = dma->free_dmach;
        while ((dma->free_dmach < NUM_CAMDMA_CHANNELS) &&
               (free_dmach < NUM_CAMDMA_CHANNELS)) {
                dmach = (dma->next_dmach + dma->free_dmach)
                        % NUM_CAMDMA_CHANNELS;
                callback = dma->ch_state[dmach].callback;
                arg = dma->ch_state[dmach].arg;
                dma->free_dmach++;
                free_dmach++;
                if (callback) {
                        /* leave interrupts disabled during callback */
                        spin_unlock(&dma->lock);
                        (*callback) (dma, csr, arg);
                        spin_lock(&dma->lock);
                }
        }

        spin_unlock_irqrestore(&dma->lock, flags);
}

/* Abort all chained DMA transfers. After all transfers have been
 * aborted and the DMA controller is idle, the completion routines for
 * any aborted transfers will be called in sequence. If the completion
 * routines attempt to start a new DMA transfer it will fail, so the
 * DMA controller will be idle after this routine completes.
 */
static void omap24xxcam_dma_stop(struct omap24xxcam_dma *dma, u32 csr)
{
        atomic_inc(&dma->dma_stop);
        omap24xxcam_dma_abort(dma, csr);
        atomic_dec(&dma->dma_stop);
}

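/* Transfers are completed in the order they were started: the oldest
 * outstanding channel is (next_dmach + free_dmach) % NUM_CAMDMA_CHANNELS,
 * and the ISR below stops at the first channel that is still running.
 * A channel error stops all outstanding transfers via
 * omap24xxcam_dma_stop().
 */
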
/* Camera DMA interrupt service routine. */
void omap24xxcam_dma_isr(struct omap24xxcam_dma *dma)
{
        int dmach;
        dma_callback_t callback;
        void *arg;
        u32 csr;
        const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
                | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
                | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;

        spin_lock(&dma->lock);

        if (dma->free_dmach == NUM_CAMDMA_CHANNELS) {
                /* A camera DMA interrupt occurred while all channels
                 * are idle, so we'll acknowledge the interrupt in the
                 * IRQSTATUS register and exit.
                 */
                omap24xxcam_dmahw_ack_all(dma->base);
                spin_unlock(&dma->lock);
                return;
        }

        while (dma->free_dmach < NUM_CAMDMA_CHANNELS) {
                dmach = (dma->next_dmach + dma->free_dmach)
                        % NUM_CAMDMA_CHANNELS;
                if (omap24xxcam_dmahw_running(dma->base, dmach)) {
                        /* This buffer hasn't finished yet, so we're done. */
                        break;
                }
                csr = omap24xxcam_dmahw_ack_ch(dma->base, dmach);
                if (csr & csr_error) {
                        /* A DMA error occurred, so stop all DMA
                         * transfers in progress.
                         */
                        spin_unlock(&dma->lock);
                        omap24xxcam_dma_stop(dma, csr);
                        return;
                } else {
                        callback = dma->ch_state[dmach].callback;
                        arg = dma->ch_state[dmach].arg;
                        dma->free_dmach++;
                        if (callback) {
                                spin_unlock(&dma->lock);
                                (*callback) (dma, csr, arg);
                                spin_lock(&dma->lock);
                        }
                }
        }

        spin_unlock(&dma->lock);

        omap24xxcam_sgdma_process(
                container_of(dma, struct omap24xxcam_sgdma, dma));
}

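/* Reinitialise the DMA controller registers.  dma->lock is held across
 * the writes so this serialises against transfer starts and the ISR.
 */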
void omap24xxcam_dma_hwinit(struct omap24xxcam_dma *dma)
{
        unsigned long flags;

        spin_lock_irqsave(&dma->lock, flags);

        omap24xxcam_dmahw_init(dma->base);

        spin_unlock_irqrestore(&dma->lock, flags);
}

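/* Software-level initialisation of the channel layer: the lock, the
 * register base address and the per-channel callback state.  All
 * channels start out free.
 */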
static void omap24xxcam_dma_init(struct omap24xxcam_dma *dma,
                                 unsigned long base)
{
        int ch;

        /* group all channels on DMA IRQ0 and unmask irq */
        spin_lock_init(&dma->lock);
        dma->base = base;
        dma->free_dmach = NUM_CAMDMA_CHANNELS;
        dma->next_dmach = 0;
        for (ch = 0; ch < NUM_CAMDMA_CHANNELS; ch++) {
                dma->ch_state[ch].callback = NULL;
                dma->ch_state[ch].arg = NULL;
        }
}

/*
 *
 * Scatter-gather DMA.
 *
 * High-level DMA construct for transferring whole picture frames to
 * memory that is discontinuous.
 *
 */

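/* Each queued frame occupies one of NUM_SG_DMA slots.  A slot's
 * scatterlist is walked entry by entry and every entry is handed to
 * omap24xxcam_dma_start() as one channel transfer; the slot's callback
 * runs once all queued entries have completed or an error has been
 * accumulated in its CSR value.
 */
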
/* DMA completion routine for the scatter-gather DMA fragments. */
static void omap24xxcam_sgdma_callback(struct omap24xxcam_dma *dma, u32 csr,
                                       void *arg)
{
        struct omap24xxcam_sgdma *sgdma =
                container_of(dma, struct omap24xxcam_sgdma, dma);
        int sgslot = (int)arg;
        struct sgdma_state *sg_state;
        const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
                | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
                | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;

        spin_lock(&sgdma->lock);

        /* We got an interrupt, so we can remove the timer */
        del_timer(&sgdma->reset_timer);

        sg_state = sgdma->sg_state + sgslot;
        if (!sg_state->queued_sglist) {
                spin_unlock(&sgdma->lock);
                printk(KERN_ERR "%s: sgdma completed when none queued!\n",
                       __func__);
                return;
        }

        sg_state->csr |= csr;
        if (!--sg_state->queued_sglist) {
                /* Queue for this sglist is empty, so check to see if we're
                 * done.
                 */
                if ((sg_state->next_sglist == sg_state->sglen)
                    || (sg_state->csr & csr_error)) {
                        sgdma_callback_t callback = sg_state->callback;
                        void *arg = sg_state->arg;
                        u32 sg_csr = sg_state->csr;

                        /* All done with this sglist */
                        sgdma->free_sgdma++;

                        if (callback) {
                                spin_unlock(&sgdma->lock);
                                (*callback) (sgdma, sg_csr, arg);
                                return;
                        }
                }
        }

        spin_unlock(&sgdma->lock);
}

/* Start queued scatter-gather DMA transfers. */
void omap24xxcam_sgdma_process(struct omap24xxcam_sgdma *sgdma)
{
        unsigned long flags;
        int queued_sgdma, sgslot;
        struct sgdma_state *sg_state;
        const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
                | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
                | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;

        spin_lock_irqsave(&sgdma->lock, flags);

        queued_sgdma = NUM_SG_DMA - sgdma->free_sgdma;
        sgslot = (sgdma->next_sgdma + sgdma->free_sgdma) % NUM_SG_DMA;
        while (queued_sgdma > 0) {
                sg_state = sgdma->sg_state + sgslot;
                while ((sg_state->next_sglist < sg_state->sglen) &&
                       !(sg_state->csr & csr_error)) {
                        const struct scatterlist *sglist;
                        unsigned int len;

                        sglist = sg_state->sglist + sg_state->next_sglist;

                        /* try to start the next DMA transfer */
                        if (sg_state->next_sglist + 1 == sg_state->sglen) {
                                /*
                                 * On the last sg, we handle the case where
                                 * cam->img.pix.sizeimage is not a multiple
                                 * of PAGE_SIZE.
                                 */
                                len = sg_state->len - sg_state->bytes_read;
                        } else {
                                len = sg_dma_len(sglist);
                        }

                        if (omap24xxcam_dma_start(&sgdma->dma,
                                                  sg_dma_address(sglist),
                                                  len,
                                                  omap24xxcam_sgdma_callback,
                                                  (void *)sgslot)) {
                                /* DMA start failed */
                                spin_unlock_irqrestore(&sgdma->lock, flags);
                                return;
                        } else {
                                unsigned long expires;

                                /* DMA start was successful */
                                sg_state->next_sglist++;
                                sg_state->bytes_read += len;
                                sg_state->queued_sglist++;

                                /* We start the reset timer */
                                expires = jiffies + HZ;
                                mod_timer(&sgdma->reset_timer, expires);
                        }
                }
                queued_sgdma--;
                sgslot = (sgslot + 1) % NUM_SG_DMA;
        }

        spin_unlock_irqrestore(&sgdma->lock, flags);
}

/*
 * Queue a scatter-gather DMA transfer from the camera to memory.
 * Returns zero if the transfer was successfully queued, or non-zero
 * if all of the scatter-gather slots are already in use.
 */
int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
                            const struct scatterlist *sglist, int sglen,
                            int len, sgdma_callback_t callback, void *arg)
{
        unsigned long flags;
        struct sgdma_state *sg_state;

        if ((sglen < 0) || ((sglen > 0) && !sglist))
                return -EINVAL;

        spin_lock_irqsave(&sgdma->lock, flags);

        if (!sgdma->free_sgdma) {
                spin_unlock_irqrestore(&sgdma->lock, flags);
                return -EBUSY;
        }

        sg_state = sgdma->sg_state + sgdma->next_sgdma;

        sg_state->sglist = sglist;
        sg_state->sglen = sglen;
        sg_state->next_sglist = 0;
        sg_state->bytes_read = 0;
        sg_state->len = len;
        sg_state->queued_sglist = 0;
        sg_state->csr = 0;
        sg_state->callback = callback;
        sg_state->arg = arg;

        sgdma->next_sgdma = (sgdma->next_sgdma + 1) % NUM_SG_DMA;
        sgdma->free_sgdma--;

        spin_unlock_irqrestore(&sgdma->lock, flags);

        omap24xxcam_sgdma_process(sgdma);

        return 0;
}

/* Sync scatter-gather DMA by aborting any DMA transfers currently in progress.
 * Any queued scatter-gather DMA transactions that have not yet been started
 * will remain queued. The DMA controller will be idle after this routine
 * completes. When the scatter-gather queue is restarted, the next
 * scatter-gather DMA transfer will begin at the start of a new transaction.
 */
void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma *sgdma)
{
        unsigned long flags;
        int sgslot;
        struct sgdma_state *sg_state;
        u32 csr = CAMDMA_CSR_TRANS_ERR;

        /* stop any DMA transfers in progress */
        omap24xxcam_dma_stop(&sgdma->dma, csr);

        spin_lock_irqsave(&sgdma->lock, flags);

        if (sgdma->free_sgdma < NUM_SG_DMA) {
                sgslot = (sgdma->next_sgdma + sgdma->free_sgdma) % NUM_SG_DMA;
                sg_state = sgdma->sg_state + sgslot;
                if (sg_state->next_sglist != 0) {
                        /* This DMA transfer was in progress, so abort it. */
                        sgdma_callback_t callback = sg_state->callback;
                        void *arg = sg_state->arg;

                        sgdma->free_sgdma++;
                        if (callback) {
                                /* leave interrupts masked */
                                spin_unlock(&sgdma->lock);
                                (*callback) (sgdma, csr, arg);
                                spin_lock(&sgdma->lock);
                        }
                }
        }

        spin_unlock_irqrestore(&sgdma->lock, flags);
}

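/* Initialise the scatter-gather layer: mark all NUM_SG_DMA slots free,
 * initialise the underlying channel layer for the register block at
 * base, and set up reset_timer with reset_callback.  The timer is
 * (re)armed for jiffies + HZ by omap24xxcam_sgdma_process() whenever a
 * transfer is started, and deleted again when a DMA interrupt arrives.
 */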
void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma *sgdma,
                            unsigned long base,
                            void (*reset_callback)(unsigned long data),
                            unsigned long reset_callback_data)
{
        int sg;

        spin_lock_init(&sgdma->lock);
        sgdma->free_sgdma = NUM_SG_DMA;
        sgdma->next_sgdma = 0;
        for (sg = 0; sg < NUM_SG_DMA; sg++) {
                sgdma->sg_state[sg].sglen = 0;
                sgdma->sg_state[sg].next_sglist = 0;
                sgdma->sg_state[sg].bytes_read = 0;
                sgdma->sg_state[sg].queued_sglist = 0;
                sgdma->sg_state[sg].csr = 0;
                sgdma->sg_state[sg].callback = NULL;
                sgdma->sg_state[sg].arg = NULL;
        }

        omap24xxcam_dma_init(&sgdma->dma, base);
        setup_timer(&sgdma->reset_timer, reset_callback, reset_callback_data);
}