dma.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471
  1. /*
  2. * linux/arch/arm/plat-omap/dma.c
  3. *
  4. * Copyright (C) 2003 - 2008 Nokia Corporation
  5. * Author: Juha Yrjölä <juha.yrjola@nokia.com>
  6. * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
  7. * Graphics DMA and LCD DMA graphics tranformations
  8. * by Imre Deak <imre.deak@nokia.com>
  9. * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
  10. * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
  11. * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
  12. *
  13. * Copyright (C) 2009 Texas Instruments
  14. * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
  15. *
  16. * Support functions for the OMAP internal DMA channels.
  17. *
  18. * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
  19. * Converted DMA library into DMA platform driver.
  20. * - G, Manjunath Kondaiah <manjugk@ti.com>
  21. *
  22. * This program is free software; you can redistribute it and/or modify
  23. * it under the terms of the GNU General Public License version 2 as
  24. * published by the Free Software Foundation.
  25. *
  26. */
  27. #include <linux/module.h>
  28. #include <linux/init.h>
  29. #include <linux/sched.h>
  30. #include <linux/spinlock.h>
  31. #include <linux/errno.h>
  32. #include <linux/interrupt.h>
  33. #include <linux/irq.h>
  34. #include <linux/io.h>
  35. #include <linux/slab.h>
  36. #include <linux/delay.h>
  37. #include <linux/omap-dma.h>
  38. #ifdef CONFIG_ARCH_OMAP1
  39. #include <mach/soc.h>
  40. #endif
  41. /*
  42. * MAX_LOGICAL_DMA_CH_COUNT: the maximum number of logical DMA
  43. * channels that an instance of the SDMA IP block can support. Used
  44. * to size arrays. (The actual maximum on a particular SoC may be less
  45. * than this -- for example, OMAP1 SDMA instances only support 17 logical
  46. * DMA channels.)
  47. */
  48. #define MAX_LOGICAL_DMA_CH_COUNT 32
  49. #undef DEBUG
  50. #ifndef CONFIG_ARCH_OMAP1
  51. enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
  52. DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
  53. };
  54. enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
  55. #endif
  56. #define OMAP_DMA_ACTIVE 0x01
  57. #define OMAP2_DMA_CSR_CLEAR_MASK 0xffffffff
  58. #define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
  59. static struct omap_system_dma_plat_info *p;
  60. static struct omap_dma_dev_attr *d;
  61. static void omap_clear_dma(int lch);
  62. static int omap_dma_set_prio_lch(int lch, unsigned char read_prio,
  63. unsigned char write_prio);
  64. static int enable_1510_mode;
  65. static u32 errata;
  66. static struct omap_dma_global_context_registers {
  67. u32 dma_irqenable_l0;
  68. u32 dma_irqenable_l1;
  69. u32 dma_ocp_sysconfig;
  70. u32 dma_gcr;
  71. } omap_dma_global_context;
/*
 * Book-keeping for a chain of linked logical DMA channels (OMAP2+
 * chaining API).  The q_* fields implement a circular queue over the
 * channels stored in linked_dmach_q (see the OMAP_DMA_CHAIN_* macros
 * below).
 */
struct dma_link_info {
	int *linked_dmach_q;	/* array of logical channel numbers in this chain */
	int no_of_lchs_linked;	/* number of entries in linked_dmach_q */
	int q_count;		/* channels currently queued */
	int q_tail;		/* index where the next channel is enqueued */
	int q_head;		/* index of the next channel to dequeue */
	int chain_state;	/* DMA_CHAIN_STARTED / DMA_CHAIN_NOTSTARTED */
	int chain_mode;		/* chaining mode -- presumably static vs dynamic; TODO confirm against chain API */
};
  81. static struct dma_link_info *dma_linked_lch;
  82. #ifndef CONFIG_ARCH_OMAP1
/* Chain handling macros */

/* Reset the circular channel queue of chain @chain_id to empty. */
#define OMAP_DMA_CHAIN_QINIT(chain_id)					\
	do {								\
		dma_linked_lch[chain_id].q_head =			\
		dma_linked_lch[chain_id].q_tail =			\
		dma_linked_lch[chain_id].q_count = 0;			\
	} while (0)

/* True when every linked channel of the chain is queued. */
#define OMAP_DMA_CHAIN_QFULL(chain_id)					\
	(dma_linked_lch[chain_id].no_of_lchs_linked ==			\
		dma_linked_lch[chain_id].q_count)
  93. #define OMAP_DMA_CHAIN_QLAST(chain_id) \
  94. do { \
  95. ((dma_linked_lch[chain_id].no_of_lchs_linked-1) == \
  96. dma_linked_lch[chain_id].q_count) \
  97. } while (0)
/* True when the chain's queue is empty. */
#define OMAP_DMA_CHAIN_QEMPTY(chain_id) \
	(0 == dma_linked_lch[chain_id].q_count)

/*
 * Advance queue index @end by one, wrapping modulo the number of
 * linked channels.  NOTE: expands a bare reference to 'chain_id', so
 * it only works where a variable of that name is in scope -- i.e.
 * from the INCQHEAD/INCQTAIL macros below.
 */
#define __OMAP_DMA_CHAIN_INCQ(end) \
	((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)

/* Dequeue: advance the head index and decrement the count. */
#define OMAP_DMA_CHAIN_INCQHEAD(chain_id) \
	do { \
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
		dma_linked_lch[chain_id].q_count--; \
	} while (0)

/* Enqueue: advance the tail index and increment the count. */
#define OMAP_DMA_CHAIN_INCQTAIL(chain_id) \
	do { \
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
		dma_linked_lch[chain_id].q_count++; \
	} while (0)
  112. #endif
  113. static int dma_lch_count;
  114. static int dma_chan_count;
  115. static int omap_dma_reserve_channels;
  116. static spinlock_t dma_chan_lock;
  117. static struct omap_dma_lch *dma_chan;
  118. static inline void disable_lnk(int lch);
  119. static void omap_disable_channel_irq(int lch);
  120. static inline void omap_enable_channel_irq(int lch);
  121. #define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \
  122. __func__);
#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
static int omap_dma_in_1510_mode(void)
{
	/* file-static flag; presumably set during driver init -- the
	 * assignment is outside this view, confirm in probe code */
	return enable_1510_mode;
}
#else
/* Non-15xx builds can never be in 1510 mode. */
#define omap_dma_in_1510_mode() 0
#endif
#ifdef CONFIG_ARCH_OMAP1
/*
 * Route global DMA request line @req (1-based) to device @dev via
 * the OMAP1 FUNC_MUX registers.  Each 32-bit register packs five
 * 6-bit device-select fields, hence the /5 register stride and
 * %5 * 6 field shift.
 */
static inline void set_gdma_dev(int req, int dev)
{
	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;	/* register holding this request's field */
	int shift = ((req - 1) % 5) * 6;			/* bit offset of the field within it */
	u32 l;

	l = omap_readl(reg);
	l &= ~(0x3f << shift);
	l |= (dev - 1) << shift;
	omap_writel(l, reg);
}
#else
/* No GDMA mux on OMAP2+; stub the helpers so common code compiles. */
#define set_gdma_dev(req, dev)	do {} while (0)
#define omap_readl(reg)		0
#define omap_writel(val, reg)	do {} while (0)
#endif
#ifdef CONFIG_ARCH_OMAP1
/*
 * OMAP1 variant: set the DMA access priority of traffic-controller
 * port @dst_port (@lch is unused here).  Priority is the 4-bit field
 * at bits 11:8 of the per-port TC priority register.
 */
void omap_set_dma_priority(int lch, int dst_port, int priority)
{
	unsigned long reg;
	u32 l;

	if (dma_omap1()) {
		switch (dst_port) {
		case OMAP_DMA_PORT_OCP_T1:	/* FFFECC00 */
			reg = OMAP_TC_OCPT1_PRIOR;
			break;
		case OMAP_DMA_PORT_OCP_T2:	/* FFFECCD0 */
			reg = OMAP_TC_OCPT2_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFF:	/* FFFECC08 */
			reg = OMAP_TC_EMIFF_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFS:	/* FFFECC04 */
			reg = OMAP_TC_EMIFS_PRIOR;
			break;
		default:
			BUG();
			return;
		}
		l = omap_readl(reg);
		l &= ~(0xf << 8);
		l |= (priority & 0xf) << 8;
		omap_writel(l, reg);
	}
}
#endif
  178. #ifdef CONFIG_ARCH_OMAP2PLUS
  179. void omap_set_dma_priority(int lch, int dst_port, int priority)
  180. {
  181. u32 ccr;
  182. ccr = p->dma_read(CCR, lch);
  183. if (priority)
  184. ccr |= (1 << 6);
  185. else
  186. ccr &= ~(1 << 6);
  187. p->dma_write(ccr, CCR, lch);
  188. }
  189. #endif
  190. EXPORT_SYMBOL(omap_set_dma_priority);
/*
 * Program the transfer shape of channel @lch: element data type,
 * element/frame counts, synchronization mode and (OMAP2+) the DMA
 * request trigger.
 */
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	/* Element data type: CSDP bits 1:0. */
	l = p->dma_read(CSDP, lch);
	l &= ~0x03;
	l |= data_type;
	p->dma_write(l, CSDP, lch);

	if (dma_omap1()) {
		u16 ccr;

		/* OMAP1: frame sync is CCR bit 5, block sync CCR2 bit 2. */
		ccr = p->dma_read(CCR, lch);
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		p->dma_write(ccr, CCR, lch);

		ccr = p->dma_read(CCR2, lch);
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		p->dma_write(ccr, CCR2, lch);
	}

	if (dma_omap2plus() && dma_trigger) {
		u32 val;

		val = p->dma_read(CCR, lch);

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((1 << 23) | (3 << 19) | 0x1f);
		/* Trigger bits above 4:0 are shifted into the upper field. */
		val |= (dma_trigger & ~0x1f) << 14;
		val |= dma_trigger & 0x1f;

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
			val &= ~(1 << 24);	/* dest synch */
			val |= (1 << 23);	/* Prefetch */
		} else if (src_or_dst_synch) {
			val |= 1 << 24;		/* source synch */
		} else {
			val &= ~(1 << 24);	/* dest synch */
		}
		p->dma_write(val, CCR, lch);
	}

	p->dma_write(elem_count, CEN, lch);
	p->dma_write(frame_count, CFN, lch);
}
EXPORT_SYMBOL(omap_set_dma_transfer_params);
  242. void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
  243. {
  244. if (dma_omap2plus()) {
  245. u32 csdp;
  246. csdp = p->dma_read(CSDP, lch);
  247. csdp &= ~(0x3 << 16);
  248. csdp |= (mode << 16);
  249. p->dma_write(csdp, CSDP, lch);
  250. }
  251. }
  252. EXPORT_SYMBOL(omap_set_dma_write_mode);
  253. void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
  254. {
  255. if (dma_omap1() && !dma_omap15xx()) {
  256. u32 l;
  257. l = p->dma_read(LCH_CTRL, lch);
  258. l &= ~0x7;
  259. l |= mode;
  260. p->dma_write(l, LCH_CTRL, lch);
  261. }
  262. }
  263. EXPORT_SYMBOL(omap_set_dma_channel_mode);
/*
 * Program the source side of channel @lch: port (OMAP1 only),
 * addressing mode, start address and element/frame indexes.
 * Note that src_port is only for omap1.
 */
void omap_set_dma_src_params(int lch, int src_port, int src_amode,
			     unsigned long src_start,
			     int src_ei, int src_fi)
{
	u32 l;

	if (dma_omap1()) {
		u16 w;

		/* Source port select: CSDP bits 6:2. */
		w = p->dma_read(CSDP, lch);
		w &= ~(0x1f << 2);
		w |= src_port << 2;
		p->dma_write(w, CSDP, lch);
	}

	/* Source addressing mode: CCR bits 13:12. */
	l = p->dma_read(CCR, lch);
	l &= ~(0x03 << 12);
	l |= src_amode << 12;
	p->dma_write(l, CCR, lch);

	p->dma_write(src_start, CSSA, lch);

	p->dma_write(src_ei, CSEI, lch);
	p->dma_write(src_fi, CSFI, lch);
}
EXPORT_SYMBOL(omap_set_dma_src_params);
/*
 * Convenience wrapper: apply a full parameter set to channel @lch by
 * delegating to the individual transfer/src/dest setters.
 */
void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
{
	omap_set_dma_transfer_params(lch, params->data_type,
				     params->elem_count, params->frame_count,
				     params->sync_mode, params->trigger,
				     params->src_or_dst_synch);
	omap_set_dma_src_params(lch, params->src_port,
				params->src_amode, params->src_start,
				params->src_ei, params->src_fi);
	omap_set_dma_dest_params(lch, params->dst_port,
				 params->dst_amode, params->dst_start,
				 params->dst_ei, params->dst_fi);
	/* Priorities are optional -- only touch CCR when one is requested. */
	if (params->read_prio || params->write_prio)
		omap_dma_set_prio_lch(lch, params->read_prio,
				      params->write_prio);
}
EXPORT_SYMBOL(omap_set_dma_params);
  303. void omap_set_dma_src_data_pack(int lch, int enable)
  304. {
  305. u32 l;
  306. l = p->dma_read(CSDP, lch);
  307. l &= ~(1 << 6);
  308. if (enable)
  309. l |= (1 << 6);
  310. p->dma_write(l, CSDP, lch);
  311. }
  312. EXPORT_SYMBOL(omap_set_dma_src_data_pack);
  313. void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
  314. {
  315. unsigned int burst = 0;
  316. u32 l;
  317. l = p->dma_read(CSDP, lch);
  318. l &= ~(0x03 << 7);
  319. switch (burst_mode) {
  320. case OMAP_DMA_DATA_BURST_DIS:
  321. break;
  322. case OMAP_DMA_DATA_BURST_4:
  323. if (dma_omap2plus())
  324. burst = 0x1;
  325. else
  326. burst = 0x2;
  327. break;
  328. case OMAP_DMA_DATA_BURST_8:
  329. if (dma_omap2plus()) {
  330. burst = 0x2;
  331. break;
  332. }
  333. /*
  334. * not supported by current hardware on OMAP1
  335. * w |= (0x03 << 7);
  336. * fall through
  337. */
  338. case OMAP_DMA_DATA_BURST_16:
  339. if (dma_omap2plus()) {
  340. burst = 0x3;
  341. break;
  342. }
  343. /*
  344. * OMAP1 don't support burst 16
  345. * fall through
  346. */
  347. default:
  348. BUG();
  349. }
  350. l |= (burst << 7);
  351. p->dma_write(l, CSDP, lch);
  352. }
  353. EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
/*
 * Program the destination side of channel @lch: port (OMAP1 only),
 * addressing mode, start address and element/frame indexes.
 * Note that dest_port is only for OMAP1.
 */
void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
			      unsigned long dest_start,
			      int dst_ei, int dst_fi)
{
	u32 l;

	if (dma_omap1()) {
		/* Destination port select: CSDP bits 13:9. */
		l = p->dma_read(CSDP, lch);
		l &= ~(0x1f << 9);
		l |= dest_port << 9;
		p->dma_write(l, CSDP, lch);
	}

	/* Destination addressing mode: CCR bits 15:14. */
	l = p->dma_read(CCR, lch);
	l &= ~(0x03 << 14);
	l |= dest_amode << 14;
	p->dma_write(l, CCR, lch);

	p->dma_write(dest_start, CDSA, lch);

	p->dma_write(dst_ei, CDEI, lch);
	p->dma_write(dst_fi, CDFI, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_params);
  375. void omap_set_dma_dest_data_pack(int lch, int enable)
  376. {
  377. u32 l;
  378. l = p->dma_read(CSDP, lch);
  379. l &= ~(1 << 13);
  380. if (enable)
  381. l |= 1 << 13;
  382. p->dma_write(l, CSDP, lch);
  383. }
  384. EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
/*
 * Program the destination burst size of channel @lch (CSDP bits
 * 15:14).  Unsupported combinations log an error and BUG().
 */
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(0x03 << 14);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		if (dma_omap2plus())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (dma_omap2plus())
			burst = 0x2;
		else
			burst = 0x3;
		break;
	case OMAP_DMA_DATA_BURST_16:
		if (dma_omap2plus()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		printk(KERN_ERR "Invalid DMA burst mode\n");
		BUG();
		/* unreachable when BUG() halts; presumably kept for
		 * configurations where it does not -- TODO confirm */
		return;
	}
	l |= (burst << 14);
	p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
/*
 * Clear any pending status of channel @lch, then arm the interrupt
 * sources recorded in dma_chan[lch].enabled_irqs.
 */
static inline void omap_enable_channel_irq(int lch)
{
	/* Clear CSR */
	if (dma_omap1())
		p->dma_read(CSR, lch);	/* OMAP1: CSR is clear-on-read */
	else
		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);

	/* Enable some nice interrupts. */
	p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
}
/*
 * Mask all interrupt sources of channel @lch and discard anything
 * already pending in CSR.
 */
static inline void omap_disable_channel_irq(int lch)
{
	/* disable channel interrupts */
	p->dma_write(0, CICR, lch);

	/* Clear CSR */
	if (dma_omap1())
		p->dma_read(CSR, lch);	/* OMAP1: CSR is clear-on-read */
	else
		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
}
/*
 * Add @bits to the interrupt sources that will be armed for channel
 * @lch the next time omap_enable_channel_irq() writes CICR.
 */
void omap_enable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs |= bits;
}
EXPORT_SYMBOL(omap_enable_dma_irq);
/*
 * Remove @bits from the interrupt sources armed for channel @lch
 * (takes effect on the next omap_enable_channel_irq()).
 */
void omap_disable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs &= ~bits;
}
EXPORT_SYMBOL(omap_disable_dma_irq);
/*
 * Turn on hardware channel linking for @lch: write the next channel
 * number plus ENABLE_LNK (bit 15) into CLNK_CTRL.  On OMAP1 the
 * STOP_LNK bit (14) is cleared first.
 */
static inline void enable_lnk(int lch)
{
	u32 l;

	l = p->dma_read(CLNK_CTRL, lch);

	if (dma_omap1())
		l &= ~(1 << 14);

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		l = dma_chan[lch].next_lch | (1 << 15);

#ifndef CONFIG_ARCH_OMAP1
	/* OMAP2+ chained transfers track the link in next_linked_ch. */
	if (dma_omap2plus())
		if (dma_chan[lch].next_linked_ch != -1)
			l = dma_chan[lch].next_linked_ch | (1 << 15);
#endif

	p->dma_write(l, CLNK_CTRL, lch);
}
/*
 * Break the hardware link of channel @lch (masking its interrupts
 * first) and mark the channel inactive.
 */
static inline void disable_lnk(int lch)
{
	u32 l;

	l = p->dma_read(CLNK_CTRL, lch);

	/* Disable interrupts */
	omap_disable_channel_irq(lch);

	if (dma_omap1()) {
		/* Set the STOP_LNK bit */
		l |= 1 << 14;
	}

	if (dma_omap2plus()) {
		/* Clear the ENABLE_LNK bit */
		l &= ~(1 << 15);
	}

	p->dma_write(l, CLNK_CTRL, lch);
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
/*
 * Unmask line-0 interrupt generation for channel @lch (OMAP2+ only).
 * Stale status is cleared before unmasking so an old event cannot
 * fire immediately.
 */
static inline void omap2_enable_irq_lch(int lch)
{
	u32 val;
	unsigned long flags;

	if (dma_omap1())
		return;

	spin_lock_irqsave(&dma_chan_lock, flags);
	/* clear IRQ STATUS */
	p->dma_write(1 << lch, IRQSTATUS_L0, lch);
	/* Enable interrupt */
	val = p->dma_read(IRQENABLE_L0, lch);
	val |= 1 << lch;
	p->dma_write(val, IRQENABLE_L0, lch);
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
/*
 * Mask line-0 interrupt generation for channel @lch (OMAP2+ only)
 * and clear any status that was already pending.
 */
static inline void omap2_disable_irq_lch(int lch)
{
	u32 val;
	unsigned long flags;

	if (dma_omap1())
		return;

	spin_lock_irqsave(&dma_chan_lock, flags);
	/* Disable interrupt */
	val = p->dma_read(IRQENABLE_L0, lch);
	val &= ~(1 << lch);
	p->dma_write(val, IRQENABLE_L0, lch);
	/* clear IRQ STATUS */
	p->dma_write(1 << lch, IRQSTATUS_L0, lch);
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
/*
 * Allocate a free logical DMA channel, bind it to @dev_id and
 * install @callback/@data.  The channel number is returned through
 * @dma_ch_out.  Returns 0 on success or -EBUSY when no channel is
 * free.  This API is deprecated in favour of the DMA engine
 * framework (hence the WARN below).
 */
int omap_request_dma(int dev_id, const char *dev_name,
		     void (*callback)(int lch, u16 ch_status, void *data),
		     void *data, int *dma_ch_out)
{
	int ch, free_ch = -1;
	unsigned long flags;
	struct omap_dma_lch *chan;

	WARN(strcmp(dev_name, "DMA engine"), "Using deprecated platform DMA API - please update to DMA engine");

	spin_lock_irqsave(&dma_chan_lock, flags);
	for (ch = 0; ch < dma_chan_count; ch++) {
		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
			free_ch = ch;
			/* Exit after first free channel found */
			break;
		}
	}
	if (free_ch == -1) {
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EBUSY;
	}
	chan = dma_chan + free_ch;
	/* Setting dev_id under the lock reserves the channel. */
	chan->dev_id = dev_id;

	if (p->clear_lch_regs)
		p->clear_lch_regs(free_ch);

	if (dma_omap2plus())
		omap_clear_dma(free_ch);

	spin_unlock_irqrestore(&dma_chan_lock, flags);

	chan->dev_name = dev_name;
	chan->callback = callback;
	chan->data = data;
	chan->flags = 0;

#ifndef CONFIG_ARCH_OMAP1
	if (dma_omap2plus()) {
		chan->chain_id = -1;
		chan->next_linked_ch = -1;
	}
#endif

	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (dma_omap1())
		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
	else if (dma_omap2plus())
		chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
			OMAP2_DMA_TRANS_ERR_IRQ;

	if (dma_omap16xx()) {
		/* If the sync device is set, configure it dynamically. */
		if (dev_id != 0) {
			set_gdma_dev(free_ch + 1, dev_id);
			dev_id = free_ch + 1;
		}
		/*
		 * Disable the 1510 compatibility mode and set the sync device
		 * id.
		 */
		p->dma_write(dev_id | (1 << 10), CCR, free_ch);
	} else if (dma_omap1()) {
		p->dma_write(dev_id, CCR, free_ch);
	}

	if (dma_omap2plus()) {
		omap_enable_channel_irq(free_ch);
		omap2_enable_irq_lch(free_ch);
	}

	*dma_ch_out = free_ch;

	return 0;
}
EXPORT_SYMBOL(omap_request_dma);
/*
 * Release channel @lch: mask and clear its interrupts, stop any
 * transfer in progress, clear its registers (OMAP2+) and mark the
 * channel unallocated again.
 */
void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
		       lch);
		return;
	}

	/* Disable interrupt for logical channel */
	if (dma_omap2plus())
		omap2_disable_irq_lch(lch);

	/* Disable all DMA interrupts for the channel. */
	omap_disable_channel_irq(lch);

	/* Make sure the DMA transfer is stopped. */
	p->dma_write(0, CCR, lch);

	/* Clear registers */
	if (dma_omap2plus())
		omap_clear_dma(lch);

	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
EXPORT_SYMBOL(omap_free_dma);
  607. /**
  608. * @brief omap_dma_set_global_params : Set global priority settings for dma
  609. *
  610. * @param arb_rate
  611. * @param max_fifo_depth
  612. * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
  613. * DMA_THREAD_RESERVE_ONET
  614. * DMA_THREAD_RESERVE_TWOT
  615. * DMA_THREAD_RESERVE_THREET
  616. */
  617. void
  618. omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
  619. {
  620. u32 reg;
  621. if (dma_omap1()) {
  622. printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
  623. return;
  624. }
  625. if (max_fifo_depth == 0)
  626. max_fifo_depth = 1;
  627. if (arb_rate == 0)
  628. arb_rate = 1;
  629. reg = 0xff & max_fifo_depth;
  630. reg |= (0x3 & tparams) << 12;
  631. reg |= (arb_rate & 0xff) << 16;
  632. p->dma_write(reg, GCR, 0);
  633. }
  634. EXPORT_SYMBOL(omap_dma_set_global_params);
/**
 * @brief omap_dma_set_prio_lch : Set channel wise priority settings
 *
 * @param lch
 * @param read_prio - Read priority
 * @param write_prio - Write priority
 * Both of the above can be set with one of the following values :
 * DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
 *
 * Returns 0 on success, -EINVAL for an out-of-range @lch.
 */
static int
omap_dma_set_prio_lch(int lch, unsigned char read_prio,
		      unsigned char write_prio)
{
	u32 l;

	if (unlikely((lch < 0 || lch >= dma_lch_count))) {
		printk(KERN_ERR "Invalid channel id\n");
		return -EINVAL;
	}
	l = p->dma_read(CCR, lch);
	l &= ~((1 << 6) | (1 << 26));
	/* The write-priority bit (26) only exists on IP revisions
	 * advertising IS_RW_PRIORITY. */
	if (d->dev_caps & IS_RW_PRIORITY)
		l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
	else
		l |= ((read_prio & 0x1) << 6);

	p->dma_write(l, CCR, lch);
	return 0;
}
/*
 * Clears any DMA state so the DMA engine is ready to restart with new buffers
 * through omap_start_dma(). Any buffers in flight are discarded.
 */
static void omap_clear_dma(int lch)
{
	unsigned long flags;

	/* Run with IRQs off so the DMA interrupt handler cannot see a
	 * half-cleared channel. */
	local_irq_save(flags);
	p->clear_dma(lch);
	local_irq_restore(flags);
}
/*
 * Start a transfer on channel @lch.  When @lch heads a linked list
 * of channels, every channel in the (possibly circular) list is
 * linked and armed first; finally CCR_EN is set on @lch itself.
 */
void omap_start_dma(int lch)
{
	u32 l;

	/*
	 * The CPC/CDAC register needs to be initialized to zero
	 * before starting dma transfer.
	 */
	if (dma_omap15xx())
		p->dma_write(0, CPC, lch);
	else
		p->dma_write(0, CDAC, lch);

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch;
		char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];

		/* Set the link register of the first channel */
		enable_lnk(lch);

		/* Walk the list, using the visited map to terminate
		 * on circular links. */
		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		dma_chan_link_map[lch] = 1;

		cur_lch = dma_chan[lch].next_lch;
		do {
			next_lch = dma_chan[cur_lch].next_lch;

			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			enable_lnk(cur_lch);
			omap_enable_channel_irq(cur_lch);

			cur_lch = next_lch;
		} while (next_lch != -1);
	} else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
		/* Errata workaround: point the channel's link at itself. */
		p->dma_write(lch, CLNK_CTRL, lch);

	omap_enable_channel_irq(lch);

	l = p->dma_read(CCR, lch);

	if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
		l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
	l |= OMAP_DMA_CCR_EN;

	/*
	 * As dma_write() uses IO accessors which are weakly ordered, there
	 * is no guarantee that data in coherent DMA memory will be visible
	 * to the DMA device. Add a memory barrier here to ensure that any
	 * such data is visible prior to enabling DMA.
	 */
	mb();
	p->dma_write(l, CCR, lch);

	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_start_dma);
/*
 * Stop channel @lch.  Under erratum i541 (src/dst-synchronized
 * transfers) the sDMA module is first forced out of standby so its
 * FIFO can drain before CCR_EN is cleared.  Channels linked behind
 * @lch are unlinked afterwards.
 */
void omap_stop_dma(int lch)
{
	u32 l;

	/* Disable all interrupts on the channel */
	omap_disable_channel_irq(lch);

	l = p->dma_read(CCR, lch);
	if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
			(l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
		int i = 0;
		u32 sys_cf;

		/* Configure No-Standby */
		l = p->dma_read(OCP_SYSCONFIG, lch);
		sys_cf = l;
		l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		/* NOTE(review): written with channel 0 here but restored
		 * below with @lch -- presumably OCP_SYSCONFIG is a
		 * module-global register whose channel argument is
		 * ignored; confirm against the dma_write implementation. */
		p->dma_write(l , OCP_SYSCONFIG, 0);

		l = p->dma_read(CCR, lch);
		l &= ~OMAP_DMA_CCR_EN;
		p->dma_write(l, CCR, lch);

		/* Wait for sDMA FIFO drain */
		l = p->dma_read(CCR, lch);
		while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
					OMAP_DMA_CCR_WR_ACTIVE))) {
			udelay(5);
			i++;
			l = p->dma_read(CCR, lch);
		}
		if (i >= 100)
			pr_err("DMA drain did not complete on lch %d\n", lch);
		/* Restore OCP_SYSCONFIG */
		p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
	} else {
		l &= ~OMAP_DMA_CCR_EN;
		p->dma_write(l, CCR, lch);
	}

	/*
	 * Ensure that data transferred by DMA is visible to any access
	 * after DMA has been disabled. This is important for coherent
	 * DMA regions.
	 */
	mb();

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch = lch;
		char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];

		/* Unlink every channel in the (possibly circular) list. */
		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		do {
			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;
			disable_lnk(cur_lch);

			next_lch = dma_chan[cur_lch].next_lch;
			cur_lch = next_lch;
		} while (next_lch != -1);
	}

	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_stop_dma);
/*
 * Allows changing the DMA callback function or data. This may be needed if
 * the driver shares a single DMA channel for multiple dma triggers.
 * Returns 0 on success, -ENODEV for a negative @lch and -EINVAL when
 * the channel is not currently allocated.
 */
int omap_set_dma_callback(int lch,
			  void (*callback)(int lch, u16 ch_status, void *data),
			  void *data)
{
	unsigned long flags;

	if (lch < 0)
		return -ENODEV;

	/* Lock against concurrent allocation/free of the channel. */
	spin_lock_irqsave(&dma_chan_lock, flags);
	if (dma_chan[lch].dev_id == -1) {
		printk(KERN_ERR "DMA callback for not set for free channel\n");
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EINVAL;
	}
	dma_chan[lch].callback = callback;
	dma_chan[lch].data = data;
	spin_unlock_irqrestore(&dma_chan_lock, flags);

	return 0;
}
EXPORT_SYMBOL(omap_set_dma_callback);
/*
 * Returns current physical source address for the given DMA channel.
 * If the channel is running the caller must disable interrupts prior calling
 * this function and process the returned value before re-enabling interrupt to
 * prevent races with the interrupt handler. Note that in continuous mode there
 * is a chance for CSSA_L register overflow between the two reads resulting
 * in incorrect return value.
 */
dma_addr_t omap_get_dma_src_pos(int lch)
{
	dma_addr_t offset = 0;

	if (dma_omap15xx())
		offset = p->dma_read(CPC, lch);
	else
		offset = p->dma_read(CSAC, lch);

	/* Erratum 3.3: the first CSAC read may spuriously return 0;
	 * retry once. */
	if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
		offset = p->dma_read(CSAC, lch);

	if (!dma_omap15xx()) {
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (likely(p->dma_read(CDAC, lch)))
			offset = p->dma_read(CSAC, lch);
		else
			offset = p->dma_read(CSSA, lch);
	}

	/* OMAP1: merge in the upper 16 address bits from CSSA. */
	if (dma_omap1())
		offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);

	return offset;
}
EXPORT_SYMBOL(omap_get_dma_src_pos);
  836. /*
  837. * Returns current physical destination address for the given DMA channel.
  838. * If the channel is running the caller must disable interrupts prior calling
  839. * this function and process the returned value before re-enabling interrupt to
  840. * prevent races with the interrupt handler. Note that in continuous mode there
  841. * is a chance for CDSA_L register overflow between the two reads resulting
  842. * in incorrect return value.
  843. */
dma_addr_t omap_get_dma_dst_pos(int lch)
{
	dma_addr_t offset = 0;

	/* OMAP15xx only has the channel progress counter (CPC). */
	if (dma_omap15xx())
		offset = p->dma_read(CPC, lch);
	else
		offset = p->dma_read(CDAC, lch);

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!dma_omap15xx() && offset == 0) {
		offset = p->dma_read(CDAC, lch);
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed destination start address in this case.
		 */
		if (unlikely(!offset))
			offset = p->dma_read(CDSA, lch);
	}

	/*
	 * On OMAP1 the position register appears to hold only the low
	 * 16 bits; merge in the upper half from the programmed
	 * destination start address.
	 */
	if (dma_omap1())
		offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);

	return offset;
}
  869. EXPORT_SYMBOL(omap_get_dma_dst_pos);
  870. int omap_get_dma_active_status(int lch)
  871. {
  872. return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
  873. }
  874. EXPORT_SYMBOL(omap_get_dma_active_status);
  875. int omap_dma_running(void)
  876. {
  877. int lch;
  878. if (dma_omap1())
  879. if (omap_lcd_dma_running())
  880. return 1;
  881. for (lch = 0; lch < dma_chan_count; lch++)
  882. if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
  883. return 1;
  884. return 0;
  885. }
  886. /*
  887. * lch_queue DMA will start right after lch_head one is finished.
  888. * For this DMA link to start, you still need to start (see omap_start_dma)
  889. * the first one. That will fire up the entire queue.
  890. */
  891. void omap_dma_link_lch(int lch_head, int lch_queue)
  892. {
  893. if (omap_dma_in_1510_mode()) {
  894. if (lch_head == lch_queue) {
  895. p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
  896. CCR, lch_head);
  897. return;
  898. }
  899. printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
  900. BUG();
  901. return;
  902. }
  903. if ((dma_chan[lch_head].dev_id == -1) ||
  904. (dma_chan[lch_queue].dev_id == -1)) {
  905. pr_err("omap_dma: trying to link non requested channels\n");
  906. dump_stack();
  907. }
  908. dma_chan[lch_head].next_lch = lch_queue;
  909. }
  910. EXPORT_SYMBOL(omap_dma_link_lch);
  911. /*----------------------------------------------------------------------------*/
  912. #ifdef CONFIG_ARCH_OMAP1
/*
 * Service one OMAP1 DMA channel's interrupt status.
 * Returns 1 when an event was handled, 0 when there was nothing to do
 * (so the caller's loop can terminate).
 */
static int omap1_dma_handle_ch(int ch)
{
	u32 csr;

	/*
	 * In 1510 mode channels 6+ have no own CSR; their status was
	 * stashed into saved_csr by an earlier pass over the paired
	 * low channel (see below). Consume and clear the saved copy.
	 */
	if (enable_1510_mode && ch >= 6) {
		csr = dma_chan[ch].saved_csr;
		dma_chan[ch].saved_csr = 0;
	} else
		csr = p->dma_read(CSR, ch);
	/*
	 * In 1510 mode, bits 7 and up of channel 0..2's CSR belong to
	 * the paired channel ch + 6: save them for a later pass and
	 * keep only this channel's low bits.
	 */
	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
		dma_chan[ch + 6].saved_csr = csr >> 7;
		csr &= 0x7f;
	}
	/* No event bits set — nothing to handle. */
	if ((csr & 0x3f) == 0)
		return 0;
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		pr_warn("Spurious interrupt from DMA channel %d (CSR %04x)\n",
			ch, csr);
		return 0;
	}
	if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
		pr_warn("DMA timeout with device %d\n", dma_chan[ch].dev_id);
	if (unlikely(csr & OMAP_DMA_DROP_IRQ))
		pr_warn("DMA synchronization event drop occurred with device %d\n",
			dma_chan[ch].dev_id);
	/* Block-complete interrupt: the transfer is finished. */
	if (likely(csr & OMAP_DMA_BLOCK_IRQ))
		dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
	return 1;
}
  943. static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
  944. {
  945. int ch = ((int) dev_id) - 1;
  946. int handled = 0;
  947. for (;;) {
  948. int handled_now = 0;
  949. handled_now += omap1_dma_handle_ch(ch);
  950. if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
  951. handled_now += omap1_dma_handle_ch(ch + 6);
  952. if (!handled_now)
  953. break;
  954. handled += handled_now;
  955. }
  956. return handled ? IRQ_HANDLED : IRQ_NONE;
  957. }
  958. #else
  959. #define omap1_dma_irq_handler NULL
  960. #endif
  961. #ifdef CONFIG_ARCH_OMAP2PLUS
/*
 * Service one OMAP2+ DMA channel: log error conditions, acknowledge the
 * interrupt, advance any channel chain, and invoke the client callback.
 */
static int omap2_dma_handle_ch(int ch)
{
	u32 status = p->dma_read(CSR, ch);

	/* No status bits: still ack the L0 line for this channel. */
	if (!status) {
		if (printk_ratelimit())
			pr_warn("Spurious DMA IRQ for lch %d\n", ch);
		p->dma_write(1 << ch, IRQSTATUS_L0, ch);
		return 0;
	}
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		if (printk_ratelimit())
			pr_warn("IRQ %04x for non-allocated DMA channel %d\n",
				status, ch);
		return 0;
	}
	if (unlikely(status & OMAP_DMA_DROP_IRQ))
		pr_info("DMA synchronization event drop occurred with device %d\n",
			dma_chan[ch].dev_id);
	if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
		printk(KERN_INFO "DMA transaction error with device %d\n",
		       dma_chan[ch].dev_id);
		if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
			u32 ccr;

			/* Erratum i378 workaround: disable the channel on
			 * a transaction error by clearing CCR_EN. */
			ccr = p->dma_read(CCR, ch);
			ccr &= ~OMAP_DMA_CCR_EN;
			p->dma_write(ccr, CCR, ch);
			dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
		}
	}
	if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
		printk(KERN_INFO "DMA secure error with device %d\n",
		       dma_chan[ch].dev_id);
	if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
		printk(KERN_INFO "DMA misaligned error with device %d\n",
		       dma_chan[ch].dev_id);

	/* Ack: write-1-to-clear the channel status, then the L0 line. */
	p->dma_write(status, CSR, ch);
	p->dma_write(1 << ch, IRQSTATUS_L0, ch);
	/* read back the register to flush the write */
	p->dma_read(IRQSTATUS_L0, ch);

	/* If the ch is not chained then chain_id will be -1 */
	if (dma_chan[ch].chain_id != -1) {
		int chain_id = dma_chan[ch].chain_id;
		dma_chan[ch].state = DMA_CH_NOTSTARTED;
		/* Bit 15 of CLNK_CTRL = link enabled: hand the STARTED
		 * state to the next linked channel in the chain. */
		if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
			dma_chan[dma_chan[ch].next_linked_ch].state =
							DMA_CH_STARTED;
		if (dma_linked_lch[chain_id].chain_mode ==
						OMAP_DMA_DYNAMIC_CHAIN)
			disable_lnk(ch);

		if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
			OMAP_DMA_CHAIN_INCQHEAD(chain_id);

		/* Re-read and clear CSR — presumably so the callback sees
		 * the post-chaining status; TODO(review) confirm intent. */
		status = p->dma_read(CSR, ch);
		p->dma_write(status, CSR, ch);
	}

	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, status, dma_chan[ch].data);

	return 0;
}
  1020. /* STATUS register count is from 1-32 while our is 0-31 */
  1021. static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
  1022. {
  1023. u32 val, enable_reg;
  1024. int i;
  1025. val = p->dma_read(IRQSTATUS_L0, 0);
  1026. if (val == 0) {
  1027. if (printk_ratelimit())
  1028. printk(KERN_WARNING "Spurious DMA IRQ\n");
  1029. return IRQ_HANDLED;
  1030. }
  1031. enable_reg = p->dma_read(IRQENABLE_L0, 0);
  1032. val &= enable_reg; /* Dispatch only relevant interrupts */
  1033. for (i = 0; i < dma_lch_count && val != 0; i++) {
  1034. if (val & 1)
  1035. omap2_dma_handle_ch(i);
  1036. val >>= 1;
  1037. }
  1038. return IRQ_HANDLED;
  1039. }
/* IRQ action for the single shared OMAP2+ system-DMA interrupt line. */
static struct irqaction omap24xx_dma_irq = {
	.name = "DMA",
	.handler = omap2_dma_irq_handler,
};
  1044. #else
  1045. static struct irqaction omap24xx_dma_irq;
  1046. #endif
  1047. /*----------------------------------------------------------------------------*/
  1048. /*
  1049. * Note that we are currently using only IRQENABLE_L0 and L1.
  1050. * As the DSP may be using IRQENABLE_L2 and L3, let's not
  1051. * touch those for now.
  1052. */
/*
 * Save the global (non per-channel) DMA registers: the L0/L1 interrupt
 * enables, OCP_SYSCONFIG and GCR. NOTE(review): presumably called
 * before a power transition that loses DMA context — confirm with
 * callers.
 */
void omap_dma_global_context_save(void)
{
	omap_dma_global_context.dma_irqenable_l0 =
		p->dma_read(IRQENABLE_L0, 0);
	omap_dma_global_context.dma_irqenable_l1 =
		p->dma_read(IRQENABLE_L1, 0);
	omap_dma_global_context.dma_ocp_sysconfig =
		p->dma_read(OCP_SYSCONFIG, 0);
	omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
}
/*
 * Restore the registers saved by omap_dma_global_context_save() and
 * reset every channel that is still owned by a driver.
 */
void omap_dma_global_context_restore(void)
{
	int ch;

	p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
	p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
		OCP_SYSCONFIG, 0);
	p->dma_write(omap_dma_global_context.dma_irqenable_l0,
		IRQENABLE_L0, 0);
	p->dma_write(omap_dma_global_context.dma_irqenable_l1,
		IRQENABLE_L1, 0);

	/* ROM code bug workaround: clear the two low L0 status bits. */
	if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
		p->dma_write(0x3 , IRQSTATUS_L0, 0);

	/* Per-channel state did not survive; clear every in-use channel. */
	for (ch = 0; ch < dma_chan_count; ch++)
		if (dma_chan[ch].dev_id != -1)
			omap_clear_dma(ch);
}
/* Expose the platform data (register accessors, attributes, errata)
 * set up in probe to other drivers. */
struct omap_system_dma_plat_info *omap_get_plat_info(void)
{
	return p;
}
  1083. EXPORT_SYMBOL_GPL(omap_get_plat_info);
  1084. static int omap_system_dma_probe(struct platform_device *pdev)
  1085. {
  1086. int ch, ret = 0;
  1087. int dma_irq;
  1088. char irq_name[4];
  1089. int irq_rel;
  1090. p = pdev->dev.platform_data;
  1091. if (!p) {
  1092. dev_err(&pdev->dev,
  1093. "%s: System DMA initialized without platform data\n",
  1094. __func__);
  1095. return -EINVAL;
  1096. }
  1097. d = p->dma_attr;
  1098. errata = p->errata;
  1099. if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
  1100. && (omap_dma_reserve_channels < d->lch_count))
  1101. d->lch_count = omap_dma_reserve_channels;
  1102. dma_lch_count = d->lch_count;
  1103. dma_chan_count = dma_lch_count;
  1104. enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;
  1105. dma_chan = devm_kcalloc(&pdev->dev, dma_lch_count,
  1106. sizeof(struct omap_dma_lch), GFP_KERNEL);
  1107. if (!dma_chan) {
  1108. dev_err(&pdev->dev, "%s: kzalloc fail\n", __func__);
  1109. return -ENOMEM;
  1110. }
  1111. if (dma_omap2plus()) {
  1112. dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
  1113. dma_lch_count, GFP_KERNEL);
  1114. if (!dma_linked_lch) {
  1115. ret = -ENOMEM;
  1116. goto exit_dma_lch_fail;
  1117. }
  1118. }
  1119. spin_lock_init(&dma_chan_lock);
  1120. for (ch = 0; ch < dma_chan_count; ch++) {
  1121. omap_clear_dma(ch);
  1122. if (dma_omap2plus())
  1123. omap2_disable_irq_lch(ch);
  1124. dma_chan[ch].dev_id = -1;
  1125. dma_chan[ch].next_lch = -1;
  1126. if (ch >= 6 && enable_1510_mode)
  1127. continue;
  1128. if (dma_omap1()) {
  1129. /*
  1130. * request_irq() doesn't like dev_id (ie. ch) being
  1131. * zero, so we have to kludge around this.
  1132. */
  1133. sprintf(&irq_name[0], "%d", ch);
  1134. dma_irq = platform_get_irq_byname(pdev, irq_name);
  1135. if (dma_irq < 0) {
  1136. ret = dma_irq;
  1137. goto exit_dma_irq_fail;
  1138. }
  1139. /* INT_DMA_LCD is handled in lcd_dma.c */
  1140. if (dma_irq == INT_DMA_LCD)
  1141. continue;
  1142. ret = request_irq(dma_irq,
  1143. omap1_dma_irq_handler, 0, "DMA",
  1144. (void *) (ch + 1));
  1145. if (ret != 0)
  1146. goto exit_dma_irq_fail;
  1147. }
  1148. }
  1149. if (d->dev_caps & IS_RW_PRIORITY)
  1150. omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
  1151. DMA_DEFAULT_FIFO_DEPTH, 0);
  1152. if (dma_omap2plus() && !(d->dev_caps & DMA_ENGINE_HANDLE_IRQ)) {
  1153. strcpy(irq_name, "0");
  1154. dma_irq = platform_get_irq_byname(pdev, irq_name);
  1155. if (dma_irq < 0) {
  1156. dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
  1157. ret = dma_irq;
  1158. goto exit_dma_lch_fail;
  1159. }
  1160. ret = setup_irq(dma_irq, &omap24xx_dma_irq);
  1161. if (ret) {
  1162. dev_err(&pdev->dev, "set_up failed for IRQ %d for DMA (error %d)\n",
  1163. dma_irq, ret);
  1164. goto exit_dma_lch_fail;
  1165. }
  1166. }
  1167. /* reserve dma channels 0 and 1 in high security devices on 34xx */
  1168. if (d->dev_caps & HS_CHANNELS_RESERVED) {
  1169. pr_info("Reserving DMA channels 0 and 1 for HS ROM code\n");
  1170. dma_chan[0].dev_id = 0;
  1171. dma_chan[1].dev_id = 1;
  1172. }
  1173. p->show_dma_caps();
  1174. return 0;
  1175. exit_dma_irq_fail:
  1176. dev_err(&pdev->dev, "unable to request IRQ %d for DMA (error %d)\n",
  1177. dma_irq, ret);
  1178. for (irq_rel = 0; irq_rel < ch; irq_rel++) {
  1179. dma_irq = platform_get_irq(pdev, irq_rel);
  1180. free_irq(dma_irq, (void *)(irq_rel + 1));
  1181. }
  1182. exit_dma_lch_fail:
  1183. return ret;
  1184. }
  1185. static int omap_system_dma_remove(struct platform_device *pdev)
  1186. {
  1187. int dma_irq;
  1188. if (dma_omap2plus()) {
  1189. char irq_name[4];
  1190. strcpy(irq_name, "0");
  1191. dma_irq = platform_get_irq_byname(pdev, irq_name);
  1192. if (dma_irq >= 0)
  1193. remove_irq(dma_irq, &omap24xx_dma_irq);
  1194. } else {
  1195. int irq_rel = 0;
  1196. for ( ; irq_rel < dma_chan_count; irq_rel++) {
  1197. dma_irq = platform_get_irq(pdev, irq_rel);
  1198. free_irq(dma_irq, (void *)(irq_rel + 1));
  1199. }
  1200. }
  1201. return 0;
  1202. }
/* Platform driver glue; the matching device is created by board code. */
static struct platform_driver omap_system_dma_driver = {
	.probe = omap_system_dma_probe,
	.remove = omap_system_dma_remove,
	.driver = {
		.name = "omap_dma_system"
	},
};
/* Register the driver early (arch_initcall) so DMA clients can probe. */
static int __init omap_system_dma_init(void)
{
	return platform_driver_register(&omap_system_dma_driver);
}
  1214. arch_initcall(omap_system_dma_init);
/* Module unload: unregister the platform driver. */
static void __exit omap_system_dma_exit(void)
{
	platform_driver_unregister(&omap_system_dma_driver);
}
  1219. MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
  1220. MODULE_LICENSE("GPL");
  1221. MODULE_ALIAS("platform:" DRIVER_NAME);
  1222. MODULE_AUTHOR("Texas Instruments Inc");
  1223. /*
  1224. * Reserve the omap SDMA channels using cmdline bootarg
  1225. * "omap_dma_reserve_ch=". The valid range is 1 to 32
  1226. */
  1227. static int __init omap_dma_cmdline_reserve_ch(char *str)
  1228. {
  1229. if (get_option(&str, &omap_dma_reserve_channels) != 1)
  1230. omap_dma_reserve_channels = 0;
  1231. return 1;
  1232. }
  1233. __setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);