dma.c

/* Wrapper for DMA channel allocator that starts clocks etc */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <mach/dma.h>
#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/marb_defs.h>
#include <hwregs/clkgen_defs.h>
#include <hwregs/strmux_defs.h>
#include <linux/errno.h>
#include <arbiter.h>

/* Per-channel bookkeeping: allocation flag and the name of the owner. */
static char used_dma_channels[MAX_DMA_CHANNELS];
static const char *used_dma_channels_users[MAX_DMA_CHANNELS];

static DEFINE_SPINLOCK(dma_lock);
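
/*
 * crisv32_request_dma - claim a DMA channel for a device.
 *
 * Reserves arbiter bandwidth for the channel, enables the clock of the
 * block the channel belongs to and routes the channel to @owner through
 * the stream multiplexer.
 *
 * Returns 0 on success, -ENOMEM if the arbiter cannot grant the requested
 * bandwidth, -EBUSY if the channel is already taken and -EINVAL if the
 * channel number is out of range.
 *
 * Hypothetical usage sketch (the channel number, device_id string and
 * bandwidth value below are illustrative only, not taken from a real
 * driver):
 *
 *	if (crisv32_request_dma(4, "ser1 dma tr", DMA_VERBOSE_ON_ERROR,
 *				0, dma_ser1))
 *		return -EBUSY;
 *	...
 *	crisv32_free_dma(4);
 */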
int crisv32_request_dma(unsigned int dmanr, const char *device_id,
			unsigned options, unsigned int bandwidth,
			enum dma_owner owner)
{
	unsigned long flags;
	reg_clkgen_rw_clk_ctrl clk_ctrl;
	reg_strmux_rw_cfg strmux_cfg;

	if (crisv32_arbiter_allocate_bandwidth(dmanr,
			options & DMA_INT_MEM ? INT_REGION : EXT_REGION,
			bandwidth))
		return -ENOMEM;

	spin_lock_irqsave(&dma_lock, flags);

	if (used_dma_channels[dmanr]) {
		spin_unlock_irqrestore(&dma_lock, flags);
		if (options & DMA_VERBOSE_ON_ERROR)
			printk(KERN_ERR "Failed to request DMA %i for %s, "
				"already allocated by %s\n",
				dmanr,
				device_id,
				used_dma_channels_users[dmanr]);
		if (options & DMA_PANIC_ON_ERROR)
			panic("request_dma error!");
		return -EBUSY;
	}

	clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
	strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);
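
	/*
	 * Enable the clock of the block this channel belongs to; the
	 * clock-enable bits cover the DMA channels in pairs.
	 */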
	switch (dmanr) {
	case 0:
	case 1:
		clk_ctrl.dma0_1_eth = 1;
		break;
	case 2:
	case 3:
		clk_ctrl.dma2_3_strcop = 1;
		break;
	case 4:
	case 5:
		clk_ctrl.dma4_5_iop = 1;
		break;
	case 6:
	case 7:
		clk_ctrl.sser_ser_dma6_7 = 1;
		break;
	case 9:
	case 11:
		clk_ctrl.dma9_11 = 1;
		break;
#if MAX_DMA_CHANNELS - 1 != 11
#error Check dma.c
#endif
	default:
		spin_unlock_irqrestore(&dma_lock, flags);
		if (options & DMA_VERBOSE_ON_ERROR)
			printk(KERN_ERR "Failed to request DMA %i for %s, "
				"only 0-%i valid\n",
				dmanr, device_id, MAX_DMA_CHANNELS - 1);
		if (options & DMA_PANIC_ON_ERROR)
			panic("request_dma error!");
		return -EINVAL;
	}
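
	/*
	 * Route the channel to the requesting unit through the stream
	 * multiplexer; each owner is only valid on specific channels.
	 */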
	switch (owner) {
	case dma_eth:
		if (dmanr == 0)
			strmux_cfg.dma0 = regk_strmux_eth;
		else if (dmanr == 1)
			strmux_cfg.dma1 = regk_strmux_eth;
		else
			panic("Invalid DMA channel for eth\n");
		break;
	case dma_ser0:
		if (dmanr == 0)
			strmux_cfg.dma0 = regk_strmux_ser0;
		else if (dmanr == 1)
			strmux_cfg.dma1 = regk_strmux_ser0;
		else
			panic("Invalid DMA channel for ser0\n");
		break;
	case dma_ser3:
		if (dmanr == 2)
			strmux_cfg.dma2 = regk_strmux_ser3;
		else if (dmanr == 3)
			strmux_cfg.dma3 = regk_strmux_ser3;
		else
			panic("Invalid DMA channel for ser3\n");
		break;
	case dma_strp:
		if (dmanr == 2)
			strmux_cfg.dma2 = regk_strmux_strcop;
		else if (dmanr == 3)
			strmux_cfg.dma3 = regk_strmux_strcop;
		else
			panic("Invalid DMA channel for strp\n");
		break;
	case dma_ser1:
		if (dmanr == 4)
			strmux_cfg.dma4 = regk_strmux_ser1;
		else if (dmanr == 5)
			strmux_cfg.dma5 = regk_strmux_ser1;
		else
			panic("Invalid DMA channel for ser1\n");
		break;
	case dma_iop:
		if (dmanr == 4)
			strmux_cfg.dma4 = regk_strmux_iop;
		else if (dmanr == 5)
			strmux_cfg.dma5 = regk_strmux_iop;
		else
			panic("Invalid DMA channel for iop\n");
		break;
	case dma_ser2:
		if (dmanr == 6)
			strmux_cfg.dma6 = regk_strmux_ser2;
		else if (dmanr == 7)
			strmux_cfg.dma7 = regk_strmux_ser2;
		else
			panic("Invalid DMA channel for ser2\n");
		break;
	case dma_sser:
		if (dmanr == 6)
			strmux_cfg.dma6 = regk_strmux_sser;
		else if (dmanr == 7)
			strmux_cfg.dma7 = regk_strmux_sser;
		else
			panic("Invalid DMA channel for sser\n");
		break;
	case dma_ser4:
		if (dmanr == 9)
			strmux_cfg.dma9 = regk_strmux_ser4;
		else
			panic("Invalid DMA channel for ser4\n");
		break;
	case dma_jpeg:
		if (dmanr == 9)
			strmux_cfg.dma9 = regk_strmux_jpeg;
		else
			panic("Invalid DMA channel for JPEG\n");
		break;
	case dma_h264:
		if (dmanr == 11)
			strmux_cfg.dma11 = regk_strmux_h264;
		else
			panic("Invalid DMA channel for H264\n");
		break;
	}
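
	/* Record the new owner and commit the clock and mux settings. */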
	used_dma_channels[dmanr] = 1;
	used_dma_channels_users[dmanr] = device_id;

	REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
	REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);

	spin_unlock_irqrestore(&dma_lock, flags);
	return 0;
}
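
/*
 * crisv32_free_dma - mark a DMA channel as free again.
 *
 * Only the allocation flag is cleared; the clock and stream multiplexer
 * settings made by crisv32_request_dma() are left as they are.
 */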
void crisv32_free_dma(unsigned int dmanr)
{
	spin_lock(&dma_lock);
	used_dma_channels[dmanr] = 0;
	spin_unlock(&dma_lock);
}