/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/suspend.h>

#include "apbio.h"

#define APB_DMA_GEN 0x000
#define GEN_ENABLE (1<<31)

#define APB_DMA_CNTRL 0x010
#define APB_DMA_IRQ_MASK 0x01c
#define APB_DMA_IRQ_MASK_SET 0x020

#define APB_DMA_CHAN_CSR 0x000
#define CSR_ENB (1<<31)
#define CSR_IE_EOC (1<<30)
#define CSR_HOLD (1<<29)
#define CSR_DIR (1<<28)
#define CSR_ONCE (1<<27)
#define CSR_FLOW (1<<21)
#define CSR_REQ_SEL_SHIFT 16
#define CSR_WCOUNT_SHIFT 2
#define CSR_WCOUNT_MASK 0xFFFC

#define APB_DMA_CHAN_STA 0x004
#define STA_BUSY (1<<31)
#define STA_ISE_EOC (1<<30)
#define STA_HALT (1<<29)
#define STA_PING_PONG (1<<28)
#define STA_COUNT_SHIFT 2
#define STA_COUNT_MASK 0xFFFC

#define APB_DMA_CHAN_AHB_PTR 0x010

#define APB_DMA_CHAN_AHB_SEQ 0x014
#define AHB_SEQ_INTR_ENB (1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT 28
#define AHB_SEQ_BUS_WIDTH_MASK (0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8 (0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16 (1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32 (2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64 (3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128 (4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP (1<<27)
#define AHB_SEQ_BURST_MASK (0x7<<24)
#define AHB_SEQ_BURST_1 (4<<24)
#define AHB_SEQ_BURST_4 (5<<24)
#define AHB_SEQ_BURST_8 (6<<24)
#define AHB_SEQ_DBL_BUF (1<<19)
#define AHB_SEQ_WRAP_SHIFT 16
#define AHB_SEQ_WRAP_MASK (0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR 0x018

#define APB_DMA_CHAN_APB_SEQ 0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT 28
#define APB_SEQ_BUS_WIDTH_MASK (0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8 (0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16 (1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32 (2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64 (3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128 (4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP (1<<27)
#define APB_SEQ_WRAP_SHIFT 16
#define APB_SEQ_WRAP_MASK (0x7<<APB_SEQ_WRAP_SHIFT)

#define TEGRA_SYSTEM_DMA_CH_NR 16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM 4
#define TEGRA_SYSTEM_DMA_CH_MIN 0
#define TEGRA_SYSTEM_DMA_CH_MAX \
	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)

#define NV_DMA_MAX_TRASFER_SIZE 0x10000

static const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

static const unsigned int apb_addr_wrap_table[8] = {
	0, 1, 2, 4, 8, 16, 32, 64
};

static const unsigned int bus_width_table[5] = {
	8, 16, 32, 64, 128
};

#define TEGRA_DMA_NAME_SIZE 16

struct tegra_dma_channel {
	struct list_head list;
	int id;
	spinlock_t lock;
	char name[TEGRA_DMA_NAME_SIZE];
	void __iomem *addr;
	int mode;
	int irq;
	int req_transfer_count;
};

#define NV_DMA_MAX_CHANNELS 32

static bool tegra_dma_initialized;
static DEFINE_MUTEX(tegra_dma_lock);
static DEFINE_SPINLOCK(enable_lock);

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_stop(struct tegra_dma_channel *ch);

void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);

void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	if (tegra_dma_is_empty(ch))
		return;

	req = list_entry(ch->list.next, typeof(*req), node);

	tegra_dma_dequeue_req(ch, req);
	return;
}

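/*
 * Stop a channel: mask the end-of-conversion interrupt first, clear the
 * enable bit, then acknowledge any EOC status that was already latched so
 * it cannot fire after the channel is reprogrammed.
 */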
static void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	u32 csr;
	u32 status;

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}

static int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}

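/*
 * Read the channel status register.  When is_stop_dma is set, the whole
 * controller is briefly disabled so that the pending word count can be
 * sampled and the channel stopped without racing against the transfer;
 * the request is marked as aborted in that case.
 */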
static unsigned int get_channel_status(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req, bool is_stop_dma)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	unsigned int status;

	if (is_stop_dma) {
		/*
		 * STOP the DMA and get the transfer count.
		 * Getting the transfer count is tricky.
		 *  - Globally disable DMA on all channels
		 *  - Read the channel's status register to know the number
		 *    of pending bytes to be transferred.
		 *  - Stop the DMA channel
		 *  - Globally re-enable DMA to resume other transfers
		 */
		spin_lock(&enable_lock);
		writel(0, addr + APB_DMA_GEN);
		udelay(20);
		status = readl(ch->addr + APB_DMA_CHAN_STA);
		tegra_dma_stop(ch);
		writel(GEN_ENABLE, addr + APB_DMA_GEN);
		spin_unlock(&enable_lock);
		if (status & STA_ISE_EOC) {
			pr_err("Got DMA interrupt here, clearing\n");
			writel(status, ch->addr + APB_DMA_CHAN_STA);
		}
		req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
	} else {
		status = readl(ch->addr + APB_DMA_CHAN_STA);
	}
	return status;
}

/*
 * Convert the hardware word counts into the number of bytes transferred
 * for the request.  Should be called with the channel lock held.
 */
static unsigned int dma_active_count(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req, unsigned int status)
{
	unsigned int to_transfer;
	unsigned int req_transfer_count;
	unsigned int bytes_transferred;

	to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
	req_transfer_count = ch->req_transfer_count + 1;
	bytes_transferred = req_transfer_count;

	if (status & STA_BUSY)
		bytes_transferred -= to_transfer;

	/*
	 * In continuous transfer mode, DMA only tracks the count of the
	 * half DMA buffer. So, if the DMA already finished half the DMA
	 * then add the half buffer to the completed count.
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			bytes_transferred += req_transfer_count;
		if (status & STA_ISE_EOC)
			bytes_transferred += req_transfer_count;
	}

	bytes_transferred *= 4;

	return bytes_transferred;
}

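/*
 * Abort a specific queued request.  If the request is currently at the head
 * of the queue the channel is stopped, the number of bytes already moved is
 * recorded, and the next queued request (if any) is started before the
 * completion callback is invoked without the channel lock held.
 */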
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int stop = 0;

	spin_lock_irqsave(&ch->lock, irq_flags);

	if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
		stop = 1;

	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	if (!stop)
		goto skip_stop_dma;

	status = get_channel_status(ch, req, true);
	req->bytes_transferred = dma_active_count(ch, req, status);

	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;

		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}

skip_stop_dma:
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);

	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);

bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;
	bool is_empty;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list))
		is_empty = true;
	else
		is_empty = false;
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *req;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);

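/*
 * Queue a request on a channel.  The size and address alignment are
 * validated, duplicate requests are rejected, and the hardware is started
 * immediately if the queue was previously empty.
 */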
int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *_req;
	int start_dma = 0;

	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
		req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	list_for_each_entry(_req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return -EEXIST;
		}
	}

	req->bytes_transferred = 0;
	req->status = 0;
	req->buffer_status = 0;
	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma)
		tegra_dma_update_hw(ch, req);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);

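/*
 * Allocate a DMA channel.  TEGRA_DMA_SHARED requests always get the first
 * (shared) channel; otherwise the first unused channel in the bitmap is
 * claimed.  Returns NULL if the driver is not initialized or no channel
 * is free.
 */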
struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
	int channel;
	struct tegra_dma_channel *ch = NULL;

	if (!tegra_dma_initialized)
		return NULL;

	mutex_lock(&tegra_dma_lock);

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels))
			goto out;
	}
	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;

out:
	mutex_unlock(&tegra_dma_lock);
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);

void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	mutex_lock(&tegra_dma_lock);
	__clear_bit(ch->id, channel_usage);
	mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);

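/*
 * Reprogram only the APB and AHB pointers for the next buffer.  Used in
 * continuous mode, where the CSR and sequence registers already hold the
 * correct configuration.
 */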
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	u32 apb_ptr;
	u32 ahb_ptr;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;
	} else {
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;
	}
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
	return;
}

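/*
 * Fully program the channel for a request and enable it.  One-shot mode is
 * single buffered; continuous mode uses the double-buffer (ping-pong)
 * feature and interrupts after each half of the buffer.
 */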
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;

	u32 ahb_seq;
	u32 apb_seq;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 csr;

	csr = CSR_IE_EOC | CSR_FLOW;
	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
	apb_seq = 0;

	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;

	/*
	 * One shot mode is always single buffered;
	 * continuous mode is always double buffered.
	 */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		csr |= CSR_ONCE;
		ch->req_transfer_count = (req->size >> 2) - 1;
	} else {
		ahb_seq |= AHB_SEQ_DBL_BUF;

		/*
		 * In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is full.
		 */
		ch->req_transfer_count = (req->size >> 3) - 1;
	}

	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;
	} else {
		csr |= CSR_DIR;
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}

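/*
 * Threaded IRQ handler for one-shot transfers: complete the request at the
 * head of the queue, then start the next one unless the completion callback
 * already did so.
 */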
static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		bytes_transferred = ch->req_transfer_count;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock_irqsave(&ch->lock, irq_flags);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		   another req, in which case dma has already started */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

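/*
 * Threaded IRQ handler for continuous (double-buffered) transfers.  The
 * first interrupt marks the buffer half full and calls the threshold
 * callback after pre-loading the next request's pointers; the second marks
 * it full and calls complete().  If the ping-pong status shows we are out
 * of sync, the channel is stopped and the current request is released.
 */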
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			bool is_dma_ping_complete;

			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
						& STA_PING_PONG) ? true : false;
			if (req->to_memory)
				is_dma_ping_complete = !is_dma_ping_complete;

			/* Out of sync - Release current buffer */
			if (!is_dma_ping_complete) {
				int bytes_transferred;

				bytes_transferred = ch->req_transfer_count;
				bytes_transferred += 1;
				bytes_transferred <<= 3;
				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
				req->bytes_transferred = bytes_transferred;
				req->status = TEGRA_DMA_REQ_SUCCESS;
				tegra_dma_stop(ch);

				if (!list_is_last(&req->node, &ch->list)) {
					struct tegra_dma_req *next_req;

					next_req = list_entry(req->node.next,
						typeof(*next_req), node);
					tegra_dma_update_hw(ch, next_req);
				}
				list_del(&req->node);

				/* DMA lock is NOT held when callback is called */
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				req->complete(req);
				return;
			}

			/* Load the next request into the hardware, if available */
			if (!list_is_last(&req->node, &ch->list)) {
				struct tegra_dma_req *next_req;

				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full
			   (i.e. on the second interrupt) */
			int bytes_transferred;

			bytes_transferred = ch->req_transfer_count;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			list_del(&req->node);

			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			req->complete(req);
			return;

		} else {
			BUG();
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

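/* Hard IRQ half: acknowledge the EOC status and defer to the thread. */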
static irqreturn_t dma_isr(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;
	unsigned long status;

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
	else {
		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
		return IRQ_HANDLED;
	}
	return IRQ_WAKE_THREAD;
}

static irqreturn_t dma_thread_fn(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;

	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
		handle_oneshot_dma(ch);
	else
		handle_continuous_dma(ch);

	return IRQ_HANDLED;
}

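/*
 * Enable the APB DMA clock and controller, unmask interrupts for the
 * channels owned by the CPU and register a threaded IRQ handler for each
 * of them.  The channels reserved for the AVP are left untouched.
 */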
int __init tegra_dma_init(void)
{
	int ret = 0;
	int i;
	unsigned int irq;
	void __iomem *addr;
	struct clk *c;

	bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);

	c = clk_get_sys("tegra-dma", NULL);
	if (IS_ERR(c)) {
		pr_err("Unable to get clock for APB DMA\n");
		ret = PTR_ERR(c);
		goto fail;
	}

	ret = clk_enable(c);
	if (ret != 0) {
		pr_err("Unable to enable clock for APB DMA\n");
		goto fail;
	}

	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	writel(GEN_ENABLE, addr + APB_DMA_GEN);
	writel(0, addr + APB_DMA_CNTRL);
	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
	       addr + APB_DMA_IRQ_MASK_SET);

	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];

		ch->id = i;
		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->list);

		irq = INT_APB_DMA_CH0 + i;
		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
			dma_channels[i].name, ch);
		if (ret) {
			pr_err("Failed to register IRQ %d for DMA %d\n",
				irq, i);
			goto fail;
		}
		ch->irq = irq;

		__clear_bit(i, channel_usage);
	}
	/* mark the shared channel allocated */
	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

	tegra_dma_initialized = true;

	return 0;
fail:
	writel(0, addr + APB_DMA_GEN);
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (ch->irq)
			free_irq(ch->irq, ch);
	}
	return ret;
}
postcore_initcall(tegra_dma_init);

#ifdef CONFIG_PM
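/* Save and restore the global and per-channel registers across suspend. */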
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];

void tegra_dma_suspend(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	*ctx++ = readl(addr + APB_DMA_GEN);
	*ctx++ = readl(addr + APB_DMA_CNTRL);
	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
				  TEGRA_APB_DMA_CH0_SIZE * i);

		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
	}
}

void tegra_dma_resume(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	writel(*ctx++, addr + APB_DMA_GEN);
	writel(*ctx++, addr + APB_DMA_CNTRL);
	writel(*ctx++, addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
				  TEGRA_APB_DMA_CH0_SIZE * i);

		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
	}
}

#endif