/* dma.c (16 KB) — pagination residue from source extraction removed */
  1. /* linux/arch/arm/plat-s3c64xx/dma.c
  2. *
  3. * Copyright 2009 Openmoko, Inc.
  4. * Copyright 2009 Simtec Electronics
  5. * Ben Dooks <ben@simtec.co.uk>
  6. * http://armlinux.simtec.co.uk/
  7. *
  8. * S3C64XX DMA core
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License version 2 as
  12. * published by the Free Software Foundation.
  13. */
  14. #include <linux/kernel.h>
  15. #include <linux/module.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/dmapool.h>
  18. #include <linux/device.h>
  19. #include <linux/errno.h>
  20. #include <linux/slab.h>
  21. #include <linux/delay.h>
  22. #include <linux/clk.h>
  23. #include <linux/err.h>
  24. #include <linux/io.h>
  25. #include <mach/dma.h>
  26. #include <mach/map.h>
  27. #include <mach/irqs.h>
  28. #include <mach/regs-sys.h>
  29. #include <asm/hardware/pl080.h>
/* dma channel state information */

/*
 * struct s3c64xx_dmac - per-controller state for one PL080 DMA block.
 * @dev:      device-model handle, registered on the s3c64xx-dma subsys.
 * @clk:      controller clock, enabled for the lifetime of the controller.
 * @regs:     ioremapped base of the controller's register block.
 * @channels: pointer to the first of this controller's 8 channel structs.
 * @chanbase: first DMA channel number served by this controller.
 */
struct s3c64xx_dmac {
	struct device dev;
	struct clk *clk;
	void __iomem *regs;
	struct s3c2410_dma_chan *channels;
	enum dma_ch chanbase;
};
/* pool to provide LLI buffers */
static struct dma_pool *dma_pool;

/* Debug configuration and code */

/* non-zero => dbg_showbuffs() also dumps each queued LLI entry */
static unsigned char debug_show_buffs = 0;
  42. static void dbg_showchan(struct s3c2410_dma_chan *chan)
  43. {
  44. pr_debug("DMA%d: %08x->%08x L %08x C %08x,%08x S %08x\n",
  45. chan->number,
  46. readl(chan->regs + PL080_CH_SRC_ADDR),
  47. readl(chan->regs + PL080_CH_DST_ADDR),
  48. readl(chan->regs + PL080_CH_LLI),
  49. readl(chan->regs + PL080_CH_CONTROL),
  50. readl(chan->regs + PL080S_CH_CONTROL2),
  51. readl(chan->regs + PL080S_CH_CONFIG));
  52. }
/* dump one PL080S linked-list item (addresses, chain link, control words) */
static void show_lli(struct pl080s_lli *lli)
{
	pr_debug("LLI[%p] %08x->%08x, NL %08x C %08x,%08x\n",
		 lli, lli->src_addr, lli->dst_addr, lli->next_lli,
		 lli->control0, lli->control1);
}
  59. static void dbg_showbuffs(struct s3c2410_dma_chan *chan)
  60. {
  61. struct s3c64xx_dma_buff *ptr;
  62. struct s3c64xx_dma_buff *end;
  63. pr_debug("DMA%d: buffs next %p, curr %p, end %p\n",
  64. chan->number, chan->next, chan->curr, chan->end);
  65. ptr = chan->next;
  66. end = chan->end;
  67. if (debug_show_buffs) {
  68. for (; ptr != NULL; ptr = ptr->next) {
  69. pr_debug("DMA%d: %08x ",
  70. chan->number, ptr->lli_dma);
  71. show_lli(ptr->lli);
  72. }
  73. }
  74. }
  75. /* End of Debug */
  76. static struct s3c2410_dma_chan *s3c64xx_dma_map_channel(unsigned int channel)
  77. {
  78. struct s3c2410_dma_chan *chan;
  79. unsigned int start, offs;
  80. start = 0;
  81. if (channel >= DMACH_PCM1_TX)
  82. start = 8;
  83. for (offs = 0; offs < 8; offs++) {
  84. chan = &s3c2410_chans[start + offs];
  85. if (!chan->in_use)
  86. goto found;
  87. }
  88. return NULL;
  89. found:
  90. s3c_dma_chan_map[channel] = chan;
  91. return chan;
  92. }
  93. int s3c2410_dma_config(enum dma_ch channel, int xferunit)
  94. {
  95. struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
  96. if (chan == NULL)
  97. return -EINVAL;
  98. switch (xferunit) {
  99. case 1:
  100. chan->hw_width = 0;
  101. break;
  102. case 2:
  103. chan->hw_width = 1;
  104. break;
  105. case 4:
  106. chan->hw_width = 2;
  107. break;
  108. default:
  109. printk(KERN_ERR "%s: illegal width %d\n", __func__, xferunit);
  110. return -EINVAL;
  111. }
  112. return 0;
  113. }
  114. EXPORT_SYMBOL(s3c2410_dma_config);
/*
 * s3c64xx_dma_fill_lli - build one PL080S linked-list item for @chan.
 * @lli:  the LLI to fill (from dma_pool).
 * @data: bus address of the memory buffer.
 * @size: transfer length in bytes; must be a multiple of the configured
 *        transfer width (chan->hw_width).
 *
 * Source/destination and increment direction depend on chan->source:
 * DMA_FROM_DEVICE reads from chan->dev_addr into @data (dst increments),
 * DMA_TO_DEVICE writes @data out to chan->dev_addr (src increments).
 * The peripheral side is always on AHB2.  next_lli is left 0; the
 * enqueue path chains it later.
 */
static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan,
				 struct pl080s_lli *lli,
				 dma_addr_t data, int size)
{
	dma_addr_t src, dst;
	u32 control0, control1;

	switch (chan->source) {
	case DMA_FROM_DEVICE:
		src = chan->dev_addr;
		dst = data;
		control0 = PL080_CONTROL_SRC_AHB2;
		control0 |= PL080_CONTROL_DST_INCR;
		break;

	case DMA_TO_DEVICE:
		src = data;
		dst = chan->dev_addr;
		control0 = PL080_CONTROL_DST_AHB2;
		control0 |= PL080_CONTROL_SRC_INCR;
		break;
	default:
		BUG();
	}

	/* note, we do not currently setup any of the burst controls */

	control1 = size >> chan->hw_width;	/* size in no of xfers */
	control0 |= PL080_CONTROL_PROT_SYS;	/* always in priv. mode */
	control0 |= PL080_CONTROL_TC_IRQ_EN;	/* always fire IRQ */
	control0 |= (u32)chan->hw_width << PL080_CONTROL_DWIDTH_SHIFT;
	control0 |= (u32)chan->hw_width << PL080_CONTROL_SWIDTH_SHIFT;

	lli->src_addr = src;
	lli->dst_addr = dst;
	lli->next_lli = 0;
	lli->control0 = control0;
	lli->control1 = control1;
}
  149. static void s3c64xx_lli_to_regs(struct s3c2410_dma_chan *chan,
  150. struct pl080s_lli *lli)
  151. {
  152. void __iomem *regs = chan->regs;
  153. pr_debug("%s: LLI %p => regs\n", __func__, lli);
  154. show_lli(lli);
  155. writel(lli->src_addr, regs + PL080_CH_SRC_ADDR);
  156. writel(lli->dst_addr, regs + PL080_CH_DST_ADDR);
  157. writel(lli->next_lli, regs + PL080_CH_LLI);
  158. writel(lli->control0, regs + PL080_CH_CONTROL);
  159. writel(lli->control1, regs + PL080S_CH_CONTROL2);
  160. }
  161. static int s3c64xx_dma_start(struct s3c2410_dma_chan *chan)
  162. {
  163. struct s3c64xx_dmac *dmac = chan->dmac;
  164. u32 config;
  165. u32 bit = chan->bit;
  166. dbg_showchan(chan);
  167. pr_debug("%s: clearing interrupts\n", __func__);
  168. /* clear interrupts */
  169. writel(bit, dmac->regs + PL080_TC_CLEAR);
  170. writel(bit, dmac->regs + PL080_ERR_CLEAR);
  171. pr_debug("%s: starting channel\n", __func__);
  172. config = readl(chan->regs + PL080S_CH_CONFIG);
  173. config |= PL080_CONFIG_ENABLE;
  174. config &= ~PL080_CONFIG_HALT;
  175. pr_debug("%s: writing config %08x\n", __func__, config);
  176. writel(config, chan->regs + PL080S_CH_CONFIG);
  177. return 0;
  178. }
  179. static int s3c64xx_dma_stop(struct s3c2410_dma_chan *chan)
  180. {
  181. u32 config;
  182. int timeout;
  183. pr_debug("%s: stopping channel\n", __func__);
  184. dbg_showchan(chan);
  185. config = readl(chan->regs + PL080S_CH_CONFIG);
  186. config |= PL080_CONFIG_HALT;
  187. writel(config, chan->regs + PL080S_CH_CONFIG);
  188. timeout = 1000;
  189. do {
  190. config = readl(chan->regs + PL080S_CH_CONFIG);
  191. pr_debug("%s: %d - config %08x\n", __func__, timeout, config);
  192. if (config & PL080_CONFIG_ACTIVE)
  193. udelay(10);
  194. else
  195. break;
  196. } while (--timeout > 0);
  197. if (config & PL080_CONFIG_ACTIVE) {
  198. printk(KERN_ERR "%s: channel still active\n", __func__);
  199. return -EFAULT;
  200. }
  201. config = readl(chan->regs + PL080S_CH_CONFIG);
  202. config &= ~PL080_CONFIG_ENABLE;
  203. writel(config, chan->regs + PL080S_CH_CONFIG);
  204. return 0;
  205. }
/*
 * s3c64xx_dma_bufffdone - invoke the client callback for a completed
 * (or aborted/errored) buffer.  The size argument passed to the
 * callback is always 0 here.
 */
static inline void s3c64xx_dma_bufffdone(struct s3c2410_dma_chan *chan,
					 struct s3c64xx_dma_buff *buf,
					 enum s3c2410_dma_buffresult result)
{
	if (chan->callback_fn != NULL)
		(chan->callback_fn)(chan, buf->pw, 0, result);
}
/* return @buff's LLI to the dma_pool and free the tracking struct */
static void s3c64xx_dma_freebuff(struct s3c64xx_dma_buff *buff)
{
	dma_pool_free(dma_pool, buff->lli, buff->lli_dma);
	kfree(buff);
}
  218. static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
  219. {
  220. struct s3c64xx_dma_buff *buff, *next;
  221. u32 config;
  222. dbg_showchan(chan);
  223. pr_debug("%s: flushing channel\n", __func__);
  224. config = readl(chan->regs + PL080S_CH_CONFIG);
  225. config &= ~PL080_CONFIG_ENABLE;
  226. writel(config, chan->regs + PL080S_CH_CONFIG);
  227. /* dump all the buffers associated with this channel */
  228. for (buff = chan->curr; buff != NULL; buff = next) {
  229. next = buff->next;
  230. pr_debug("%s: buff %p (next %p)\n", __func__, buff, buff->next);
  231. s3c64xx_dma_bufffdone(chan, buff, S3C2410_RES_ABORT);
  232. s3c64xx_dma_freebuff(buff);
  233. }
  234. chan->curr = chan->next = chan->end = NULL;
  235. return 0;
  236. }
  237. int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
  238. {
  239. struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
  240. WARN_ON(!chan);
  241. if (!chan)
  242. return -EINVAL;
  243. switch (op) {
  244. case S3C2410_DMAOP_START:
  245. return s3c64xx_dma_start(chan);
  246. case S3C2410_DMAOP_STOP:
  247. return s3c64xx_dma_stop(chan);
  248. case S3C2410_DMAOP_FLUSH:
  249. return s3c64xx_dma_flush(chan);
  250. /* believe PAUSE/RESUME are no-ops */
  251. case S3C2410_DMAOP_PAUSE:
  252. case S3C2410_DMAOP_RESUME:
  253. case S3C2410_DMAOP_STARTED:
  254. case S3C2410_DMAOP_TIMEOUT:
  255. return 0;
  256. }
  257. return -ENOENT;
  258. }
  259. EXPORT_SYMBOL(s3c2410_dma_ctrl);
/* s3c2410_dma_enque
 *
 * Queue one buffer of @size bytes at bus address @data on @channel.
 * Allocates a tracking struct and an LLI from dma_pool, fills the LLI,
 * and splices it onto the channel's queue under local_irq protection.
 * @id is an opaque cookie handed back to the client callback.
 * Returns 0, -EINVAL for a bad channel, or -ENOMEM.
 */
int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
			dma_addr_t data, int size)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	struct s3c64xx_dma_buff *next;
	struct s3c64xx_dma_buff *buff;
	struct pl080s_lli *lli;
	unsigned long flags;
	int ret;

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	/* GFP_ATOMIC: this may be called from interrupt context */
	buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_ATOMIC);
	if (!buff) {
		printk(KERN_ERR "%s: no memory for buffer\n", __func__);
		return -ENOMEM;
	}

	lli = dma_pool_alloc(dma_pool, GFP_ATOMIC, &buff->lli_dma);
	if (!lli) {
		printk(KERN_ERR "%s: no memory for lli\n", __func__);
		ret = -ENOMEM;
		goto err_buff;
	}

	pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n",
		 __func__, buff, data, lli, (u32)buff->lli_dma, size);

	buff->lli = lli;
	buff->pw = id;

	s3c64xx_dma_fill_lli(chan, lli, data, size);

	/* irq-off section: the irq handler walks/updates the same queue */
	local_irq_save(flags);

	if ((next = chan->next) != NULL) {
		/* non-empty queue: chain the new LLI after the last one */
		struct s3c64xx_dma_buff *end = chan->end;
		struct pl080s_lli *endlli = end->lli;

		pr_debug("enquing onto channel\n");

		end->next = buff;
		endlli->next_lli = buff->lli_dma;

		/* circular mode: close the ring back to the head */
		if (chan->flags & S3C2410_DMAF_CIRCULAR) {
			struct s3c64xx_dma_buff *curr = chan->curr;
			lli->next_lli = curr->lli_dma;
		}

		/* hardware was on its last buffer: point its LLI register
		 * at the new one so it is picked up next */
		if (next == chan->curr) {
			writel(buff->lli_dma, chan->regs + PL080_CH_LLI);
			chan->next = buff;
		}

		show_lli(endlli);
		chan->end = buff;
	} else {
		/* empty queue: load the LLI straight into the registers */
		pr_debug("enquing onto empty channel\n");

		chan->curr = buff;
		chan->next = buff;
		chan->end = buff;

		s3c64xx_lli_to_regs(chan, lli);
	}

	local_irq_restore(flags);

	show_lli(lli);

	dbg_showchan(chan);
	dbg_showbuffs(chan);
	return 0;

err_buff:
	kfree(buff);
	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_enqueue);
  325. int s3c2410_dma_devconfig(enum dma_ch channel,
  326. enum dma_data_direction source,
  327. unsigned long devaddr)
  328. {
  329. struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
  330. u32 peripheral;
  331. u32 config = 0;
  332. pr_debug("%s: channel %d, source %d, dev %08lx, chan %p\n",
  333. __func__, channel, source, devaddr, chan);
  334. WARN_ON(!chan);
  335. if (!chan)
  336. return -EINVAL;
  337. peripheral = (chan->peripheral & 0xf);
  338. chan->source = source;
  339. chan->dev_addr = devaddr;
  340. pr_debug("%s: peripheral %d\n", __func__, peripheral);
  341. switch (source) {
  342. case DMA_FROM_DEVICE:
  343. config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
  344. config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
  345. break;
  346. case DMA_TO_DEVICE:
  347. config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
  348. config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
  349. break;
  350. default:
  351. printk(KERN_ERR "%s: bad source\n", __func__);
  352. return -EINVAL;
  353. }
  354. /* allow TC and ERR interrupts */
  355. config |= PL080_CONFIG_TC_IRQ_MASK;
  356. config |= PL080_CONFIG_ERR_IRQ_MASK;
  357. pr_debug("%s: config %08x\n", __func__, config);
  358. writel(config, chan->regs + PL080S_CH_CONFIG);
  359. return 0;
  360. }
  361. EXPORT_SYMBOL(s3c2410_dma_devconfig);
  362. int s3c2410_dma_getposition(enum dma_ch channel,
  363. dma_addr_t *src, dma_addr_t *dst)
  364. {
  365. struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
  366. WARN_ON(!chan);
  367. if (!chan)
  368. return -EINVAL;
  369. if (src != NULL)
  370. *src = readl(chan->regs + PL080_CH_SRC_ADDR);
  371. if (dst != NULL)
  372. *dst = readl(chan->regs + PL080_CH_DST_ADDR);
  373. return 0;
  374. }
  375. EXPORT_SYMBOL(s3c2410_dma_getposition);
  376. /* s3c2410_request_dma
  377. *
  378. * get control of an dma channel
  379. */
  380. int s3c2410_dma_request(enum dma_ch channel,
  381. struct s3c2410_dma_client *client,
  382. void *dev)
  383. {
  384. struct s3c2410_dma_chan *chan;
  385. unsigned long flags;
  386. pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n",
  387. channel, client->name, dev);
  388. local_irq_save(flags);
  389. chan = s3c64xx_dma_map_channel(channel);
  390. if (chan == NULL) {
  391. local_irq_restore(flags);
  392. return -EBUSY;
  393. }
  394. dbg_showchan(chan);
  395. chan->client = client;
  396. chan->in_use = 1;
  397. chan->peripheral = channel;
  398. local_irq_restore(flags);
  399. /* need to setup */
  400. pr_debug("%s: channel initialised, %p\n", __func__, chan);
  401. return chan->number | DMACH_LOW_LEVEL;
  402. }
  403. EXPORT_SYMBOL(s3c2410_dma_request);
/* s3c2410_dma_free
 *
 * release the given channel back to the system, will stop and flush
 * any outstanding transfers, and ensure the channel is ready for the
 * next claimant.
 *
 * Note, although a warning is currently printed if the freeing client
 * info is not the same as the registrant's client info, the free is still
 * allowed to go through.
 */
int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	unsigned long flags;

	if (chan == NULL)
		return -EINVAL;

	local_irq_save(flags);

	if (chan->client != client) {
		printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
		       channel, chan->client, client);
	}

	/* sort out stopping and freeing the channel */
	/* NOTE(review): despite the header comment, nothing here actually
	 * stops or flushes the channel before releasing it — TODO confirm
	 * whether callers are expected to STOP/FLUSH first. */

	chan->client = NULL;
	chan->in_use = 0;

	/* channels claimed via s3c2410_dma_request() return a number with
	 * DMACH_LOW_LEVEL set and have no s3c_dma_chan_map entry to clear */
	if (!(channel & DMACH_LOW_LEVEL))
		s3c_dma_chan_map[channel] = NULL;

	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_free);
/*
 * s3c64xx_dma_irq - interrupt handler for one PL080 controller.
 *
 * Reads the TC and ERR status registers once, then services every
 * channel whose bit is set: acks the interrupt, locates the buffer
 * that just completed, calls the client callback, frees the buffer
 * (unless the queue is circular) and advances the 'next' pointer.
 */
static irqreturn_t s3c64xx_dma_irq(int irq, void *pw)
{
	struct s3c64xx_dmac *dmac = pw;
	struct s3c2410_dma_chan *chan;
	enum s3c2410_dma_buffresult res;
	u32 tcstat, errstat;
	u32 bit;
	int offs;

	tcstat = readl(dmac->regs + PL080_TC_STATUS);
	errstat = readl(dmac->regs + PL080_ERR_STATUS);

	for (offs = 0, bit = 1; offs < 8; offs++, bit <<= 1) {
		struct s3c64xx_dma_buff *buff;

		if (!(errstat & bit) && !(tcstat & bit))
			continue;

		chan = dmac->channels + offs;
		res = S3C2410_RES_ERR;

		/* TC takes precedence over ERR if both fired */
		if (tcstat & bit) {
			writel(bit, dmac->regs + PL080_TC_CLEAR);
			res = S3C2410_RES_OK;
		}

		if (errstat & bit)
			writel(bit, dmac->regs + PL080_ERR_CLEAR);

		/* 'next' points to the buffer that is next to the
		 * currently active buffer.
		 * For CIRCULAR queues, 'next' will be same as 'curr'
		 * when 'end' is the active buffer.
		 */
		buff = chan->curr;
		while (buff && buff != chan->next
				&& buff->next != chan->next)
			buff = buff->next;

		if (!buff)
			BUG();

		/* walked all the way to 'next' without finding its
		 * predecessor: the completed buffer must be 'end' */
		if (buff == chan->next)
			buff = chan->end;

		s3c64xx_dma_bufffdone(chan, buff, res);

		/* Free the node and update curr, if non-circular queue */
		if (!(chan->flags & S3C2410_DMAF_CIRCULAR)) {
			chan->curr = buff->next;
			s3c64xx_dma_freebuff(buff);
		}

		/* Update 'next' */
		buff = chan->next;
		if (chan->next == chan->end) {
			chan->next = chan->curr;
			if (!(chan->flags & S3C2410_DMAF_CIRCULAR))
				chan->end = NULL;
		} else {
			chan->next = buff->next;
		}
	}

	return IRQ_HANDLED;
}
/* pseudo-bus so the two controllers appear under /sys/devices/system */
static struct bus_type dma_subsys = {
	.name		= "s3c64xx-dma",
	.dev_name	= "s3c64xx-dma",
};
  491. static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
  492. int irq, unsigned int base)
  493. {
  494. struct s3c2410_dma_chan *chptr = &s3c2410_chans[chno];
  495. struct s3c64xx_dmac *dmac;
  496. char clkname[16];
  497. void __iomem *regs;
  498. void __iomem *regptr;
  499. int err, ch;
  500. dmac = kzalloc(sizeof(struct s3c64xx_dmac), GFP_KERNEL);
  501. if (!dmac) {
  502. printk(KERN_ERR "%s: failed to alloc mem\n", __func__);
  503. return -ENOMEM;
  504. }
  505. dmac->dev.id = chno / 8;
  506. dmac->dev.bus = &dma_subsys;
  507. err = device_register(&dmac->dev);
  508. if (err) {
  509. printk(KERN_ERR "%s: failed to register device\n", __func__);
  510. goto err_alloc;
  511. }
  512. regs = ioremap(base, 0x200);
  513. if (!regs) {
  514. printk(KERN_ERR "%s: failed to ioremap()\n", __func__);
  515. err = -ENXIO;
  516. goto err_dev;
  517. }
  518. snprintf(clkname, sizeof(clkname), "dma%d", dmac->dev.id);
  519. dmac->clk = clk_get(NULL, clkname);
  520. if (IS_ERR(dmac->clk)) {
  521. printk(KERN_ERR "%s: failed to get clock %s\n", __func__, clkname);
  522. err = PTR_ERR(dmac->clk);
  523. goto err_map;
  524. }
  525. clk_enable(dmac->clk);
  526. dmac->regs = regs;
  527. dmac->chanbase = chbase;
  528. dmac->channels = chptr;
  529. err = request_irq(irq, s3c64xx_dma_irq, 0, "DMA", dmac);
  530. if (err < 0) {
  531. printk(KERN_ERR "%s: failed to get irq\n", __func__);
  532. goto err_clk;
  533. }
  534. regptr = regs + PL080_Cx_BASE(0);
  535. for (ch = 0; ch < 8; ch++, chptr++) {
  536. pr_debug("%s: registering DMA %d (%p)\n",
  537. __func__, chno + ch, regptr);
  538. chptr->bit = 1 << ch;
  539. chptr->number = chno + ch;
  540. chptr->dmac = dmac;
  541. chptr->regs = regptr;
  542. regptr += PL080_Cx_STRIDE;
  543. }
  544. /* for the moment, permanently enable the controller */
  545. writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);
  546. printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
  547. irq, regs, chno, chno+8);
  548. return 0;
  549. err_clk:
  550. clk_disable(dmac->clk);
  551. clk_put(dmac->clk);
  552. err_map:
  553. iounmap(regs);
  554. err_dev:
  555. device_unregister(&dmac->dev);
  556. err_alloc:
  557. kfree(dmac);
  558. return err;
  559. }
  560. static int __init s3c64xx_dma_init(void)
  561. {
  562. int ret;
  563. printk(KERN_INFO "%s: Registering DMA channels\n", __func__);
  564. dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0);
  565. if (!dma_pool) {
  566. printk(KERN_ERR "%s: failed to create pool\n", __func__);
  567. return -ENOMEM;
  568. }
  569. ret = subsys_system_register(&dma_subsys, NULL);
  570. if (ret) {
  571. printk(KERN_ERR "%s: failed to create subsys\n", __func__);
  572. return -ENOMEM;
  573. }
  574. /* Set all DMA configuration to be DMA, not SDMA */
  575. writel(0xffffff, S3C64XX_SDMA_SEL);
  576. /* Register standard DMA controllers */
  577. s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
  578. s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);
  579. return 0;
  580. }
  581. arch_initcall(s3c64xx_dma_init);