dma_lib.c
/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Common functions for DMA access on PA Semi PWRficient
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/sched.h>

#include <asm/pasemi_dma.h>

#define MAX_TXCH 64
#define MAX_RXCH 64
#define MAX_FLAGS 64
#define MAX_FUN 8

static struct pasdma_status *dma_status;

static void __iomem *iob_regs;
static void __iomem *mac_regs[6];
static void __iomem *dma_regs;

static int base_hw_irq;

static int num_txch, num_rxch;

static struct pci_dev *dma_pdev;

/* Bitmaps to handle allocation of channels */
static DECLARE_BITMAP(txch_free, MAX_TXCH);
static DECLARE_BITMAP(rxch_free, MAX_RXCH);
static DECLARE_BITMAP(flags_free, MAX_FLAGS);
static DECLARE_BITMAP(fun_free, MAX_FUN);

/* pasemi_read_iob_reg - read IOB register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_iob_reg(unsigned int reg)
{
	return in_le32(iob_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_iob_reg);

/* pasemi_write_iob_reg - write IOB register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
{
	out_le32(iob_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_iob_reg);

/* pasemi_read_mac_reg - read MAC register
 * @intf: MAC interface
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
{
	return in_le32(mac_regs[intf]+reg);
}
EXPORT_SYMBOL(pasemi_read_mac_reg);

/* pasemi_write_mac_reg - write MAC register
 * @intf: MAC interface
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
{
	out_le32(mac_regs[intf]+reg, val);
}
EXPORT_SYMBOL(pasemi_write_mac_reg);

/* pasemi_read_dma_reg - read DMA register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_dma_reg(unsigned int reg)
{
	return in_le32(dma_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_dma_reg);

/* pasemi_write_dma_reg - write DMA register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_dma_reg(unsigned int reg, unsigned int val)
{
	out_le32(dma_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_dma_reg);
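
/*
 * Usage sketch (illustrative only, not part of the original file): these
 * accessors are the intended way for clients to do read-modify-write
 * sequences on the memory-mapped DMA engine registers, in the same way
 * pasemi_dma_init() itself updates PAS_DMA_COM_CFG further down:
 *
 *	u32 cfg;
 *
 *	cfg = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
 *	pasemi_write_dma_reg(PAS_DMA_COM_CFG, cfg | 0x18000000);
 */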

static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
{
	int bit;
	int start, limit;

	switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
	case TXCHAN_EVT0:
		start = 0;
		limit = 10;
		break;
	case TXCHAN_EVT1:
		start = 10;
		limit = MAX_TXCH;
		break;
	default:
		start = 0;
		limit = MAX_TXCH;
		break;
	}
retry:
	bit = find_next_bit(txch_free, MAX_TXCH, start);
	if (bit >= limit)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, txch_free))
		goto retry;

	return bit;
}

static void pasemi_free_tx_chan(int chan)
{
	BUG_ON(test_bit(chan, txch_free));
	set_bit(chan, txch_free);
}

static int pasemi_alloc_rx_chan(void)
{
	int bit;
retry:
	bit = find_first_bit(rxch_free, MAX_RXCH);
	if (bit >= MAX_RXCH)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, rxch_free))
		goto retry;

	return bit;
}

static void pasemi_free_rx_chan(int chan)
{
	BUG_ON(test_bit(chan, rxch_free));
	set_bit(chan, rxch_free);
}

/* pasemi_dma_alloc_chan - Allocate a DMA channel
 * @type: Type of channel to allocate
 * @total_size: Total size of structure to allocate (to allow for more
 *		room behind the structure to be used by the client)
 * @offset: Offset in bytes from start of the total structure to the beginning
 *	    of struct pasemi_dmachan. Needed when struct pasemi_dmachan is
 *	    not the first member of the client structure.
 *
 * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The
 * type argument specifies whether it's an RX or TX channel, and in the case
 * of TX channels which group it needs to belong to (if any).
 *
 * Returns a pointer to the embedded struct pasemi_dmachan (which coincides
 * with the start of the allocated client structure when @offset is 0), or
 * NULL on failure.
 */
void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
			    int total_size, int offset)
{
	void *buf;
	struct pasemi_dmachan *chan;
	int chno;

	BUG_ON(total_size < sizeof(struct pasemi_dmachan));

	buf = kzalloc(total_size, GFP_KERNEL);

	if (!buf)
		return NULL;
	chan = buf + offset;

	chan->priv = buf;

	switch (type & (TXCHAN|RXCHAN)) {
	case RXCHAN:
		chno = pasemi_alloc_rx_chan();
		chan->chno = chno;
		chan->irq = irq_create_mapping(NULL,
					       base_hw_irq + num_txch + chno);
		chan->status = &dma_status->rx_sta[chno];
		break;
	case TXCHAN:
		chno = pasemi_alloc_tx_chan(type);
		chan->chno = chno;
		chan->irq = irq_create_mapping(NULL, base_hw_irq + chno);
		chan->status = &dma_status->tx_sta[chno];
		break;
	}

	chan->chan_type = type;

	return chan;
}
EXPORT_SYMBOL(pasemi_dma_alloc_chan);
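
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical client embeds struct pasemi_dmachan in its own per-channel
 * state and lets pasemi_dma_alloc_chan() size the whole allocation.  Keeping
 * the dmachan as the first member makes the returned pointer usable as the
 * client structure directly:
 *
 *	struct example_txring {
 *		struct pasemi_dmachan chan;
 *		unsigned int next_to_fill;
 *		unsigned int next_to_clean;
 *	};
 *
 *	struct example_txring *ring;
 *
 *	ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct example_txring),
 *				     offsetof(struct example_txring, chan));
 *	if (!ring)
 *		return -ENOMEM;
 */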

/* pasemi_dma_free_chan - Free a previously allocated channel
 * @chan: Channel to free
 *
 * Frees a previously allocated channel. It will also deallocate any
 * descriptor ring associated with the channel, if allocated.
 */
void pasemi_dma_free_chan(struct pasemi_dmachan *chan)
{
	if (chan->ring_virt)
		pasemi_dma_free_ring(chan);

	switch (chan->chan_type & (RXCHAN|TXCHAN)) {
	case RXCHAN:
		pasemi_free_rx_chan(chan->chno);
		break;
	case TXCHAN:
		pasemi_free_tx_chan(chan->chno);
		break;
	}

	kfree(chan->priv);
}
EXPORT_SYMBOL(pasemi_dma_free_chan);

/* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel
 * @chan: Channel for which to allocate
 * @ring_size: Ring size in 64-bit (8-byte) words
 *
 * Allocate a descriptor ring for a channel. Returns 0 on success, errno
 * on failure. The passed in struct pasemi_dmachan is updated with the
 * virtual and DMA addresses of the ring.
 */
int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
{
	BUG_ON(chan->ring_virt);

	chan->ring_size = ring_size;

	chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
					     ring_size * sizeof(u64),
					     &chan->ring_dma, GFP_KERNEL);

	if (!chan->ring_virt)
		return -ENOMEM;

	memset(chan->ring_virt, 0, ring_size * sizeof(u64));

	return 0;
}
EXPORT_SYMBOL(pasemi_dma_alloc_ring);
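
/*
 * Usage sketch (illustrative only, not part of the original file):
 * continuing the hypothetical example above, give the channel a 512-word
 * descriptor ring (ring_size counts 64-bit words, not descriptors):
 *
 *	if (pasemi_dma_alloc_ring(&ring->chan, 512)) {
 *		pasemi_dma_free_chan(&ring->chan);
 *		return -ENOMEM;
 *	}
 *
 * The ring's CPU and bus addresses are then available in
 * ring->chan.ring_virt and ring->chan.ring_dma.
 */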

/* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel
 * @chan: Channel for which to free the descriptor ring
 *
 * Frees a previously allocated descriptor ring for a channel.
 */
void pasemi_dma_free_ring(struct pasemi_dmachan *chan)
{
	BUG_ON(!chan->ring_virt);

	dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64),
			  chan->ring_virt, chan->ring_dma);
	chan->ring_virt = NULL;
	chan->ring_size = 0;
	chan->ring_dma = 0;
}
EXPORT_SYMBOL(pasemi_dma_free_ring);

/* pasemi_dma_start_chan - Start a DMA channel
 * @chan: Channel to start
 * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write
 *
 * Enables (starts) a DMA channel with optional additional arguments.
 */
void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta)
{
	if (chan->chan_type == RXCHAN)
		pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno),
				     cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN);
	else
		pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno),
				     cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN);
}
EXPORT_SYMBOL(pasemi_dma_start_chan);

/* pasemi_dma_stop_chan - Stop a DMA channel
 * @chan: Channel to stop
 *
 * Stops (disables) a DMA channel. This is done by setting the ST bit in the
 * CMDSTA register and waiting on the ACT (active) bit to clear, then
 * finally disabling the whole channel.
 *
 * This function only tries for a short while; if the channel does not stop
 * in time it returns failure.
 *
 * Returns 1 on success, 0 on failure.
 */
#define MAX_RETRIES 5000
int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan)
{
	int reg, retries;
	u32 sta;

	if (chan->chan_type == RXCHAN) {
		reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno);
		pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST);
		for (retries = 0; retries < MAX_RETRIES; retries++) {
			sta = pasemi_read_dma_reg(reg);
			if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
				pasemi_write_dma_reg(reg, 0);
				return 1;
			}
			cond_resched();
		}
	} else {
		reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno);
		pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST);
		for (retries = 0; retries < MAX_RETRIES; retries++) {
			sta = pasemi_read_dma_reg(reg);
			if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
				pasemi_write_dma_reg(reg, 0);
				return 1;
			}
			cond_resched();
		}
	}

	return 0;
}
EXPORT_SYMBOL(pasemi_dma_stop_chan);
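
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical client enables the channel once its ring is programmed, and
 * checks the return value when shutting it down, since
 * pasemi_dma_stop_chan() returns 0 if the channel was still active after
 * the retry budget:
 *
 *	pasemi_dma_start_chan(&ring->chan, 0);
 *
 *	(... submit and reap descriptors ...)
 *
 *	if (!pasemi_dma_stop_chan(&ring->chan))
 *		pr_warning("example: channel %d did not stop\n",
 *			   ring->chan.chno);
 */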

/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
 * @chan: Channel to allocate for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Allocate a buffer to be used by the DMA engine for read/write,
 * similar to dma_alloc_coherent().
 *
 * Returns the virtual address of the buffer, or NULL in case of failure.
 */
void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
			   dma_addr_t *handle)
{
	return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_alloc_buf);

/* pasemi_dma_free_buf - Free a buffer used for DMA
 * @chan: Channel the buffer was allocated for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Frees a previously allocated buffer.
 */
void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
			 dma_addr_t *handle)
{
	dma_free_coherent(&dma_pdev->dev, size, handle, *handle);
}
EXPORT_SYMBOL(pasemi_dma_free_buf);
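
/*
 * Usage sketch (illustrative only, not part of the original file):
 *
 *	void *buf;
 *	dma_addr_t buf_dma;
 *
 *	buf = pasemi_dma_alloc_buf(&ring->chan, 4096, &buf_dma);
 *	if (!buf)
 *		return -ENOMEM;
 *
 * buf_dma is the bus address to place in descriptors; buf is the CPU
 * virtual address of the same memory.
 */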

/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization
 *
 * Allocates a flag for use with channel synchronization (event descriptors).
 * Returns allocated flag (0-63), < 0 on error.
 */
int pasemi_dma_alloc_flag(void)
{
	int bit;

retry:
	bit = find_next_bit(flags_free, MAX_FLAGS, 0);
	if (bit >= MAX_FLAGS)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, flags_free))
		goto retry;

	return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_flag);

/* pasemi_dma_free_flag - Deallocates a flag (event)
 * @flag: Flag number to deallocate
 *
 * Frees up a flag so it can be reused for other purposes.
 */
void pasemi_dma_free_flag(int flag)
{
	BUG_ON(flag >= MAX_FLAGS);
	BUG_ON(test_bit(flag, flags_free));
	set_bit(flag, flags_free);
}
EXPORT_SYMBOL(pasemi_dma_free_flag);

/* pasemi_dma_set_flag - Sets a flag (event) to 1
 * @flag: Flag number to set active
 *
 * Sets the flag provided to 1.
 */
void pasemi_dma_set_flag(int flag)
{
	BUG_ON(flag >= MAX_FLAGS);
	if (flag < 32)
		pasemi_write_dma_reg(PAS_DMA_TXF_SFLG0, 1 << flag);
	else
		pasemi_write_dma_reg(PAS_DMA_TXF_SFLG1, 1 << (flag - 32));
}
EXPORT_SYMBOL(pasemi_dma_set_flag);

/* pasemi_dma_clear_flag - Sets a flag (event) to 0
 * @flag: Flag number to set inactive
 *
 * Sets the flag provided to 0.
 */
void pasemi_dma_clear_flag(int flag)
{
	BUG_ON(flag >= MAX_FLAGS);
	if (flag < 32)
		pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 1 << flag);
	else
		pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 1 << (flag - 32));
}
EXPORT_SYMBOL(pasemi_dma_clear_flag);
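
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical client reserves a flag at setup time, clears it before use,
 * and releases it on teardown.  The flag number is what gets encoded into
 * event descriptors that signal or wait on the flag:
 *
 *	int flag;
 *
 *	flag = pasemi_dma_alloc_flag();
 *	if (flag < 0)
 *		return flag;
 *
 *	pasemi_dma_clear_flag(flag);
 *
 *	(... queue descriptors that signal/wait on the flag ...)
 *
 *	pasemi_dma_free_flag(flag);
 */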

/* pasemi_dma_alloc_fun - Allocate a function engine
 *
 * Allocates a function engine to use for crypto/checksum offload
 * Returns allocated engine (0-7), < 0 on error.
 */
int pasemi_dma_alloc_fun(void)
{
	int bit;

retry:
	bit = find_next_bit(fun_free, MAX_FUN, 0);
	if (bit >= MAX_FUN)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, fun_free))
		goto retry;

	return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_fun);

/* pasemi_dma_free_fun - Deallocates a function engine
 * @fun: Engine number to deallocate
 *
 * Frees up a function engine so it can be used for other purposes.
 */
void pasemi_dma_free_fun(int fun)
{
	BUG_ON(fun >= MAX_FUN);
	BUG_ON(test_bit(fun, fun_free));
	set_bit(fun, fun_free);
}
EXPORT_SYMBOL(pasemi_dma_free_fun);

static void *map_onedev(struct pci_dev *p, int index)
{
	struct device_node *dn;
	void __iomem *ret;

	dn = pci_device_to_OF_node(p);
	if (!dn)
		goto fallback;

	ret = of_iomap(dn, index);
	if (!ret)
		goto fallback;

	return ret;

fallback:
	/* This is hardcoded and ugly, but we have some firmware versions
	 * that don't provide the register space in the device tree. Luckily
	 * they are at well-known locations so we can just do the math here.
	 */
	return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
}

/* pasemi_dma_init - Initialize the PA Semi DMA library
 *
 * This function initializes the DMA library. It must be called before
 * any other function in the library.
 *
 * Returns 0 on success, errno on failure.
 */
int pasemi_dma_init(void)
{
	static DEFINE_SPINLOCK(init_lock);
	struct pci_dev *iob_pdev;
	struct pci_dev *pdev;
	struct resource res;
	struct device_node *dn;
	int i, intf, err = 0;
	unsigned long timeout;
	u32 tmp;

	if (!machine_is(pasemi))
		return -ENODEV;

	spin_lock(&init_lock);

	/* Make sure we haven't already initialized */
	if (dma_pdev)
		goto out;

	iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!iob_pdev) {
		printk(KERN_WARNING "Can't find I/O Bridge\n");
		BUG();
		err = -ENODEV;
		goto out;
	}
	iob_regs = map_onedev(iob_pdev, 0);

	dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
	if (!dma_pdev) {
		printk(KERN_WARNING "Can't find DMA controller\n");
		BUG();
		err = -ENODEV;
		goto out;
	}
	dma_regs = map_onedev(dma_pdev, 0);
	base_hw_irq = virq_to_hw(dma_pdev->irq);

	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
	num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;

	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
	num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;

	intf = 0;
	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
	     pdev;
	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
		mac_regs[intf++] = map_onedev(pdev, 0);

	pci_dev_put(pdev);

	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
	     pdev;
	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
		mac_regs[intf++] = map_onedev(pdev, 0);

	pci_dev_put(pdev);

	dn = pci_device_to_OF_node(iob_pdev);
	if (dn)
		err = of_address_to_resource(dn, 1, &res);
	if (!dn || err) {
		/* Fallback for old firmware */
		res.start = 0xfd800000;
		res.end = res.start + 0x1000;
		err = 0;
	}

	dma_status = __ioremap(res.start, resource_size(&res), 0);
	pci_dev_put(iob_pdev);

	for (i = 0; i < MAX_TXCH; i++)
		__set_bit(i, txch_free);

	for (i = 0; i < MAX_RXCH; i++)
		__set_bit(i, rxch_free);

	timeout = jiffies + HZ;
	pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0);
	while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) {
		if (time_after(jiffies, timeout)) {
			pr_warning("Warning: Could not disable RX section\n");
			break;
		}
	}

	timeout = jiffies + HZ;
	pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0);
	while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) {
		if (time_after(jiffies, timeout)) {
			pr_warning("Warning: Could not disable TX section\n");
			break;
		}
	}

	/* setup resource allocations for the different DMA sections */
	tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
	pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000);

	/* enable tx section */
	pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	/* enable rx section */
	pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

	for (i = 0; i < MAX_FLAGS; i++)
		__set_bit(i, flags_free);

	for (i = 0; i < MAX_FUN; i++)
		__set_bit(i, fun_free);

	/* clear all status flags */
	pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff);
	pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff);

	printk(KERN_INFO "PA Semi PWRficient DMA library initialized "
		"(%d tx, %d rx channels)\n", num_txch, num_rxch);

out:
	spin_unlock(&init_lock);
	return err;
}
EXPORT_SYMBOL(pasemi_dma_init);
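
/*
 * Usage sketch (illustrative only, not part of the original file): client
 * drivers, e.g. a hypothetical example_probe() routine, call
 * pasemi_dma_init() before using anything else in this library.  The call
 * is serialized and idempotent, so multiple clients can each invoke it:
 *
 *	static int example_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *id)
 *	{
 *		int err;
 *
 *		err = pasemi_dma_init();
 *		if (err)
 *			return err;
 *
 *		(... allocate channels, rings, and flags as shown above ...)
 *		return 0;
 *	}
 */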