/* p2m.c — solo6x10 PCI-to-memory (P2M) DMA engine */
  1. /*
  2. * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
  3. * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  18. */
  19. #include <linux/kernel.h>
  20. #include <linux/scatterlist.h>
  21. #include "solo6x10.h"
  22. /* #define SOLO_TEST_P2M */
  23. int solo_p2m_dma(struct solo_dev *solo_dev, u8 id, int wr,
  24. void *sys_addr, u32 ext_addr, u32 size)
  25. {
  26. dma_addr_t dma_addr;
  27. int ret;
  28. WARN_ON(!size);
  29. BUG_ON(id >= SOLO_NR_P2M);
  30. if (!size)
  31. return -EINVAL;
  32. dma_addr = pci_map_single(solo_dev->pdev, sys_addr, size,
  33. wr ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
  34. ret = solo_p2m_dma_t(solo_dev, id, wr, dma_addr, ext_addr, size);
  35. pci_unmap_single(solo_dev->pdev, dma_addr, size,
  36. wr ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
  37. return ret;
  38. }
  39. int solo_p2m_dma_t(struct solo_dev *solo_dev, u8 id, int wr,
  40. dma_addr_t dma_addr, u32 ext_addr, u32 size)
  41. {
  42. struct p2m_desc *desc = kzalloc(sizeof(*desc) * 2, GFP_DMA);
  43. int ret;
  44. if (desc == NULL)
  45. return -ENOMEM;
  46. solo_p2m_push_desc(&desc[1], wr, dma_addr, ext_addr, size, 0, 0);
  47. ret = solo_p2m_dma_desc(solo_dev, id, desc, 2);
  48. kfree(desc);
  49. return ret;
  50. }
  51. void solo_p2m_push_desc(struct p2m_desc *desc, int wr, dma_addr_t dma_addr,
  52. u32 ext_addr, u32 size, int repeat, u32 ext_size)
  53. {
  54. desc->ta = cpu_to_le32(dma_addr);
  55. desc->fa = cpu_to_le32(ext_addr);
  56. desc->ext = cpu_to_le32(SOLO_P2M_COPY_SIZE(size >> 2));
  57. desc->ctrl = cpu_to_le32(SOLO_P2M_BURST_SIZE(SOLO_P2M_BURST_256) |
  58. (wr ? SOLO_P2M_WRITE : 0) | SOLO_P2M_TRANS_ON);
  59. /* Ext size only matters when we're repeating */
  60. if (repeat) {
  61. desc->ext |= cpu_to_le32(SOLO_P2M_EXT_INC(ext_size >> 2));
  62. desc->ctrl |= cpu_to_le32(SOLO_P2M_PCI_INC(size >> 2) |
  63. SOLO_P2M_REPEAT(repeat));
  64. }
  65. }
  66. int solo_p2m_dma_desc(struct solo_dev *solo_dev, u8 id,
  67. struct p2m_desc *desc, int desc_count)
  68. {
  69. struct solo_p2m_dev *p2m_dev;
  70. unsigned int timeout;
  71. int ret = 0;
  72. u32 config = 0;
  73. dma_addr_t desc_dma = 0;
  74. BUG_ON(id >= SOLO_NR_P2M);
  75. BUG_ON(!desc_count || desc_count > SOLO_NR_P2M_DESC);
  76. p2m_dev = &solo_dev->p2m_dev[id];
  77. mutex_lock(&p2m_dev->mutex);
  78. solo_reg_write(solo_dev, SOLO_P2M_CONTROL(id), 0);
  79. INIT_COMPLETION(p2m_dev->completion);
  80. p2m_dev->error = 0;
  81. /* Enable the descriptors */
  82. config = solo_reg_read(solo_dev, SOLO_P2M_CONFIG(id));
  83. desc_dma = pci_map_single(solo_dev->pdev, desc,
  84. desc_count * sizeof(*desc),
  85. PCI_DMA_TODEVICE);
  86. solo_reg_write(solo_dev, SOLO_P2M_DES_ADR(id), desc_dma);
  87. solo_reg_write(solo_dev, SOLO_P2M_DESC_ID(id), desc_count - 1);
  88. solo_reg_write(solo_dev, SOLO_P2M_CONFIG(id), config |
  89. SOLO_P2M_DESC_MODE);
  90. /* Should have all descriptors completed from one interrupt */
  91. timeout = wait_for_completion_timeout(&p2m_dev->completion, HZ);
  92. solo_reg_write(solo_dev, SOLO_P2M_CONTROL(id), 0);
  93. /* Reset back to non-descriptor mode */
  94. solo_reg_write(solo_dev, SOLO_P2M_CONFIG(id), config);
  95. solo_reg_write(solo_dev, SOLO_P2M_DESC_ID(id), 0);
  96. solo_reg_write(solo_dev, SOLO_P2M_DES_ADR(id), 0);
  97. pci_unmap_single(solo_dev->pdev, desc_dma,
  98. desc_count * sizeof(*desc),
  99. PCI_DMA_TODEVICE);
  100. if (p2m_dev->error)
  101. ret = -EIO;
  102. else if (timeout == 0)
  103. ret = -EAGAIN;
  104. mutex_unlock(&p2m_dev->mutex);
  105. WARN_ON_ONCE(ret);
  106. return ret;
  107. }
/* Build a descriptor chain from a scatter-gather list and run it on
 * channel @id.
 *
 * @pdesc:    caller-provided array of at least SOLO_NR_P2M_DESC entries;
 *            pdesc[0] is zeroed and left unused (descriptors start at 1)
 * @wr:       non-zero copies host -> card, zero the reverse
 * @sg:       DMA-mapped scatterlist to walk
 * @sg_off:   bytes to skip from the start of the list before copying
 * @ext_addr: starting address in the card's external RAM
 * @size:     total bytes to transfer; must be non-zero
 *
 * Returns the result of solo_p2m_dma_desc(), or -EINVAL for size == 0.
 */
int solo_p2m_dma_sg(struct solo_dev *solo_dev, u8 id,
		    struct p2m_desc *pdesc, int wr,
		    struct scatterlist *sg, u32 sg_off,
		    u32 ext_addr, u32 size)
{
	int i;
	int idx;

	BUG_ON(id >= SOLO_NR_P2M);
	if (WARN_ON_ONCE(!size))
		return -EINVAL;

	memset(pdesc, 0, sizeof(*pdesc));

	/* Should rewrite this to handle > SOLO_NR_P2M_DESC transactions */
	for (i = 0, idx = 1; idx < SOLO_NR_P2M_DESC && sg && size > 0;
	     i++, sg = sg_next(sg)) {
		struct p2m_desc *desc = &pdesc[idx];
		u32 sg_len = sg_dma_len(sg);
		u32 len;

		/* Still inside the requested starting offset: consume this
		 * whole entry without emitting a descriptor. */
		if (sg_off >= sg_len) {
			sg_off -= sg_len;
			continue;
		}

		sg_len -= sg_off;
		len = min(sg_len, size);

		solo_p2m_push_desc(desc, wr, sg_dma_address(sg) + sg_off,
				   ext_addr, len, 0, 0);

		size -= len;
		ext_addr += len;
		idx++;

		/* Offset only applies to the first descriptor emitted */
		sg_off = 0;
	}

	/* NOTE(review): the second condition tests the sg-entry counter i,
	 * not the descriptor index idx; it looks like it was meant to flag
	 * running out of descriptors — confirm against the hardware docs. */
	WARN_ON_ONCE(size || i >= SOLO_NR_P2M_DESC);

	return solo_p2m_dma_desc(solo_dev, id, pdesc, idx);
}
  141. #ifdef SOLO_TEST_P2M
  142. #define P2M_TEST_CHAR 0xbe
  143. static unsigned long long p2m_test(struct solo_dev *solo_dev, u8 id,
  144. u32 base, int size)
  145. {
  146. u8 *wr_buf;
  147. u8 *rd_buf;
  148. int i;
  149. unsigned long long err_cnt = 0;
  150. wr_buf = kmalloc(size, GFP_KERNEL);
  151. if (!wr_buf) {
  152. printk(SOLO6X10_NAME ": Failed to malloc for p2m_test\n");
  153. return size;
  154. }
  155. rd_buf = kmalloc(size, GFP_KERNEL);
  156. if (!rd_buf) {
  157. printk(SOLO6X10_NAME ": Failed to malloc for p2m_test\n");
  158. kfree(wr_buf);
  159. return size;
  160. }
  161. memset(wr_buf, P2M_TEST_CHAR, size);
  162. memset(rd_buf, P2M_TEST_CHAR + 1, size);
  163. solo_p2m_dma(solo_dev, id, 1, wr_buf, base, size);
  164. solo_p2m_dma(solo_dev, id, 0, rd_buf, base, size);
  165. for (i = 0; i < size; i++)
  166. if (wr_buf[i] != rd_buf[i])
  167. err_cnt++;
  168. kfree(wr_buf);
  169. kfree(rd_buf);
  170. return err_cnt;
  171. }
  172. #define TEST_CHUNK_SIZE (8 * 1024)
  173. static void run_p2m_test(struct solo_dev *solo_dev)
  174. {
  175. unsigned long long errs = 0;
  176. u32 size = SOLO_JPEG_EXT_ADDR(solo_dev) + SOLO_JPEG_EXT_SIZE(solo_dev);
  177. int i, d;
  178. printk(KERN_WARNING "%s: Testing %u bytes of external ram\n",
  179. SOLO6X10_NAME, size);
  180. for (i = 0; i < size; i += TEST_CHUNK_SIZE)
  181. for (d = 0; d < 4; d++)
  182. errs += p2m_test(solo_dev, d, i, TEST_CHUNK_SIZE);
  183. printk(KERN_WARNING "%s: Found %llu errors during p2m test\n",
  184. SOLO6X10_NAME, errs);
  185. return;
  186. }
  187. #else
  188. #define run_p2m_test(__solo) do {} while (0)
  189. #endif
  190. void solo_p2m_isr(struct solo_dev *solo_dev, int id)
  191. {
  192. struct solo_p2m_dev *p2m_dev = &solo_dev->p2m_dev[id];
  193. solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_P2M(id));
  194. complete(&p2m_dev->completion);
  195. }
  196. void solo_p2m_error_isr(struct solo_dev *solo_dev, u32 status)
  197. {
  198. struct solo_p2m_dev *p2m_dev;
  199. int i;
  200. if (!(status & SOLO_PCI_ERR_P2M))
  201. return;
  202. for (i = 0; i < SOLO_NR_P2M; i++) {
  203. p2m_dev = &solo_dev->p2m_dev[i];
  204. p2m_dev->error = 1;
  205. solo_reg_write(solo_dev, SOLO_P2M_CONTROL(i), 0);
  206. complete(&p2m_dev->completion);
  207. }
  208. }
  209. void solo_p2m_exit(struct solo_dev *solo_dev)
  210. {
  211. int i;
  212. for (i = 0; i < SOLO_NR_P2M; i++)
  213. solo_irq_off(solo_dev, SOLO_IRQ_P2M(i));
  214. }
  215. int solo_p2m_init(struct solo_dev *solo_dev)
  216. {
  217. struct solo_p2m_dev *p2m_dev;
  218. int i;
  219. for (i = 0; i < SOLO_NR_P2M; i++) {
  220. p2m_dev = &solo_dev->p2m_dev[i];
  221. mutex_init(&p2m_dev->mutex);
  222. init_completion(&p2m_dev->completion);
  223. solo_reg_write(solo_dev, SOLO_P2M_CONTROL(i), 0);
  224. solo_reg_write(solo_dev, SOLO_P2M_CONFIG(i),
  225. SOLO_P2M_CSC_16BIT_565 |
  226. SOLO_P2M_DMA_INTERVAL(3) |
  227. SOLO_P2M_DESC_INTR_OPT |
  228. SOLO_P2M_PCI_MASTER_MODE);
  229. solo_irq_on(solo_dev, SOLO_IRQ_P2M(i));
  230. }
  231. run_p2m_test(solo_dev);
  232. return 0;
  233. }