dma-coherent.c

/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/addrspace.h>

void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
        /*
         * No need to sync an uncached area
         */
        if (PXSEG(vaddr) == P2SEG)
                return;

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                invalidate_dcache_region(vaddr, size);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                clean_dcache_region(vaddr, size);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                flush_dcache_region(vaddr, size);
                break;
        default:
                BUG();
        }
}
EXPORT_SYMBOL(dma_cache_sync);
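
/*
 * Illustrative sketch, not part of the original file: a driver doing
 * streaming DMA out of a cacheable buffer would typically fill the buffer
 * with the CPU and then sync it before handing it to the device, roughly
 * like this (the buf, len and start_dma() names are hypothetical):
 *
 *      memcpy(buf, data, len);
 *      dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 *      start_dma(dev, virt_to_bus(buf), len);
 *
 * For DMA_FROM_DEVICE the helper invalidates the lines instead, so stale
 * cached data is not read back after the device has written the buffer.
 */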
static struct page *__dma_alloc(struct device *dev, size_t size,
                                dma_addr_t *handle, gfp_t gfp)
{
        struct page *page, *free, *end;
        int order;

        /* Following is a work-around (a.k.a. hack) to prevent pages
         * with __GFP_COMP being passed to split_page() which cannot
         * handle them.  The real problem is that this flag probably
         * should be 0 on AVR32 as it is not supported on this
         * platform--see CONFIG_HUGETLB_PAGE. */
        gfp &= ~(__GFP_COMP);

        size = PAGE_ALIGN(size);
        order = get_order(size);

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;
        split_page(page, order);

        /*
         * When accessing physical memory with valid cache data, we
         * get a cache hit even if the virtual memory region is marked
         * as uncached.
         *
         * Since the memory is newly allocated, there is no point in
         * doing a writeback. If the previous owner cares, he should
         * have flushed the cache before releasing the memory.
         */
        invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

        *handle = page_to_bus(page);
        free = page + (size >> PAGE_SHIFT);
        end = page + (1 << order);

        /*
         * Free any unused pages
         */
        while (free < end) {
                __free_page(free);
                free++;
        }

        return page;
}
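
/*
 * Worked example (not in the original source): assuming 4 KiB pages, a
 * 12 KiB request stays 12 KiB after PAGE_ALIGN() but get_order() rounds
 * the allocation up to order 2, i.e. 16 KiB.  split_page() turns that
 * block into four independent pages, and the loop at the end of
 * __dma_alloc() hands the fourth, surplus page back to the allocator so
 * only the three requested pages remain allocated.
 */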
static void __dma_free(struct device *dev, size_t size,
                       struct page *page, dma_addr_t handle)
{
        struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

        while (page < end)
                __free_page(page++);
}

static void *avr32_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
        struct page *page;
        dma_addr_t phys;

        page = __dma_alloc(dev, size, handle, gfp);
        if (!page)
                return NULL;
        phys = page_to_phys(page);

        if (attrs & DMA_ATTR_WRITE_COMBINE) {
                /* Now, map the page into P3 with write-combining turned on */
                *handle = phys;
                return __ioremap(phys, size, _PAGE_BUFFER);
        } else {
                return phys_to_uncached(phys);
        }
}
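
/*
 * Sketch only, assuming a generic consumer of this op: drivers never call
 * avr32_dma_alloc() directly but go through the DMA API, which dispatches
 * into avr32_dma_ops.  The attrs argument comes from dma_alloc_attrs(),
 * e.g. (dev, fb_size and fb_dma are placeholder names):
 *
 *      void *fb = dma_alloc_attrs(dev, fb_size, &fb_dma, GFP_KERNEL,
 *                                 DMA_ATTR_WRITE_COMBINE);
 *
 * Without the attribute the buffer is returned through the uncached P2
 * mapping; with it, the pages are remapped write-combining via __ioremap().
 */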
static void avr32_dma_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
        struct page *page;

        if (attrs & DMA_ATTR_WRITE_COMBINE) {
                iounmap(cpu_addr);

                page = phys_to_page(handle);
        } else {
                void *addr = phys_to_cached(uncached_to_phys(cpu_addr));

                pr_debug("avr32_dma_free addr %p (phys %08lx) size %u\n",
                         cpu_addr, (unsigned long)handle, (unsigned)size);

                BUG_ON(!virt_addr_valid(addr));
                page = virt_to_page(addr);
        }

        __dma_free(dev, size, page, handle);
}

static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction direction, unsigned long attrs)
{
        void *cpu_addr = page_address(page) + offset;

        dma_cache_sync(dev, cpu_addr, size, direction);
        return virt_to_bus(cpu_addr);
}

static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                int nents, enum dma_data_direction direction,
                unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nents, i) {
                char *virt;

                sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
                virt = sg_virt(sg);
                dma_cache_sync(dev, virt, sg->length, direction);
        }

        return nents;
}

static void avr32_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}

static void avr32_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sglist, int nents,
                enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nents, i)
                dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
}

struct dma_map_ops avr32_dma_ops = {
        .alloc                  = avr32_dma_alloc,
        .free                   = avr32_dma_free,
        .map_page               = avr32_dma_map_page,
        .map_sg                 = avr32_dma_map_sg,
        .sync_single_for_device = avr32_dma_sync_single_for_device,
        .sync_sg_for_device     = avr32_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(avr32_dma_ops);
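
/*
 * Not part of the original file, a rough sketch of how the streaming ops
 * above are reached: a driver maps a kernel buffer with dma_map_single(),
 * which ends up in avr32_dma_map_page() and therefore in dma_cache_sync()
 * before the bus address is returned (buf and len are placeholders):
 *
 *      dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *
 * Note that this ops table only provides the *_for_device sync hooks;
 * there are no *_for_cpu callbacks in it.
 */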