dma-coherent.c

/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>

void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_region(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_region(vaddr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);
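
/*
 * Hedged usage sketch (not part of this file): a driver doing streaming
 * DMA on a cached kernel buffer would sync it around the transfer.  The
 * buffer, its size and the helper functions below are hypothetical.
 *
 *	dma_cache_sync(dev, buf, BUF_SIZE, DMA_TO_DEVICE);
 *	start_dma_to_device(dev, buf);		// hypothetical helper
 *	wait_for_dma_complete(dev);		// hypothetical helper
 *
 *	dma_cache_sync(dev, buf, BUF_SIZE, DMA_FROM_DEVICE);
 *	// the CPU may now safely read data the device wrote into buf
 */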

static struct page *__dma_alloc(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;

	/* Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on AVR32 as it is not supported on this
	 * platform--see CONFIG_HUGETLB_PAGE. */
	gfp &= ~(__GFP_COMP);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);

	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cares, he should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	/*
	 * Free any unused pages
	 */
	while (free < end) {
		__free_page(free);
		free++;
	}

	return page;
}
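
/*
 * Worked example (added for illustration, assuming PAGE_SIZE is 4 KiB):
 * a 12 KiB request is page-aligned to 12 KiB, get_order() yields 2, and
 * alloc_pages() returns a 16 KiB (four-page) block.  split_page() makes
 * the four pages independently freeable, *handle is set to the bus
 * address of the first page, and the loop above releases the unused
 * fourth page, so only the three pages actually requested stay allocated.
 */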

static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	while (page < end)
		__free_page(page++);
}

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	void *ret = NULL;

	page = __dma_alloc(dev, size, handle, gfp);
	if (page)
		ret = phys_to_uncached(page_to_phys(page));

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
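
/*
 * Hedged usage sketch (not part of this file): a typical caller grabs a
 * coherent buffer at probe time and releases it on remove.  RING_BYTES,
 * regs and RING_BASE are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(ring_dma, regs + RING_BASE);	// tell the device where it is
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */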

void dma_free_coherent(struct device *dev, size_t size,
		       void *cpu_addr, dma_addr_t handle)
{
	void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
	struct page *page;

	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
		 cpu_addr, (unsigned long)handle, (unsigned)size);

	BUG_ON(!virt_addr_valid(addr));
	page = virt_to_page(addr);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);

void *dma_alloc_writecombine(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	dma_addr_t phys;

	page = __dma_alloc(dev, size, handle, gfp);
	if (!page)
		return NULL;

	phys = page_to_phys(page);
	*handle = phys;

	/* Now, map the page into P3 with write-combining turned on */
	return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);
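
/*
 * Hedged usage sketch (not part of this file): write-combined memory
 * suits buffers the CPU mostly streams writes into and rarely reads
 * back, such as a frame buffer.  FB_BYTES is hypothetical.
 *
 *	dma_addr_t fb_dma;
 *	void *fb;
 *
 *	fb = dma_alloc_writecombine(dev, FB_BYTES, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	dma_free_writecombine(dev, FB_BYTES, fb, fb_dma);
 */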

void dma_free_writecombine(struct device *dev, size_t size,
			   void *cpu_addr, dma_addr_t handle)
{
	struct page *page;

	iounmap(cpu_addr);

	page = phys_to_page(handle);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);