dma.c

/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
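
/*
 * Illustrative sketch (not part of the original file): platform code that
 * wanted devices behind a bus with a +0x40000000 RAM alias to go through
 * this implementation could publish the offset before the device is used.
 * The helper name and the 0x40000000 value are assumptions for the example;
 * only the archdata.dma_data field is real, and it is read back by
 * get_dma_direct_offset() below.
 *
 *	static void example_set_dma_offset(struct device *dev)
 *	{
 *		dev->archdata.dma_data = (void *)0x40000000UL;
 *	}
 */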

static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
				   size_t size, enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		/* Write CPU-side data back to memory before the device reads it. */
		flush_dcache_range(paddr + offset, paddr + offset + size);
		break;
	case DMA_FROM_DEVICE:
		/* Discard stale cache lines so the CPU sees what the device wrote. */
		invalidate_dcache_range(paddr + offset, paddr + offset + size);
		break;
	default:
		BUG();
	}
}

static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (likely(dev))
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET; /* FIXME Not sure if this is correct */
}

/*
 * The data cache is not kept coherent with DMA, so take the uncached
 * consistent_alloc() path below for coherent allocations.
 */
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}
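
/*
 * Driver-side sketch (illustrative, not from this file): a buffer returned
 * by the allocator above is visible to the CPU through the returned pointer
 * and to the device at *dma_handle, with no explicit cache maintenance
 * required. "dev", "buf" and "handle" are placeholder names.
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... hand "handle" to the device ...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */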

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
				sg->length, direction);
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* Nothing to undo: map_sg allocated no state and used no bounce buffers. */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync_page(page_to_phys(page), offset, size, direction);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}
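
/*
 * Streaming-API sketch (illustrative): a dma_map_single() call from a driver
 * is expected to land in dma_direct_map_page() above, which performs the
 * cache sync and returns the bus address. "dev", "buf" and "len" are
 * placeholder names.
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	... run the transfer ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */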

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/*
	 * No cache cleanup is needed beyond the sync below, and no
	 * phys_to_virt conversion either: dma_address is already a physical
	 * address, which is exactly what __dma_sync_page() expects.
	 */
	__dma_sync_page(dma_address, 0, size, direction);
}

struct dma_map_ops dma_direct_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
};
EXPORT_SYMBOL(dma_direct_ops);
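
/*
 * Sketch (assumption, not shown in this file): dma_direct_ops is exported so
 * the architecture's get_dma_ops() can hand it out as the default set of
 * operations; a hypothetical per-device override, where supported, would be
 * installed through archdata, e.g.:
 *
 *	dev->archdata.dma_ops = &some_other_dma_ops;
 */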

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);