/*
 * include/asm-xtensa/dma-mapping.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
 */
#ifndef _XTENSA_DMA_MAPPING_H
#define _XTENSA_DMA_MAPPING_H

#include <asm/cache.h>
#include <asm/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * DMA-consistent mapping functions.
 */

/* Arch-provided coherent-memory primitives (defined elsewhere in arch code).
 * consistent_sync() performs the CPU cache maintenance for a buffer in the
 * given DMA direction. */
extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
extern void consistent_free(void*, size_t, dma_addr_t);
extern void consistent_sync(void*, size_t, int);

/* No separate non-coherent allocator on this arch: alias to the coherent one. */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
  28. static inline dma_addr_t
  29. dma_map_single(struct device *dev, void *ptr, size_t size,
  30. enum dma_data_direction direction)
  31. {
  32. BUG_ON(direction == DMA_NONE);
  33. consistent_sync(ptr, size, direction);
  34. return virt_to_phys(ptr);
  35. }
  36. static inline void
  37. dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
  38. enum dma_data_direction direction)
  39. {
  40. BUG_ON(direction == DMA_NONE);
  41. }
  42. static inline int
  43. dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
  44. enum dma_data_direction direction)
  45. {
  46. int i;
  47. BUG_ON(direction == DMA_NONE);
  48. for (i = 0; i < nents; i++, sg++ ) {
  49. BUG_ON(!sg_page(sg));
  50. sg->dma_address = sg_phys(sg);
  51. consistent_sync(sg_virt(sg), sg->length, direction);
  52. }
  53. return nents;
  54. }
  55. static inline dma_addr_t
  56. dma_map_page(struct device *dev, struct page *page, unsigned long offset,
  57. size_t size, enum dma_data_direction direction)
  58. {
  59. BUG_ON(direction == DMA_NONE);
  60. return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
  61. }
  62. static inline void
  63. dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
  64. enum dma_data_direction direction)
  65. {
  66. BUG_ON(direction == DMA_NONE);
  67. }
  68. static inline void
  69. dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
  70. enum dma_data_direction direction)
  71. {
  72. BUG_ON(direction == DMA_NONE);
  73. }
  74. static inline void
  75. dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
  76. enum dma_data_direction direction)
  77. {
  78. consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
  79. }
  80. static inline void
  81. dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
  82. enum dma_data_direction direction)
  83. {
  84. consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
  85. }
  86. static inline void
  87. dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
  88. unsigned long offset, size_t size,
  89. enum dma_data_direction direction)
  90. {
  91. consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
  92. }
  93. static inline void
  94. dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
  95. unsigned long offset, size_t size,
  96. enum dma_data_direction direction)
  97. {
  98. consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
  99. }
  100. static inline void
  101. dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
  102. enum dma_data_direction dir)
  103. {
  104. int i;
  105. for (i = 0; i < nelems; i++, sg++)
  106. consistent_sync(sg_virt(sg), sg->length, dir);
  107. }
  108. static inline void
  109. dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
  110. enum dma_data_direction dir)
  111. {
  112. int i;
  113. for (i = 0; i < nelems; i++, sg++)
  114. consistent_sync(sg_virt(sg), sg->length, dir);
  115. }
  116. static inline int
  117. dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  118. {
  119. return 0;
  120. }
  121. static inline int
  122. dma_supported(struct device *dev, u64 mask)
  123. {
  124. return 1;
  125. }
  126. static inline int
  127. dma_set_mask(struct device *dev, u64 mask)
  128. {
  129. if(!dev->dma_mask || !dma_supported(dev, mask))
  130. return -EIO;
  131. *dev->dma_mask = mask;
  132. return 0;
  133. }
  134. static inline void
  135. dma_cache_sync(struct device *dev, void *vaddr, size_t size,
  136. enum dma_data_direction direction)
  137. {
  138. consistent_sync(vaddr, size, direction);
  139. }
  140. #endif /* _XTENSA_DMA_MAPPING_H */