  1. /*
  2. * Copyright(c) 2016 Intel Corporation.
  3. *
  4. * This file is provided under a dual BSD/GPLv2 license. When using or
  5. * redistributing this file, you may do so under either license.
  6. *
  7. * GPL LICENSE SUMMARY
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * BSD LICENSE
  19. *
  20. * Redistribution and use in source and binary forms, with or without
  21. * modification, are permitted provided that the following conditions
  22. * are met:
  23. *
  24. * - Redistributions of source code must retain the above copyright
  25. * notice, this list of conditions and the following disclaimer.
  26. * - Redistributions in binary form must reproduce the above copyright
  27. * notice, this list of conditions and the following disclaimer in
  28. * the documentation and/or other materials provided with the
  29. * distribution.
  30. * - Neither the name of Intel Corporation nor the names of its
  31. * contributors may be used to endorse or promote products derived
  32. * from this software without specific prior written permission.
  33. *
  34. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45. *
  46. */
  47. #include <linux/types.h>
  48. #include <linux/scatterlist.h>
  49. #include <rdma/ib_verbs.h>
  50. #include "dma.h"
  51. #define BAD_DMA_ADDRESS ((u64)0)
  52. /*
  53. * The following functions implement driver specific replacements
  54. * for the ib_dma_*() functions.
  55. *
  56. * These functions return kernel virtual addresses instead of
  57. * device bus addresses since the driver uses the CPU to copy
  58. * data instead of using hardware DMA.
  59. */
  60. static int rvt_mapping_error(struct ib_device *dev, u64 dma_addr)
  61. {
  62. return dma_addr == BAD_DMA_ADDRESS;
  63. }
  64. static u64 rvt_dma_map_single(struct ib_device *dev, void *cpu_addr,
  65. size_t size, enum dma_data_direction direction)
  66. {
  67. if (WARN_ON(!valid_dma_direction(direction)))
  68. return BAD_DMA_ADDRESS;
  69. return (u64)cpu_addr;
  70. }
  71. static void rvt_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
  72. enum dma_data_direction direction)
  73. {
  74. /* This is a stub, nothing to be done here */
  75. }
  76. static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
  77. unsigned long offset, size_t size,
  78. enum dma_data_direction direction)
  79. {
  80. u64 addr;
  81. if (WARN_ON(!valid_dma_direction(direction)))
  82. return BAD_DMA_ADDRESS;
  83. addr = (u64)page_address(page);
  84. if (addr)
  85. addr += offset;
  86. return addr;
  87. }
  88. static void rvt_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
  89. enum dma_data_direction direction)
  90. {
  91. /* This is a stub, nothing to be done here */
  92. }
  93. static int rvt_map_sg(struct ib_device *dev, struct scatterlist *sgl,
  94. int nents, enum dma_data_direction direction)
  95. {
  96. struct scatterlist *sg;
  97. u64 addr;
  98. int i;
  99. int ret = nents;
  100. if (WARN_ON(!valid_dma_direction(direction)))
  101. return 0;
  102. for_each_sg(sgl, sg, nents, i) {
  103. addr = (u64)page_address(sg_page(sg));
  104. if (!addr) {
  105. ret = 0;
  106. break;
  107. }
  108. sg->dma_address = addr + sg->offset;
  109. #ifdef CONFIG_NEED_SG_DMA_LENGTH
  110. sg->dma_length = sg->length;
  111. #endif
  112. }
  113. return ret;
  114. }
  115. static void rvt_unmap_sg(struct ib_device *dev,
  116. struct scatterlist *sg, int nents,
  117. enum dma_data_direction direction)
  118. {
  119. /* This is a stub, nothing to be done here */
  120. }
  121. static int rvt_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
  122. int nents, enum dma_data_direction direction,
  123. unsigned long attrs)
  124. {
  125. return rvt_map_sg(dev, sgl, nents, direction);
  126. }
  127. static void rvt_unmap_sg_attrs(struct ib_device *dev,
  128. struct scatterlist *sg, int nents,
  129. enum dma_data_direction direction,
  130. unsigned long attrs)
  131. {
  132. return rvt_unmap_sg(dev, sg, nents, direction);
  133. }
  134. static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr,
  135. size_t size, enum dma_data_direction dir)
  136. {
  137. }
  138. static void rvt_sync_single_for_device(struct ib_device *dev, u64 addr,
  139. size_t size,
  140. enum dma_data_direction dir)
  141. {
  142. }
  143. static void *rvt_dma_alloc_coherent(struct ib_device *dev, size_t size,
  144. u64 *dma_handle, gfp_t flag)
  145. {
  146. struct page *p;
  147. void *addr = NULL;
  148. p = alloc_pages(flag, get_order(size));
  149. if (p)
  150. addr = page_address(p);
  151. if (dma_handle)
  152. *dma_handle = (u64)addr;
  153. return addr;
  154. }
  155. static void rvt_dma_free_coherent(struct ib_device *dev, size_t size,
  156. void *cpu_addr, u64 dma_handle)
  157. {
  158. free_pages((unsigned long)cpu_addr, get_order(size));
  159. }
  160. struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = {
  161. .mapping_error = rvt_mapping_error,
  162. .map_single = rvt_dma_map_single,
  163. .unmap_single = rvt_dma_unmap_single,
  164. .map_page = rvt_dma_map_page,
  165. .unmap_page = rvt_dma_unmap_page,
  166. .map_sg = rvt_map_sg,
  167. .unmap_sg = rvt_unmap_sg,
  168. .map_sg_attrs = rvt_map_sg_attrs,
  169. .unmap_sg_attrs = rvt_unmap_sg_attrs,
  170. .sync_single_for_cpu = rvt_sync_single_for_cpu,
  171. .sync_single_for_device = rvt_sync_single_for_device,
  172. .alloc_coherent = rvt_dma_alloc_coherent,
  173. .free_coherent = rvt_dma_free_coherent
  174. };