dma-coherent.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	else
		return mem->device_base;
}

static int dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	int ret;

	if (!size) {
		ret = -EINVAL;
		goto out;
	}

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base) {
		ret = -EINVAL;
		goto out;
	}
	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem) {
		ret = -ENOMEM;
		goto out;
	}
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return 0;

out:
	kfree(dma_mem);
	if (mem_base)
		memunmap(mem_base);
	return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;
	int ret;

	ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
	if (ret)
		return ret;

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

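/*
 * Illustrative sketch (not part of this file): a platform driver with
 * device-local SRAM might reserve it as a per-device coherent pool at probe
 * time. The names "pdev", "sram_phys", "sram_bus" and the SZ_64K size are
 * hypothetical.
 *
 *	ret = dma_declare_coherent_memory(&pdev->dev, sram_phys, sram_bus,
 *					  SZ_64K, DMA_MEMORY_EXCLUSIVE);
 *	if (ret)
 *		dev_warn(&pdev->dev, "no dedicated DMA pool: %d\n", ret);
 *
 * Subsequent dma_alloc_coherent() calls on &pdev->dev are then satisfied
 * from this pool (see dma_alloc_from_dev_coherent() below).
 */
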
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

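/*
 * Illustrative sketch (not part of this file): after declaring a pool, a
 * driver can pin a specific device-address range (for example a control
 * block the hardware expects at a fixed offset) so the allocator never
 * hands it out. "sram_bus", the 0x1000 offset and the SZ_4K size are
 * hypothetical.
 *
 *	void *ctrl = dma_mark_declared_memory_occupied(&pdev->dev,
 *						       sram_bus + 0x1000, SZ_4K);
 *	if (IS_ERR(ctrl))
 *		dev_warn(&pdev->dev, "could not reserve control block\n");
 */
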
static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	ret = mem->virt_base + (pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address of the
 *		allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
	if (*ret)
		return 1;

	/*
	 * In the case where the allocation cannot be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_dev_coherent);

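/*
 * Illustrative sketch (not part of this file) of how an architecture's
 * allocator is expected to use the hook above: try the per-device pool
 * first and only fall through to the generic allocator when the hook
 * returns 0. The function name "arch_dma_alloc" is hypothetical.
 *
 *	void *arch_dma_alloc(struct device *dev, size_t size,
 *			     dma_addr_t *dma_handle, gfp_t gfp,
 *			     unsigned long attrs)
 *	{
 *		void *ret;
 *
 *		if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &ret))
 *			return ret;	// pool hit, or exclusive pool exhausted (ret == NULL)
 *
 *		// otherwise fall back to the generic, page-allocator based path
 *		...
 *	}
 */
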
void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
					 dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
EXPORT_SYMBOL(dma_release_from_dev_coherent);

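/*
 * Illustrative sketch (not part of this file): the matching free path in an
 * architecture's dma_free_coherent() implementation. "arch_dma_free" is a
 * hypothetical name; the point is that the per-device pool is tried first.
 *
 *	void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 *			   dma_addr_t dma_handle, unsigned long attrs)
 *	{
 *		if (dma_release_from_dev_coherent(dev, get_order(size), vaddr))
 *			return;		// memory came from the per-device pool
 *
 *		// otherwise release through the generic path
 *		...
 *	}
 */
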
int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
					   vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);

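/*
 * Illustrative sketch (not part of this file): an arch or dma_ops ->mmap
 * implementation typically consults this hook before mapping a generic
 * allocation itself. "arch_dma_mmap" is a hypothetical name.
 *
 *	int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
 *			  unsigned long attrs)
 *	{
 *		int ret;
 *
 *		if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *			return ret;	// buffer lives in the per-device pool
 *
 *		// otherwise map the generic allocation
 *		...
 *	}
 */
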
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;
	int ret;

	if (!mem) {
		ret = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size,
					       DMA_MEMORY_EXCLUSIVE, &mem);
		if (ret) {
			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			       &rmem->base, (unsigned long)rmem->size / SZ_1M);
			return ret;
		}
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

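/*
 * Illustrative device-tree sketch (not part of this file): a reserved-memory
 * node of the kind rmem_dma_setup() accepts. Node names, addresses and sizes
 * are made up; on ARM the region additionally needs "no-map", and
 * "linux,dma-default" selects it as the default global pool (see
 * rmem_dma_setup() above).
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@48000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x48000000 0x100000>;
 *			no-map;
 *			linux,dma-default;
 *		};
 *	};
 *
 * A device then references the pool via "memory-region = <&dma_pool>;",
 * which ends up calling rmem_dma_device_init() for that device.
 */
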
static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating the error from
	 * dma_assign_coherent_memory() when it is called with a NULL device.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);
	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}
	return ret;
}
core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif