/*
 * drivers/gpu/ion/ion_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/ion.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
#include <linux/highmem.h>
#include <mach/iommu_domains.h>
#include <asm/cacheflush.h>

/* for ion_heap_ops structure */
#include "ion_priv.h"
#define ION_CMA_ALLOCATE_FAILED -1

struct ion_cma_buffer_info {
        void *cpu_addr;         /* kernel virtual address of the allocation */
        dma_addr_t handle;      /* DMA/physical address from the DMA API */
        struct sg_table *table; /* one-entry scatterlist covering the buffer */
        bool is_cached;         /* true when the buffer was requested cached */
};

static int cma_heap_has_outer_cache;
/*
 * Create scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * once it is available.
 */
int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
                        void *cpu_addr, dma_addr_t handle, size_t size)
{
        struct page *page = phys_to_page(handle);
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (unlikely(ret))
                return ret;

        sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return 0;
}
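
/*
 * On kernels where the generic dma_common_get_sgtable() helper exists, the
 * function above could plausibly shrink to a thin wrapper; a minimal
 * sketch, assuming that API is present on this tree:
 *
 *      return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
 *
 * It builds the same single-entry sg_table for a physically contiguous
 * DMA allocation.
 */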
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
                            unsigned long len, unsigned long align,
                            unsigned long flags)
{
        struct device *dev = heap->priv;
        struct ion_cma_buffer_info *info;

        dev_dbg(dev, "Request buffer allocation len %ld\n", len);

        info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
        if (!info) {
                dev_err(dev, "Can't allocate buffer info\n");
                return ION_CMA_ALLOCATE_FAILED;
        }

        if (!ION_IS_CACHED(flags))
                info->cpu_addr = dma_alloc_writecombine(dev, len,
                                        &(info->handle), GFP_KERNEL);
        else
                info->cpu_addr = dma_alloc_nonconsistent(dev, len,
                                        &(info->handle), GFP_KERNEL);

        if (!info->cpu_addr) {
                dev_err(dev, "Fail to allocate buffer\n");
                goto err;
        }

        info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!info->table) {
                dev_err(dev, "Fail to allocate sg table\n");
                goto free_mem;
        }

        info->is_cached = ION_IS_CACHED(flags);

        ion_cma_get_sgtable(dev,
                        info->table, info->cpu_addr, info->handle, len);

        /* keep this for memory release */
        buffer->priv_virt = info;
        dev_dbg(dev, "Allocate buffer %p\n", buffer);

        if (heap->id == ION_QSECOM_HEAP_ID) {
                /*
                 * Flush unused kmap entries so no stale kernel mapping
                 * remains over pages handed to the secure environment.
                 */
                kmap_flush_unused();
        }
        return 0;

free_mem:
        /* don't leak the CMA allocation when sg table setup fails */
        dma_free_coherent(dev, len, info->cpu_addr, info->handle);
err:
        kfree(info);
        return ION_CMA_ALLOCATE_FAILED;
}
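
/*
 * For reference, allocations normally reach this heap through the ION core
 * rather than by calling ion_cma_allocate() directly. A minimal in-kernel
 * client sketch, assuming the ion_alloc() signature of this kernel
 * generation; CMA_HEAP_ID stands in for whichever board-specific heap id
 * was registered with ION_HEAP_TYPE_DMA:
 *
 *      struct ion_handle *handle;
 *
 *      handle = ion_alloc(client, len, SZ_4K,
 *                         ION_HEAP(CMA_HEAP_ID), ION_FLAG_CACHED);
 *
 * ION_FLAG_CACHED steers the dma_alloc_nonconsistent() path above; without
 * it the buffer comes back write-combined.
 */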
static void ion_cma_free(struct ion_buffer *buffer)
{
        struct device *dev = buffer->heap->priv;
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        dev_dbg(dev, "Release buffer %p\n", buffer);
        /* release memory */
        dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
        /* release sg table */
        sg_free_table(info->table);
        kfree(info->table);
        kfree(info);
}
/* return physical address in addr */
static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
                        ion_phys_addr_t *addr, size_t *len)
{
        struct device *dev = heap->priv;
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
                &info->handle);

        *addr = info->handle;
        *len = buffer->size;
        return 0;
}
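
/*
 * A kernel client would query this address through the ION core, which
 * dispatches to the .phys op above; a minimal sketch, assuming the
 * ion_phys() interface of this kernel generation:
 *
 *      ion_phys_addr_t pa;
 *      size_t size;
 *
 *      if (!ion_phys(client, handle, &pa, &size))
 *              pr_info("buffer at %pa, %zu bytes\n", &pa, size);
 *
 * This only succeeds for physically contiguous heaps such as this one.
 */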
struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
                                      struct ion_buffer *buffer)
{
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        return info->table;
}

void ion_cma_heap_unmap_dma(struct ion_heap *heap,
                            struct ion_buffer *buffer)
{
        return;
}
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
                        struct vm_area_struct *vma)
{
        struct device *dev = buffer->heap->priv;
        struct ion_cma_buffer_info *info = buffer->priv_virt;

#ifdef CONFIG_TIMA_RKP
        if (buffer->size) {
                /*
                 * iommu optimization - needs to be turned on from
                 * the TZ side.
                 */
                cpu_v7_tima_iommu_opt(vma->vm_start, vma->vm_end,
                                      (unsigned long)vma->vm_mm->pgd);
                /*
                 * Invalidate the entire unified TLB, inner shareable
                 * (TLBIALLIS), and synchronize before the mapping is used.
                 */
                __asm__ __volatile__ (
                        "mcr    p15, 0, r0, c8, c3, 0\n"
                        "dsb\n"
                        "isb\n");
        }
#endif
        if (info->is_cached)
                return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
                                info->handle, buffer->size);
        else
                return dma_mmap_writecombine(dev, vma, info->cpu_addr,
                                info->handle, buffer->size);
}
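
/*
 * From userspace this op is reached via mmap() on an ION buffer fd; a
 * minimal sketch, assuming the ION_IOC_ALLOC/ION_IOC_MAP ioctl flow of
 * this kernel generation (error handling omitted):
 *
 *      struct ion_allocation_data alloc = {
 *              .len = len, .align = 4096,
 *              .heap_mask = mask, .flags = 0,
 *      };
 *      struct ion_fd_data fd_data;
 *
 *      ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *      fd_data.handle = alloc.handle;
 *      ioctl(ion_fd, ION_IOC_MAP, &fd_data);
 *      ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd_data.fd, 0);
 *
 * With no cached bit in .flags, the mapping comes back write-combined
 * via dma_mmap_writecombine().
 */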
static void *ion_cma_map_kernel(struct ion_heap *heap,
                                struct ion_buffer *buffer)
{
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        /* the buffer is already mapped by dma_alloc_*; just hand it out */
        return info->cpu_addr;
}

static void ion_cma_unmap_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        return;
}
static int ion_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
                               const struct list_head *mem_map)
{
        if (mem_map) {
                struct mem_map_data *data;

                seq_printf(s, "\nMemory Map\n");
                seq_printf(s, "%16.s %14.s %14.s %14.s\n",
                           "client", "start address", "end address",
                           "size (hex)");
                list_for_each_entry(data, mem_map, node) {
                        const char *client_name = "(null)";

                        if (data->client_name)
                                client_name = data->client_name;

                        seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
                                   client_name, &data->addr,
                                   &data->addr_end,
                                   data->size, data->size);
                }
        }
        return 0;
}
static struct ion_heap_ops ion_cma_ops = {
        .allocate = ion_cma_allocate,
        .free = ion_cma_free,
        .map_dma = ion_cma_heap_map_dma,
        .unmap_dma = ion_cma_heap_unmap_dma,
        .phys = ion_cma_phys,
        .map_user = ion_cma_mmap,
        .map_kernel = ion_cma_map_kernel,
        .unmap_kernel = ion_cma_unmap_kernel,
        .print_debug = ion_cma_print_debug,
};
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);

        heap->ops = &ion_cma_ops;
        /*
         * set the device as the heap's private data; it is used later
         * to make the link with the reserved CMA memory
         */
        heap->priv = data->priv;
        heap->type = ION_HEAP_TYPE_DMA;
        cma_heap_has_outer_cache = data->has_outer_cache;
        return heap;
}
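
/*
 * ion_cma_heap_create() is invoked by the generic heap factory when a board
 * registers a heap of type ION_HEAP_TYPE_DMA. A minimal sketch of the
 * platform data, with the device and heap id as placeholders:
 *
 *      static struct ion_platform_heap example_cma_heap = {
 *              .type = ION_HEAP_TYPE_DMA,
 *              .id   = EXAMPLE_CMA_HEAP_ID,
 *              .name = "cma",
 *              .priv = &example_pdev.dev,
 *      };
 *
 * The device passed in .priv must have a CMA region attached (e.g. via
 * dma_declare_contiguous()) for the dma_alloc_* calls above to draw from.
 */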
void ion_cma_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}