ion_heap.c

/*
 * drivers/gpu/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2014,2016 The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/ion.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include "ion_priv.h"

void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return 0;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;

#ifdef CONFIG_TIMA_RKP
        if (buffer->size) {
                /* iommu optimization- needs to be turned ON from
                 * the tz side.
                 */
                cpu_v7_tima_iommu_opt(vma->vm_start, vma->vm_end,
                                      (unsigned long)vma->vm_mm->pgd);
                __asm__ __volatile__ (
                        "mcr p15, 0, r0, c8, c3, 0\n"
                        "dsb\n"
                        "isb\n");
        }
#endif
        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg_dma_len(sg);

                if (offset >= sg_dma_len(sg)) {
                        offset -= sg_dma_len(sg);
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg_dma_len(sg) - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                vma->vm_page_prot);
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}
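
/*
 * Illustrative sketch (not part of the original file): heap
 * implementations typically reuse the three mapping helpers above by
 * pointing their ion_heap_ops at them directly. The names below
 * (example_heap_ops, example_heap_allocate, example_heap_free) are
 * hypothetical, and the sketch assumes the heap fills in
 * buffer->sg_table at allocation time, since every helper here walks
 * that scatterlist:
 *
 *	static struct ion_heap_ops example_heap_ops = {
 *		.allocate	= example_heap_allocate,
 *		.free		= example_heap_free,
 *		.map_kernel	= ion_heap_map_kernel,
 *		.unmap_kernel	= ion_heap_unmap_kernel,
 *		.map_user	= ion_heap_map_user,
 *	};
 */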

#define MAX_VMAP_RETRIES 10

/**
 * An optimized page-zero'ing function. vmaps arrays of pages in large
 * chunks to minimize the number of memsets and vmaps/vunmaps.
 *
 * Note that the `pages' array should be composed of all 4K pages.
 *
 * NOTE: This function does not guarantee synchronization of the caches
 * and thus caller is responsible for handling any cache maintenance
 * operations needed.
 */
int ion_heap_pages_zero(struct page **pages, int num_pages)
{
        int i, j, npages_to_vmap;
        void *ptr = NULL;

        /*
         * As an optimization, we manually zero out all of the pages
         * in one fell swoop here. To safeguard against insufficient
         * vmalloc space, we only vmap `npages_to_vmap' at a time,
         * starting with a conservative estimate of 1/8 of the total
         * number of vmalloc pages available.
         */
        npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
                        >> PAGE_SHIFT;
        for (i = 0; i < num_pages; i += npages_to_vmap) {
                npages_to_vmap = min(npages_to_vmap, num_pages - i);
                for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
                     ++j) {
                        ptr = vmap(&pages[i], npages_to_vmap,
                                   VM_IOREMAP, PAGE_KERNEL);
                        if (ptr)
                                break;
                        else
                                npages_to_vmap >>= 1;
                }
                if (!ptr)
                        return -ENOMEM;

                memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
                vunmap(ptr);
        }
        return 0;
}
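
/*
 * Worked example (illustrative, not from the original file): on a
 * 32-bit build with, say, 256 MB of vmalloc space and 4K pages, the
 * initial chunk is (256 MB / 8) >> PAGE_SHIFT = 8192 pages per vmap.
 * If a vmap() call fails because that much contiguous vmalloc space is
 * unavailable, the chunk is halved (8192 -> 4096 -> 2048 ...) up to
 * MAX_VMAP_RETRIES times before the function gives up with -ENOMEM.
 * As the comment above notes, callers must still perform any needed
 * cache maintenance afterwards.
 */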

int ion_heap_alloc_pages_mem(struct pages_mem *pages_mem)
{
        struct page **pages;
        unsigned int page_tbl_size;

        pages_mem->free_fn = kfree;
        page_tbl_size = sizeof(struct page *) * (pages_mem->size >> PAGE_SHIFT);
        if (page_tbl_size > SZ_8K) {
                /*
                 * Do fallback to ensure we have a balance between
                 * performance and availability.
                 */
                pages = kmalloc(page_tbl_size,
                                __GFP_COMP | __GFP_NORETRY |
                                __GFP_NO_KSWAPD | __GFP_NOWARN);
                if (!pages) {
                        pages = vmalloc(page_tbl_size);
                        pages_mem->free_fn = vfree;
                }
        } else {
                pages = kmalloc(page_tbl_size, GFP_KERNEL);
        }

        if (!pages)
                return -ENOMEM;

        pages_mem->pages = pages;
        return 0;
}
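
/*
 * Sizing note (illustrative, not from the original file): with 4-byte
 * struct page pointers, the table for a 16 MB buffer needs
 * (16 MB >> PAGE_SHIFT) * 4 = 16 KB, which exceeds SZ_8K, so the
 * kmalloc attempt above runs without retries or kswapd wakeups and
 * silently falls back to vmalloc()/vfree() when it fails. Buffers of
 * 8 MB or less stay on the plain GFP_KERNEL kmalloc path.
 */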

void ion_heap_free_pages_mem(struct pages_mem *pages_mem)
{
        pages_mem->free_fn(pages_mem->pages);
}

int ion_heap_high_order_page_zero(struct page *page, int order)
{
        int i, ret;
        struct pages_mem pages_mem;
        int npages = 1 << order;

        pages_mem.size = npages * PAGE_SIZE;

        if (ion_heap_alloc_pages_mem(&pages_mem))
                return -ENOMEM;

        for (i = 0; i < (1 << order); ++i)
                pages_mem.pages[i] = page + i;

        ret = ion_heap_pages_zero(pages_mem.pages, npages);
        dma_sync_single_for_device(NULL, page_to_phys(page), pages_mem.size,
                                   DMA_BIDIRECTIONAL);
        ion_heap_free_pages_mem(&pages_mem);
        return ret;
}

int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        struct scatterlist *sg;
        int i, j, ret = 0, npages = 0;
        struct pages_mem pages_mem;

        pages_mem.size = PAGE_ALIGN(buffer->size);

        if (ion_heap_alloc_pages_mem(&pages_mem))
                return -ENOMEM;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long len = sg_dma_len(sg);

                for (j = 0; j < len / PAGE_SIZE; j++)
                        pages_mem.pages[npages++] = page + j;
        }
        ret = ion_heap_pages_zero(pages_mem.pages, npages);
        dma_sync_sg_for_device(NULL, table->sgl, table->nents,
                               DMA_BIDIRECTIONAL);
        ion_heap_free_pages_mem(&pages_mem);
        return ret;
}
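
/*
 * Usage sketch (hypothetical, not part of the original file): a heap's
 * free path would typically scrub the buffer before handing the memory
 * back, roughly along these lines (example_heap_free and
 * example_release_memory are made-up names):
 *
 *	static void example_heap_free(struct ion_buffer *buffer)
 *	{
 *		ion_heap_buffer_zero(buffer);
 *		example_release_memory(buffer);
 *	}
 *
 * ion_heap_buffer_zero() already issues the dma_sync_sg_for_device()
 * call above, so the caller only needs further cache maintenance if it
 * touches the pages again afterwards.
 */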

void ion_heap_free_page(struct ion_buffer *buffer, struct page *page,
                        unsigned int order)
{
        int i;

        if (!ion_buffer_fault_user_mappings(buffer)) {
                __free_pages(page, order);
                return;
        }
        for (i = 0; i < (1 << order); i++)
                __free_page(page + i);
}

void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        rt_mutex_lock(&heap->lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        rt_mutex_unlock(&heap->lock);
        wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        rt_mutex_lock(&heap->lock);
        size = heap->free_list_size;
        rt_mutex_unlock(&heap->lock);

        return size;
}

static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
                                       bool skip_pools)
{
        struct ion_buffer *buffer;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        if (size == 0)
                size = ion_heap_freelist_size(heap);

        while (true) {
                rt_mutex_lock(&heap->lock);
                if (list_empty(&heap->free_list) || total_drained >= size) {
                        rt_mutex_unlock(&heap->lock);
                        break;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                total_drained += buffer->size;
                if (skip_pools)
                        buffer->flags |= ION_FLAG_FREED_FROM_SHRINKER;
                rt_mutex_unlock(&heap->lock);
                ion_buffer_destroy(buffer);
        }

        return total_drained;
}

size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, false);
}

size_t ion_heap_freelist_drain_from_shrinker(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, true);
}
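
/*
 * Shrinker sketch (hypothetical, not part of the original file): the
 * skip_pools variant is intended for memory-pressure paths, where the
 * reclaimed buffers are flagged with ION_FLAG_FREED_FROM_SHRINKER so
 * they bypass the page pools instead of being recycled. A shrink
 * callback could drain the freelist roughly like this (example_shrink
 * is a made-up name; to_pages() stands in for a bytes-to-pages
 * conversion):
 *
 *	static int example_shrink(struct ion_heap *heap, gfp_t gfp,
 *				  int nr_to_scan)
 *	{
 *		if (!nr_to_scan)
 *			return to_pages(ion_heap_freelist_size(heap));
 *		return to_pages(ion_heap_freelist_drain_from_shrinker(
 *				heap, nr_to_scan * PAGE_SIZE));
 *	}
 */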

int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);

                rt_mutex_lock(&heap->lock);
                if (list_empty(&heap->free_list)) {
                        rt_mutex_unlock(&heap->lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                rt_mutex_unlock(&heap->lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}

int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        heap->free_list_size = 0;
        rt_mutex_init(&heap->lock);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                       __func__);
                return PTR_RET(heap->task);
        }
        /* only lower the thread's priority once kthread_run() succeeded */
        sched_setscheduler(heap->task, SCHED_IDLE, &param);
        return 0;
}
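
/*
 * Deferred-free sketch (hypothetical, not part of the original file):
 * heaps that want asynchronous freeing generally advertise
 * ION_HEAP_FLAG_DEFER_FREE so the ION core calls
 * ion_heap_init_deferred_free() for them; freed buffers are then
 * queued with ion_heap_freelist_add() and destroyed by the
 * low-priority (SCHED_IDLE) thread created above, e.g.:
 *
 *	heap->flags |= ION_HEAP_FLAG_DEFER_FREE;
 *	...
 *	ion_heap_freelist_add(heap, buffer);	/­* instead of an
 *						   immediate destroy *­/
 */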

struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_heap *heap = NULL;

        switch (heap_data->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                pr_err("%s: Heap type is disabled: %d\n", __func__,
                       heap_data->type);
                return ERR_PTR(-EINVAL);
        case ION_HEAP_TYPE_SYSTEM:
                heap = ion_system_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                heap = ion_carveout_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CHUNK:
                heap = ion_chunk_heap_create(heap_data);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap_data->type);
                return ERR_PTR(-EINVAL);
        }

        if (IS_ERR_OR_NULL(heap)) {
                pr_err("%s: error creating heap %s type %d base %pa size %u\n",
                       __func__, heap_data->name, heap_data->type,
                       &heap_data->base, heap_data->size);
                return ERR_PTR(-EINVAL);
        }

        heap->name = heap_data->name;
        heap->id = heap_data->id;
        heap->priv = heap_data->priv;
        return heap;
}
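
/*
 * Creation sketch (hypothetical, not part of the original file): board
 * or platform code typically walks an ion_platform_data table, creates
 * each heap and registers it with the ION device, along these lines
 * (pdata and idev are made-up names):
 *
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *		if (IS_ERR_OR_NULL(heap))
 *			continue;
 *		ion_device_add_heap(idev, heap);
 *	}
 */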

void ion_heap_destroy(struct ion_heap *heap)
{
        if (!heap)
                return;

        switch (heap->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                pr_err("%s: Heap type is disabled: %d\n", __func__,
                       heap->type);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                ion_system_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                ion_carveout_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CHUNK:
                ion_chunk_heap_destroy(heap);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap->type);
        }
}