  1. /*
  2. * drivers/gpu/ion/ion_removed_heap.c
  3. *
  4. * Copyright (C) 2011 Google, Inc.
  5. * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  6. *
  7. * This software is licensed under the terms of the GNU General Public
  8. * License version 2, as published by the Free Software Foundation, and
  9. * may be copied, distributed, and modified under those terms.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. */
  17. #include <linux/spinlock.h>
  18. #include <linux/err.h>
  19. #include <linux/genalloc.h>
  20. #include <linux/io.h>
  21. #include <linux/ion.h>
  22. #include <linux/mm.h>
  23. #include <linux/scatterlist.h>
  24. #include <linux/slab.h>
  25. #include <linux/vmalloc.h>
  26. #include <linux/seq_file.h>
  27. #include "ion_priv.h"
  28. #include <asm/mach/map.h>
  29. #include <asm/cacheflush.h>
  30. #include <linux/msm_ion.h>
/*
 * struct ion_removed_heap - state for a carveout-style "removed" ION heap
 * @heap:		embedded generic ION heap; container_of() anchor used
 *			by all the ops below
 * @pool:		gen_pool allocator that hands out physical ranges
 *			from the carveout
 * @base:		physical base address of the carveout region
 * @allocated_bytes:	bytes currently handed out from @pool
 * @total_size:		total size of the carveout region in bytes
 * @request_region:	optional platform hook invoked before the first
 *			mapping is created (see ion_removed_request_region)
 * @release_region:	optional platform hook invoked after the last
 *			mapping is torn down
 * @map_count:		number of outstanding kernel/user mappings; gates
 *			the request/release hooks
 * @bus_id:		opaque cookie (from setup_region) passed to the
 *			request/release hooks
 */
struct ion_removed_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*request_region)(void *);
	int (*release_region)(void *);
	atomic_t map_count;
	void *bus_id;
};
  42. ion_phys_addr_t ion_removed_allocate(struct ion_heap *heap,
  43. unsigned long size,
  44. unsigned long align)
  45. {
  46. struct ion_removed_heap *removed_heap =
  47. container_of(heap, struct ion_removed_heap, heap);
  48. unsigned long offset = gen_pool_alloc_aligned(removed_heap->pool,
  49. size, ilog2(align));
  50. if (!offset) {
  51. if ((removed_heap->total_size -
  52. removed_heap->allocated_bytes) >= size)
  53. pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.",
  54. __func__, heap->name,
  55. removed_heap->total_size -
  56. removed_heap->allocated_bytes, size);
  57. return ION_CARVEOUT_ALLOCATE_FAIL;
  58. }
  59. removed_heap->allocated_bytes += size;
  60. return offset;
  61. }
  62. void ion_removed_free(struct ion_heap *heap, ion_phys_addr_t addr,
  63. unsigned long size)
  64. {
  65. struct ion_removed_heap *removed_heap =
  66. container_of(heap, struct ion_removed_heap, heap);
  67. if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
  68. return;
  69. gen_pool_free(removed_heap->pool, addr, size);
  70. removed_heap->allocated_bytes -= size;
  71. }
  72. static int ion_removed_heap_phys(struct ion_heap *heap,
  73. struct ion_buffer *buffer,
  74. ion_phys_addr_t *addr, size_t *len)
  75. {
  76. *addr = buffer->priv_phys;
  77. *len = buffer->size;
  78. return 0;
  79. }
  80. static int ion_removed_heap_allocate(struct ion_heap *heap,
  81. struct ion_buffer *buffer,
  82. unsigned long size, unsigned long align,
  83. unsigned long flags)
  84. {
  85. buffer->priv_phys = ion_removed_allocate(heap, size, align);
  86. return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
  87. }
  88. static void ion_removed_heap_free(struct ion_buffer *buffer)
  89. {
  90. struct ion_heap *heap = buffer->heap;
  91. ion_removed_free(heap, buffer->priv_phys, buffer->size);
  92. buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
  93. }
  94. struct sg_table *ion_removed_heap_map_dma(struct ion_heap *heap,
  95. struct ion_buffer *buffer)
  96. {
  97. struct sg_table *table;
  98. int ret;
  99. table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
  100. if (!table)
  101. return ERR_PTR(-ENOMEM);
  102. ret = sg_alloc_table(table, 1, GFP_KERNEL);
  103. if (ret)
  104. goto err0;
  105. table->sgl->length = buffer->size;
  106. table->sgl->offset = 0;
  107. table->sgl->dma_address = buffer->priv_phys;
  108. return table;
  109. err0:
  110. kfree(table);
  111. return ERR_PTR(ret);
  112. }
  113. void ion_removed_heap_unmap_dma(struct ion_heap *heap,
  114. struct ion_buffer *buffer)
  115. {
  116. if (buffer->sg_table)
  117. sg_free_table(buffer->sg_table);
  118. kfree(buffer->sg_table);
  119. buffer->sg_table = 0;
  120. }
  121. static int ion_removed_request_region(struct ion_removed_heap *removed_heap)
  122. {
  123. int ret_value = 0;
  124. if (atomic_inc_return(&removed_heap->map_count) == 1) {
  125. if (removed_heap->request_region) {
  126. ret_value = removed_heap->request_region(
  127. removed_heap->bus_id);
  128. if (ret_value) {
  129. pr_err("Unable to request SMI region");
  130. atomic_dec(&removed_heap->map_count);
  131. }
  132. }
  133. }
  134. return ret_value;
  135. }
  136. static int ion_removed_release_region(struct ion_removed_heap *removed_heap)
  137. {
  138. int ret_value = 0;
  139. if (atomic_dec_and_test(&removed_heap->map_count)) {
  140. if (removed_heap->release_region) {
  141. ret_value = removed_heap->release_region(
  142. removed_heap->bus_id);
  143. if (ret_value)
  144. pr_err("Unable to release SMI region");
  145. }
  146. }
  147. return ret_value;
  148. }
  149. void *ion_removed_heap_map_kernel(struct ion_heap *heap,
  150. struct ion_buffer *buffer)
  151. {
  152. struct ion_removed_heap *removed_heap =
  153. container_of(heap, struct ion_removed_heap, heap);
  154. void *ret_value;
  155. if (ion_removed_request_region(removed_heap))
  156. return NULL;
  157. if (ION_IS_CACHED(buffer->flags))
  158. ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
  159. else
  160. ret_value = ioremap(buffer->priv_phys, buffer->size);
  161. if (!ret_value)
  162. ion_removed_release_region(removed_heap);
  163. return ret_value;
  164. }
  165. void ion_removed_heap_unmap_kernel(struct ion_heap *heap,
  166. struct ion_buffer *buffer)
  167. {
  168. struct ion_removed_heap *removed_heap =
  169. container_of(heap, struct ion_removed_heap, heap);
  170. __arm_iounmap(buffer->vaddr);
  171. buffer->vaddr = NULL;
  172. ion_removed_release_region(removed_heap);
  173. return;
  174. }
  175. int ion_removed_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
  176. struct vm_area_struct *vma)
  177. {
  178. struct ion_removed_heap *removed_heap =
  179. container_of(heap, struct ion_removed_heap, heap);
  180. int ret_value = 0;
  181. if (ion_removed_request_region(removed_heap))
  182. return -EINVAL;
  183. if (!ION_IS_CACHED(buffer->flags))
  184. vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
  185. ret_value = remap_pfn_range(vma, vma->vm_start,
  186. __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
  187. vma->vm_end - vma->vm_start,
  188. vma->vm_page_prot);
  189. if (ret_value)
  190. ion_removed_release_region(removed_heap);
  191. return ret_value;
  192. }
  193. void ion_removed_heap_unmap_user(struct ion_heap *heap,
  194. struct ion_buffer *buffer)
  195. {
  196. struct ion_removed_heap *removed_heap =
  197. container_of(heap, struct ion_removed_heap, heap);
  198. ion_removed_release_region(removed_heap);
  199. }
  200. static int ion_removed_print_debug(struct ion_heap *heap, struct seq_file *s,
  201. const struct list_head *mem_map)
  202. {
  203. struct ion_removed_heap *removed_heap =
  204. container_of(heap, struct ion_removed_heap, heap);
  205. seq_printf(s, "total bytes currently allocated: %lx\n",
  206. removed_heap->allocated_bytes);
  207. seq_printf(s, "total heap size: %lx\n", removed_heap->total_size);
  208. if (mem_map) {
  209. unsigned long base = removed_heap->base;
  210. unsigned long size = removed_heap->total_size;
  211. unsigned long end = base+size;
  212. unsigned long last_end = base;
  213. struct mem_map_data *data;
  214. seq_printf(s, "\nMemory Map\n");
  215. seq_printf(s, "%16.s %14.s %14.s %14.s\n",
  216. "client", "start address", "end address",
  217. "size (hex)");
  218. list_for_each_entry(data, mem_map, node) {
  219. const char *client_name = "(null)";
  220. if (last_end < data->addr) {
  221. phys_addr_t da;
  222. da = data->addr-1;
  223. seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
  224. "FREE", &last_end, &da,
  225. data->addr-last_end,
  226. data->addr-last_end);
  227. }
  228. if (data->client_name)
  229. client_name = data->client_name;
  230. seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
  231. client_name, &data->addr,
  232. &data->addr_end,
  233. data->size, data->size);
  234. last_end = data->addr_end+1;
  235. }
  236. if (last_end < end) {
  237. seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
  238. last_end, end-1, end-last_end, end-last_end);
  239. }
  240. }
  241. return 0;
  242. }
/* Heap operations wired into the generic ION core for removed heaps. */
static struct ion_heap_ops removed_heap_ops = {
	.allocate = ion_removed_heap_allocate,
	.free = ion_removed_heap_free,
	.phys = ion_removed_heap_phys,
	.map_user = ion_removed_heap_map_user,
	.map_kernel = ion_removed_heap_map_kernel,
	.unmap_user = ion_removed_heap_unmap_user,
	.unmap_kernel = ion_removed_heap_unmap_kernel,
	.map_dma = ion_removed_heap_map_dma,
	.unmap_dma = ion_removed_heap_unmap_dma,
	.print_debug = ion_removed_print_debug,
};
  255. struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *heap_data)
  256. {
  257. struct ion_removed_heap *removed_heap;
  258. int ret;
  259. removed_heap = kzalloc(sizeof(struct ion_removed_heap), GFP_KERNEL);
  260. if (!removed_heap)
  261. return ERR_PTR(-ENOMEM);
  262. removed_heap->pool = gen_pool_create(12, -1);
  263. if (!removed_heap->pool) {
  264. kfree(removed_heap);
  265. return ERR_PTR(-ENOMEM);
  266. }
  267. removed_heap->base = heap_data->base;
  268. ret = gen_pool_add(removed_heap->pool, removed_heap->base,
  269. heap_data->size, -1);
  270. if (ret < 0) {
  271. gen_pool_destroy(removed_heap->pool);
  272. kfree(removed_heap);
  273. return ERR_PTR(-EINVAL);
  274. }
  275. removed_heap->heap.ops = &removed_heap_ops;
  276. removed_heap->heap.type = ION_HEAP_TYPE_REMOVED;
  277. removed_heap->allocated_bytes = 0;
  278. removed_heap->total_size = heap_data->size;
  279. if (heap_data->extra_data) {
  280. struct ion_co_heap_pdata *extra_data =
  281. heap_data->extra_data;
  282. if (extra_data->setup_region)
  283. removed_heap->bus_id = extra_data->setup_region();
  284. if (extra_data->request_region)
  285. removed_heap->request_region =
  286. extra_data->request_region;
  287. if (extra_data->release_region)
  288. removed_heap->release_region =
  289. extra_data->release_region;
  290. }
  291. return &removed_heap->heap;
  292. }
  293. void ion_removed_heap_destroy(struct ion_heap *heap)
  294. {
  295. struct ion_removed_heap *removed_heap =
  296. container_of(heap, struct ion_removed_heap, heap);
  297. gen_pool_destroy(removed_heap->pool);
  298. kfree(removed_heap);
  299. removed_heap = NULL;
  300. }