ion_system_heap.c

/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"
#include <trace/events/kmem.h>

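/*
 * Allocation orders tried by this heap, largest first (with 4KB pages:
 * 2MB, 1MB, 64KB and 4KB blocks). High-order attempts use non-blocking,
 * no-retry GFP flags so a failed large allocation simply falls through
 * to the next smaller order.
 */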
static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_NOWARN |
					    __GFP_NORETRY | __GFP_NO_KSWAPD)
					   & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_NOWARN);
static const unsigned int orders[] = {9, 8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **uncached_pools;
	struct ion_page_pool **cached_pools;
};

struct page_info {
	struct page *page;
	bool from_pool;
	unsigned int order;
	struct list_head list;
};

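/*
 * Allocate one block of the given order for @buffer from the matching
 * cached or uncached page pool; *from_pool reports whether the block
 * actually came from the pool. Buffers that fault in their user mappings
 * get the block split into order-0 pages.
 */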
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order,
				      bool *from_pool)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct page *page;
	struct ion_page_pool *pool;

	if (!cached)
		pool = heap->uncached_pools[order_to_index(order)];
	else
		pool = heap->cached_pools[order_to_index(order)];

	page = ion_page_pool_alloc(pool, from_pool);
	if (!page)
		return NULL;

	if (split_pages)
		split_page(page, order);
	return page;
}

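/*
 * Return one block to the matching page pool, or hand it straight back to
 * the system when the buffer is being reclaimed by the shrinker
 * (ION_FLAG_FREED_FROM_SHRINKER), in which case pooling is bypassed.
 */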
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (buffer->flags & ION_FLAG_FREED_FROM_SHRINKER) {
		if (split_pages) {
			for (i = 0; i < (1 << order); i++)
				__free_page(page + i);
		} else {
			__free_pages(page, order);
		}
	} else {
		struct ion_page_pool *pool;

		if (cached)
			pool = heap->cached_pools[order_to_index(order)];
		else
			pool = heap->uncached_pools[order_to_index(order)];
		ion_page_pool_free(pool, page);
	}
}

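/*
 * Walk the order list from largest to smallest and return the first block
 * that fits within @size and does not exceed @max_order. Returns NULL once
 * no order can be satisfied.
 */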
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;
	bool from_pool;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i], &from_pool);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (info) {
			info->page = page;
			info->order = orders[i];
			info->from_pool = from_pool;
		}
		return info;
	}
	return NULL;
}

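/*
 * Fill in one scatterlist entry for the block described by @info (and, for
 * freshly allocated pages, the parallel sync-table entry and the pages_mem
 * array used for zeroing), then consume the page_info node. Returns the
 * updated index into data->pages.
 */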
static unsigned int process_info(struct page_info *info,
				 struct scatterlist *sg,
				 struct scatterlist *sg_sync,
				 struct pages_mem *data, unsigned int i)
{
	struct page *page = info->page;
	unsigned int j;

	if (sg_sync) {
		sg_set_page(sg_sync, page, (1 << info->order) * PAGE_SIZE, 0);
		sg_dma_address(sg_sync) = page_to_phys(page);
	}
	sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t
	 * that is valid for the targeted device, but this works
	 * on the currently targeted hardware.
	 */
	sg_dma_address(sg) = page_to_phys(page);
	if (data) {
		for (j = 0; j < (1 << info->order); ++j)
			data->pages[i++] = nth_page(page, j);
	}
	list_del(&info->list);
	kfree(info);
	return i;
}

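/*
 * Build the buffer from the largest blocks available. Pooled pages and
 * newly allocated pages are collected on separate lists, merged into one
 * sg_table ordered by block size, and the new (non-pooled) pages are
 * zeroed and then flushed for the device with dma_sync_sg_for_device()
 * before the table is attached to the buffer.
 */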
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct sg_table table_sync;
	struct scatterlist *sg;
	struct scatterlist *sg_sync;
	int ret;
	struct list_head pages;
	struct list_head pages_from_pool;
	struct page_info *info, *tmp_info;
	int i = 0;
	unsigned int nents_sync = 0;
	unsigned long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	struct pages_mem data;
	unsigned int sz;
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	data.size = 0;
	INIT_LIST_HEAD(&pages);
	INIT_LIST_HEAD(&pages_from_pool);

	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;

		sz = (1 << info->order) * PAGE_SIZE;

		if (info->from_pool) {
			list_add_tail(&info->list, &pages_from_pool);
		} else {
			list_add_tail(&info->list, &pages);
			data.size += sz;
			++nents_sync;
		}
		size_remaining -= sz;
		max_order = info->order;
		i++;
	}

	ret = ion_heap_alloc_pages_mem(&data);
	if (ret)
		goto err;

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err_free_data_pages;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);
	if (ret)
		goto err1;

	if (nents_sync) {
		ret = sg_alloc_table(&table_sync, nents_sync, GFP_KERNEL);
		if (ret)
			goto err_free_sg;
	}

	i = 0;
	sg = table->sgl;
	sg_sync = table_sync.sgl;

	/*
	 * We now have two separate lists. One list contains pages from the
	 * pool and the other pages from buddy. We want to merge these
	 * together while preserving the ordering of the pages (higher order
	 * first).
	 */
	do {
		if (!list_empty(&pages))
			info = list_first_entry(&pages, struct page_info, list);
		else
			info = NULL;
		if (!list_empty(&pages_from_pool))
			tmp_info = list_first_entry(&pages_from_pool,
						    struct page_info, list);
		else
			tmp_info = NULL;

		if (info && tmp_info) {
			if (info->order >= tmp_info->order) {
				i = process_info(info, sg, sg_sync, &data, i);
				sg_sync = sg_next(sg_sync);
			} else {
				i = process_info(tmp_info, sg, NULL, NULL, i);
			}
		} else if (info) {
			i = process_info(info, sg, sg_sync, &data, i);
			sg_sync = sg_next(sg_sync);
		} else if (tmp_info) {
			i = process_info(tmp_info, sg, NULL, NULL, i);
		} else {
			BUG();
		}
		sg = sg_next(sg);
	} while (sg);

	ret = ion_heap_pages_zero(data.pages, data.size >> PAGE_SHIFT);
	if (ret) {
		pr_err("Unable to zero pages\n");
		goto err_free_sg2;
	}

	if (nents_sync)
		dma_sync_sg_for_device(NULL, table_sync.sgl, table_sync.nents,
				       DMA_BIDIRECTIONAL);

	buffer->priv_virt = table;
	if (nents_sync)
		sg_free_table(&table_sync);
	ion_heap_free_pages_mem(&data);
	return 0;

err_free_sg2:
	/* We failed to zero buffers. Bypass pool */
	buffer->flags |= ION_FLAG_FREED_FROM_SHRINKER;
	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg->length));
	if (nents_sync)
		sg_free_table(&table_sync);
err_free_sg:
	sg_free_table(table);
err1:
	kfree(table);
err_free_data_pages:
	ion_heap_free_pages_mem(&data);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	list_for_each_entry_safe(info, tmp_info, &pages_from_pool, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

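/*
 * Zero the buffer (unless it is being reclaimed by the shrinker, where
 * zeroing would be wasted work) and return every block to its pool or to
 * the system.
 */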
void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	LIST_HEAD(pages);
	int i;

	if (!(buffer->flags & ION_FLAG_FREED_FROM_SHRINKER))
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

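/*
 * Shrinker callback. With nr_to_scan == 0 it only reports how many pages
 * the heap could release; otherwise it drains the deferred-free list first
 * and then the uncached and cached pools, order by order, until the
 * request is met.
 */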
static int ion_system_heap_shrink(struct shrinker *shrinker,
				  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int nr_total = 0;
	int nr_freed = 0;
	int i;

	if (sc->nr_to_scan == 0)
		goto end;

	/*
	 * Shrink the free list first, no point in zeroing the memory if
	 * we're just going to reclaim it. Also, skip any possible
	 * page pooling.
	 */
	nr_freed += ion_heap_freelist_drain_from_shrinker(
			heap, sc->nr_to_scan * PAGE_SIZE) / PAGE_SIZE;

	if (nr_freed >= sc->nr_to_scan)
		goto end;

	for (i = 0; i < num_orders; i++) {
		nr_freed += ion_page_pool_shrink(sys_heap->uncached_pools[i],
						 sc->gfp_mask, sc->nr_to_scan);
		if (nr_freed >= sc->nr_to_scan)
			goto end;

		nr_freed += ion_page_pool_shrink(sys_heap->cached_pools[i],
						 sc->gfp_mask, sc->nr_to_scan);
		if (nr_freed >= sc->nr_to_scan)
			goto end;
	}

end:
	/*
	 * Total number of items is whatever the page pools are holding
	 * plus whatever's in the freelist.
	 */
	for (i = 0; i < num_orders; i++) {
		nr_total += ion_page_pool_shrink(
				sys_heap->uncached_pools[i], sc->gfp_mask, 0);
		nr_total += ion_page_pool_shrink(
				sys_heap->cached_pools[i], sc->gfp_mask, 0);
	}
	nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
	return nr_total;
}

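/*
 * Report per-order pool occupancy (highmem and lowmem pages, uncached and
 * cached) through the provided seq_file.
 */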
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->uncached_pools[i];

		seq_printf(s,
			   "%d order %u highmem pages in uncached pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s,
			   "%d order %u lowmem pages in uncached pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->cached_pools[i];

		seq_printf(s,
			   "%d order %u highmem pages in cached pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s,
			   "%d order %u lowmem pages in cached pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}

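/*
 * Destroy whichever pools in the array were created; safe on a partially
 * filled array since each entry is NULL-checked and cleared.
 */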
static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (pools[i]) {
			ion_page_pool_destroy(pools[i]);
			pools[i] = NULL;
		}
}

/**
 * ion_system_heap_create_pools - Creates pools for all orders
 *
 * If this fails you don't need to destroy any pools. It's all or
 * nothing. If it succeeds you'll eventually need to use
 * ion_system_heap_destroy_pools to destroy the pools.
 */
static int ion_system_heap_create_pools(struct ion_page_pool **pools)
{
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 0)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		pools[i] = pool;
	}
	return 0;

err_create_pool:
	ion_system_heap_destroy_pools(pools);
	return 1;
}

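/*
 * Create the system heap: one page pool per order for cached and uncached
 * allocations, deferred freeing enabled, and a shrinker registered so the
 * pools can be reclaimed under memory pressure.
 */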
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int pools_size = sizeof(struct ion_page_pool *) * num_orders;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	heap->uncached_pools = kzalloc(pools_size, GFP_KERNEL);
	if (!heap->uncached_pools)
		goto err_alloc_uncached_pools;

	heap->cached_pools = kzalloc(pools_size, GFP_KERNEL);
	if (!heap->cached_pools)
		goto err_alloc_cached_pools;

	if (ion_system_heap_create_pools(heap->uncached_pools))
		goto err_create_uncached_pools;

	if (ion_system_heap_create_pools(heap->cached_pools))
		goto err_create_cached_pools;

	heap->heap.shrinker.shrink = ion_system_heap_shrink;
	heap->heap.shrinker.seeks = DEFAULT_SEEKS;
	heap->heap.shrinker.batch = 0;
	register_shrinker(&heap->heap.shrinker);
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

err_create_cached_pools:
	ion_system_heap_destroy_pools(heap->uncached_pools);
err_create_uncached_pools:
	kfree(heap->cached_pools);
err_alloc_cached_pools:
	kfree(heap->uncached_pools);
err_alloc_uncached_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);

	ion_system_heap_destroy_pools(sys_heap->uncached_pools);
	ion_system_heap_destroy_pools(sys_heap->cached_pools);
	kfree(sys_heap->uncached_pools);
	kfree(sys_heap->cached_pools);
	kfree(sys_heap);
}

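/*
 * The "system contig" heap below serves small physically contiguous
 * buffers straight from kzalloc and exposes each one through a
 * single-entry sg_table.
 */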
struct kmalloc_buffer_info {
	struct sg_table *table;
	void *vaddr;
};

static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	int ret;
	struct kmalloc_buffer_info *info;

	info = kmalloc(sizeof(struct kmalloc_buffer_info), GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto out;
	}

	info->table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		ret = -ENOMEM;
		goto kfree_info;
	}

	ret = sg_alloc_table(info->table, 1, GFP_KERNEL);
	if (ret)
		goto kfree_table;

	info->vaddr = kzalloc(len, GFP_KERNEL);
	if (!info->vaddr) {
		ret = -ENOMEM;
		goto sg_free_table;
	}

	sg_set_page(info->table->sgl, virt_to_page(info->vaddr), len, 0);
	sg_dma_address(info->table->sgl) = virt_to_phys(info->vaddr);
	dma_sync_sg_for_device(NULL, info->table->sgl, 1, DMA_BIDIRECTIONAL);

	buffer->priv_virt = info;
	return 0;

sg_free_table:
	sg_free_table(info->table);
kfree_table:
	kfree(info->table);
kfree_info:
	kfree(info);
out:
	return ret;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	struct kmalloc_buffer_info *info = buffer->priv_virt;

	sg_free_table(info->table);
	kfree(info->table);
	kfree(info->vaddr);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	struct kmalloc_buffer_info *info = buffer->priv_virt;

	*addr = virt_to_phys(info->vaddr);
	*len = buffer->size;
	return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct kmalloc_buffer_info *info = buffer->priv_virt;

	return info->table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}