/*
 * drivers/gpu/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
//#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"
#include <asm/mach/map.h>
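
/*
 * struct ion_chunk_heap - carveout heap handed out in fixed-size chunks
 * @heap:	the generic ion_heap embedded in this heap
 * @pool:	gen_pool managing the physically contiguous carveout
 * @base:	physical base address of the carveout
 * @chunk_size:	size of each allocation unit, taken from heap_data->priv
 * @size:	total size of the carveout in bytes
 * @allocated:	bytes currently handed out to buffers
 */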
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long chunk_size;
	unsigned long size;
	unsigned long allocated;
};
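
/*
 * Round the request up to a whole number of chunks, allocate each chunk
 * from the gen_pool, and describe the (possibly discontiguous) result in
 * an sg_table stored in buffer->priv_virt.  Fails early if the buffer
 * wants faulted user mappings or the heap cannot hold the request.
 */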
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

	if (ion_buffer_fault_user_mappings(buffer))
		return -ENOMEM;

	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, phys_to_page(paddr),
			    chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->priv_virt = table;
	chunk_heap->allocated += allocated_size;
	return 0;
err:
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg_dma_len(sg));
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}
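
/*
 * Zero the buffer, write the zeroes back to memory for cached buffers,
 * and return every chunk to the gen_pool before tearing down the
 * sg_table built by ion_chunk_heap_allocate().
 */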
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

	ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (ion_buffer_cached(buffer))
			dma_sync_sg_for_device(NULL, sg, 1,
					       DMA_BIDIRECTIONAL);
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg_dma_len(sg));
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}
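
/*
 * The buffer already carries its sg_table in priv_virt, so mapping for
 * DMA just hands that table back and unmapping is a no-op.
 */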
static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}
static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};
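
/*
 * Build a chunk heap over the platform-described carveout: create a
 * gen_pool whose minimum allocation order matches the chunk size, flush
 * any stale cache lines covering the region, and seed the pool with the
 * whole carveout.  Frees are deferred to the heap thread via
 * ION_HEAP_FLAG_DEFER_FREE.
 */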
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	struct scatterlist sg;

	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		kfree(chunk_heap);
		return ERR_PTR(-ENOMEM);
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, phys_to_page(heap_data->base), heap_data->size, 0);
	dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_info("%s: base %pa size %zd align %pa\n", __func__,
		&chunk_heap->base, heap_data->size, &heap_data->align);

	return &chunk_heap->heap;
}
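
/* Tear down the gen_pool and the heap itself. */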
void ion_chunk_heap_destroy(struct ion_heap *heap)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);

	gen_pool_destroy(chunk_heap->pool);
	kfree(chunk_heap);
}
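
/*
 * Usage sketch (not part of this file): a board file could register a
 * chunk heap by describing the carveout in an ion_platform_heap entry,
 * passing the chunk size through the priv field as
 * ion_chunk_heap_create() above expects.  The id, name, base, and size
 * values below are hypothetical.
 */
#if 0
static struct ion_platform_heap example_chunk_heap = {
	.type = ION_HEAP_TYPE_CHUNK,
	.id   = 1,
	.name = "chunk",
	.base = 0x90000000,	/* hypothetical carveout base */
	.size = SZ_16M,		/* hypothetical carveout size */
	.priv = (void *)SZ_64K,	/* chunk size read by ion_chunk_heap_create() */
};
#endif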