/*
 * drivers/gpu/ion/ion_cp_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <trace/events/kmem.h>
#include <asm/mach/map.h>
#include <mach/msm_memtypes.h>
#include <mach/scm.h>
#include <mach/iommu_domains.h>
#include "ion_priv.h"
#include <asm/cacheflush.h>
#include "msm/ion_cp_common.h"

/**
 * struct ion_cp_heap - container for the heap and shared heap data
 * @heap:	the heap information structure
 * @pool:	memory pool to allocate from.
 * @base:	the base address of the memory pool.
 * @permission_type:	Identifier for the memory used by SCM for protecting
 *			and unprotecting memory.
 * @secure_base:	Base address used when securing a heap that is shared.
 * @secure_size:	Size used when securing a heap that is shared.
 * @lock:	mutex to protect shared access.
 * @heap_protected:	Indicates whether heap has been protected or not.
 * @allocated_bytes:	the total number of allocated bytes from the pool.
 * @total_size:	the total size of the memory pool.
 * @request_region:	function pointer to call when first mapping of memory
 *			occurs.
 * @release_region:	function pointer to call when last mapping of memory
 *			unmapped.
 * @bus_id:	token used with request/release region.
 * @kmap_cached_count:	the total number of times this heap has been mapped
 *			in kernel space (cached).
 * @kmap_uncached_count:	the total number of times this heap has been
 *			mapped in kernel space (un-cached).
 * @umap_count:	the total number of times this heap has been mapped in
 *		user space.
 * @has_outer_cache:	set to 1 if outer cache is used, 0 otherwise.
 * @protect_cnt:	number of outstanding protect requests for the heap.
 * @cpu_addr:	value returned by dma_alloc_attrs() for the CMA backing
 *		memory, when allocated.
 * @heap_size:	total size of the heap, used when allocating from CMA.
 * @handle:	DMA address of the CMA backing memory.
 * @cma:	set to 1 if the heap is backed by CMA, 0 otherwise.
 * @allow_non_secure_allocation:	set to 1 if non-secure allocations
 *			are permitted from this heap.
 */
struct ion_cp_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned int permission_type;
	ion_phys_addr_t secure_base;
	size_t secure_size;
	struct mutex lock;
	unsigned int heap_protected;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*heap_request_region)(void *);
	int (*heap_release_region)(void *);
	void *bus_id;
	unsigned long kmap_cached_count;
	unsigned long kmap_uncached_count;
	unsigned long umap_count;
	unsigned int has_outer_cache;
	atomic_t protect_cnt;
	void *cpu_addr;
	size_t heap_size;
	dma_addr_t handle;
	int cma;
	int allow_non_secure_allocation;
};

enum {
	HEAP_NOT_PROTECTED = 0,
	HEAP_PROTECTED = 1,
};

#define DMA_ALLOC_TRIES	5

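/**
 * Allocate the CMA backing memory for the heap and seed a gen_pool
 * with it, retrying the DMA allocation a few times (with a short sleep
 * between attempts) before giving up. Called with cp_heap->lock held.
 */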
static int allocate_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret;
	int tries = 0;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	if (cp_heap->cpu_addr)
		return 0;

	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
		cp_heap->cpu_addr = dma_alloc_attrs(dev,
						cp_heap->heap_size,
						&(cp_heap->handle),
						0,
						&attrs);
		if (!cp_heap->cpu_addr) {
			trace_ion_cp_alloc_retry(tries);
			msleep(20);
		}
	}

	if (!cp_heap->cpu_addr)
		goto out;

	cp_heap->base = cp_heap->handle;

	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto out_free;

	ret = gen_pool_add(cp_heap->pool, cp_heap->base,
			cp_heap->heap_size, -1);
	if (ret < 0)
		goto out_pool;

	return 0;

out_pool:
	gen_pool_destroy(cp_heap->pool);
out_free:
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
out:
	return ION_CP_ALLOCATE_FAIL;
}

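/**
 * Release the CMA backing memory and the gen_pool built on top of it.
 * Counterpart to allocate_heap_memory().
 */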
static void free_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	/* release memory */
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
	gen_pool_destroy(cp_heap->pool);
	cp_heap->pool = NULL;
	cp_heap->cpu_addr = 0;
}

/**
 * Get the total number of kernel mappings.
 * Must be called with heap->lock locked.
 */
static unsigned long ion_cp_get_total_kmap_count(
					const struct ion_cp_heap *cp_heap)
{
	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}

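/**
 * Called when the first allocation is made from the heap. For CMA
 * backed heaps this allocates the backing memory; returns non-zero on
 * failure.
 */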
static int ion_on_first_alloc(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value;

	if (cp_heap->cma) {
		ret_value = allocate_heap_memory(heap);
		if (ret_value)
			return 1;
	}
	return 0;
}

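/**
 * Called when the last allocation is freed from the heap. For CMA
 * backed heaps this releases the backing memory again.
 */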
static void ion_on_last_free(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (cp_heap->cma)
		free_heap_memory(heap);
}

/**
 * Protects memory on the first outstanding protect request for the heap.
 * Must be called with heap->lock locked.
 */
static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
		/* Make sure the heap memory is allocated before protecting. */
		if (!cp_heap->allocated_bytes)
			if (ion_on_first_alloc(heap)) {
				atomic_dec(&cp_heap->protect_cnt);
				ret_value = -ENOMEM;
				goto out;
			}

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type,
				version, data);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - error code: %d\n",
				heap->name, ret_value);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);
			atomic_dec(&cp_heap->protect_cnt);
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%pa\n",
				heap->name, &cp_heap->base);
		}
	}
out:
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
	return ret_value;
}

/**
 * Un-protects memory once the last outstanding protect request is released.
 * Must be called with heap->lock locked.
 */
static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type, version, data);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - error code: %d\n",
				heap->name, error_code);
		} else {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
				(unsigned int) cp_heap->base);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);
		}
	}
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}

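/**
 * Carve @size bytes out of the heap's gen_pool. Fails if a non-secure
 * allocation is requested while the heap is protected, or if non-secure
 * allocations are disallowed for this heap. Returns the physical address
 * of the allocation on success.
 */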
ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
		      unsigned long size,
		      unsigned long align,
		      unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	unsigned long force_contig = flags & ION_FLAG_FORCE_CONTIGUOUS;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected heap %s\n",
			heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (!force_contig && !secure_allocation &&
	     !cp_heap->allow_non_secure_allocation) {
		mutex_unlock(&cp_heap->lock);
		pr_debug("%s: non-secure allocation disallowed from this heap\n",
			__func__);
		return ION_CP_ALLOCATE_FAIL;
	}

	/*
	 * The check above already checked for non-secure allocations when the
	 * heap is protected. HEAP_PROTECTED implies that this must be a secure
	 * allocation. If the heap is protected and there are userspace or
	 * cached kernel mappings, something has gone wrong in the security
	 * model.
	 */
	if (cp_heap->heap_protected == HEAP_PROTECTED) {
		BUG_ON(cp_heap->umap_count != 0);
		BUG_ON(cp_heap->kmap_cached_count != 0);
	}

	/*
	 * if this is the first reusable allocation, transition
	 * the heap
	 */
	if (!cp_heap->allocated_bytes)
		if (ion_on_first_alloc(heap)) {
			mutex_unlock(&cp_heap->lock);
			return ION_RESERVED_ALLOCATE_FAIL;
		}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		mutex_lock(&cp_heap->lock);
		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);
		if (!cp_heap->allocated_bytes &&
			cp_heap->heap_protected == HEAP_NOT_PROTECTED)
			ion_on_last_free(heap);
		mutex_unlock(&cp_heap->lock);

		return ION_CP_ALLOCATE_FAIL;
	}

	return offset;
}

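/**
 * Return an allocation to the heap's gen_pool, and release the backing
 * memory if this was the last allocation and the heap is not protected.
 */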
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (!cp_heap->allocated_bytes &&
	    cp_heap->heap_protected == HEAP_NOT_PROTECTED)
		ion_on_last_free(heap);
	mutex_unlock(&cp_heap->lock);
}

static int ion_cp_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;

	*addr = buf->buffer;
	*len = buffer->size;
	return 0;
}

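/**
 * Heap op wrapper around ion_cp_allocate(): sets up the per-buffer
 * ion_cp_buffer bookkeeping (secure count, lock, is_secure flag).
 */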
static int ion_cp_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	struct ion_cp_buffer *buf;
	phys_addr_t addr;

	/*
	 * we never want Ion to fault pages in for us with this
	 * heap. We want to set up the mappings ourselves in .map_user
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ION_CP_ALLOCATE_FAIL;

	addr = ion_cp_allocate(heap, size, align, flags);
	if (addr == ION_CP_ALLOCATE_FAIL) {
		kfree(buf);
		return -ENOMEM;
	}

	buf->buffer = addr;
	buf->want_delayed_unsecure = 0;
	atomic_set(&buf->secure_cnt, 0);
	mutex_init(&buf->lock);
	buf->is_secure = flags & ION_FLAG_SECURE ? 1 : 0;
	buffer->priv_virt = buf;

	return 0;
}

static void ion_cp_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	ion_cp_free(heap, buf->buffer, buffer->size);
	WARN_ON(atomic_read(&buf->secure_cnt));
	WARN_ON(atomic_read(&buf->map_cnt));
	kfree(buf);

	buffer->priv_virt = NULL;
}

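/**
 * Build an sg_table describing the buffer. Cached buffers are chunked
 * at PAGE_SIZE, secure buffers whose size is a multiple of 1MB use 1MB
 * chunks, and everything else is described as a single contiguous chunk.
 */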
struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
	size_t chunk_size = buffer->size;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (ION_IS_CACHED(buffer->flags))
		chunk_size = PAGE_SIZE;
	else if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M))
		chunk_size = SZ_1M;

	return ion_create_chunked_sg_table(buf->buffer, chunk_size,
					buffer->size);
}

struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
					      struct ion_buffer *buffer)
{
	return ion_cp_heap_create_sg_table(buffer);
}

void ion_cp_heap_unmap_dma(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = 0;
}

/**
 * Call request region for SMI memory if this is the first mapping.
 */
static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_request_region)
			ret_value = cp_heap->heap_request_region(
					cp_heap->bus_id);
	return ret_value;
}

/**
 * Call release region for SMI memory if this is the last un-mapping.
 */
static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_release_region)
			ret_value = cp_heap->heap_release_region(
					cp_heap->bus_id);
	return ret_value;
}

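/**
 * Map a buffer into kernel space. Cached mappings are refused while the
 * heap is protected. CMA backed heaps are mapped page-by-page with
 * vmap(); otherwise the buffer is ioremapped.
 */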
void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(buffer->flags))) {

		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (cp_heap->cma) {
			int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
			struct page **pages = vmalloc(
						sizeof(struct page *) * npages);
			int i;
			pgprot_t pgprot;

			if (!pages) {
				mutex_unlock(&cp_heap->lock);
				return ERR_PTR(-ENOMEM);
			}

			if (ION_IS_CACHED(buffer->flags))
				pgprot = PAGE_KERNEL;
			else
				pgprot = pgprot_writecombine(PAGE_KERNEL);

			for (i = 0; i < npages; i++) {
				pages[i] = phys_to_page(buf->buffer +
						i * PAGE_SIZE);
			}

			ret_value = vmap(pages, npages, VM_IOREMAP, pgprot);
			vfree(pages);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				ret_value = ioremap_cached(buf->buffer,
							   buffer->size);
			else
				ret_value = ioremap(buf->buffer,
						    buffer->size);
		}

		if (!ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
			atomic_inc(&buf->map_cnt);
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (cp_heap->cma)
		vunmap(buffer->vaddr);
	else
		__arm_iounmap(buffer->vaddr);

	buffer->vaddr = NULL;

	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		--cp_heap->kmap_cached_count;
	else
		--cp_heap->kmap_uncached_count;

	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);

	return;
}

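/**
 * Map a buffer into a user VMA with remap_pfn_range(). Refused for
 * secure buffers and while the heap is protected; non-cached buffers
 * are mapped write-combined.
 */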
int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED && !buf->is_secure) {
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(buffer->flags))
			vma->vm_page_prot = pgprot_writecombine(
					vma->vm_page_prot);

		ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buf->buffer) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		if (ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			atomic_inc(&buf->map_cnt);
			++cp_heap->umap_count;
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_user(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	--cp_heap->umap_count;
	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

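/**
 * debugfs helper: print allocation totals, mapping counts, protection
 * state and, when a memory map is supplied, a per-client map of the heap.
 */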
static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
			      const struct list_head *mem_map)
{
	unsigned long total_alloc;
	unsigned long total_size;
	unsigned long umap_count;
	unsigned long kmap_count;
	unsigned long heap_protected;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	total_alloc = cp_heap->allocated_bytes;
	total_size = cp_heap->total_size;
	umap_count = cp_heap->umap_count;
	kmap_count = ion_cp_get_total_kmap_count(cp_heap);
	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
	mutex_unlock(&cp_heap->lock);

	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
	seq_printf(s, "total heap size: %lx\n", total_size);
	seq_printf(s, "umapping count: %lx\n", umap_count);
	seq_printf(s, "kmapping count: %lx\n", kmap_count);
	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");

	if (mem_map) {
		unsigned long base = cp_heap->base;
		unsigned long size = cp_heap->total_size;
		unsigned long end = base + size;
		unsigned long last_end = base;
		struct mem_map_data *data;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		list_for_each_entry(data, mem_map, node) {
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				phys_addr_t da;

				da = data->addr - 1;
				seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
					"FREE", &last_end, &da,
					data->addr - last_end,
					data->addr - last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end + 1;
		}
		if (last_end < end) {
			seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
				last_end, end - 1, end - last_end,
				end - last_end);
		}
	}

	return 0;
}

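/**
 * Secure the whole heap. Refused while user-space or cached kernel
 * mappings are outstanding.
 */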
int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
		ret_value = ion_cp_protect(heap, version, data);
	} else {
		pr_err("ION cannot secure heap with outstanding mappings: User space: %lu, kernel space (cached): %lu\n",
		       cp_heap->umap_count, cp_heap->kmap_cached_count);
		ret_value = -EINVAL;
	}

	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value = 0;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	ion_cp_unprotect(heap, version, data);
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

static struct ion_heap_ops cp_heap_ops = {
	.allocate = ion_cp_heap_allocate,
	.free = ion_cp_heap_free,
	.phys = ion_cp_heap_phys,
	.map_user = ion_cp_heap_map_user,
	.unmap_user = ion_cp_heap_unmap_user,
	.map_kernel = ion_cp_heap_map_kernel,
	.unmap_kernel = ion_cp_heap_unmap_kernel,
	.map_dma = ion_cp_heap_map_dma,
	.unmap_dma = ion_cp_heap_unmap_dma,
	.print_debug = ion_cp_print_debug,
	.secure_heap = ion_cp_secure_heap,
	.unsecure_heap = ion_cp_unsecure_heap,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

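/**
 * Create a CP heap from the platform heap data. For non-CMA heaps a
 * gen_pool is created over the carveout immediately; CMA backed heaps
 * defer pool creation until the first allocation.
 */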
struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_cp_heap *cp_heap;
	int ret;

	cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
	if (!cp_heap)
		return ERR_PTR(-ENOMEM);

	mutex_init(&cp_heap->lock);

	cp_heap->allocated_bytes = 0;
	cp_heap->umap_count = 0;
	cp_heap->kmap_cached_count = 0;
	cp_heap->kmap_uncached_count = 0;
	cp_heap->total_size = heap_data->size;
	cp_heap->heap.ops = &cp_heap_ops;
	cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
	cp_heap->secure_base = heap_data->base;
	cp_heap->secure_size = heap_data->size;
	cp_heap->has_outer_cache = heap_data->has_outer_cache;
	cp_heap->heap_size = heap_data->size;

	atomic_set(&cp_heap->protect_cnt, 0);

	if (heap_data->extra_data) {
		struct ion_cp_heap_pdata *extra_data =
				heap_data->extra_data;
		cp_heap->permission_type = extra_data->permission_type;
		if (extra_data->secure_size) {
			cp_heap->secure_base = extra_data->secure_base;
			cp_heap->secure_size = extra_data->secure_size;
		}
		if (extra_data->setup_region)
			cp_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			cp_heap->heap_request_region =
				extra_data->request_region;
		if (extra_data->release_region)
			cp_heap->heap_release_region =
				extra_data->release_region;
		cp_heap->cma = extra_data->is_cma;
		cp_heap->allow_non_secure_allocation =
			extra_data->allow_nonsecure_alloc;
	}

	if (cp_heap->cma) {
		cp_heap->pool = NULL;
		cp_heap->cpu_addr = 0;
		cp_heap->heap.priv = heap_data->priv;
	} else {
		cp_heap->pool = gen_pool_create(12, -1);
		if (!cp_heap->pool)
			goto free_heap;

		cp_heap->base = heap_data->base;
		ret = gen_pool_add(cp_heap->pool, cp_heap->base,
					heap_data->size, -1);
		if (ret < 0)
			goto destroy_pool;
	}
	return &cp_heap->heap;

destroy_pool:
	gen_pool_destroy(cp_heap->pool);

free_heap:
	kfree(cp_heap);
	return ERR_PTR(-ENOMEM);
}

void ion_cp_heap_destroy(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	gen_pool_destroy(cp_heap->pool);
	kfree(cp_heap);
	cp_heap = NULL;
}

void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
		unsigned long *size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	*base = cp_heap->base;
	*size = cp_heap->total_size;
}