kgsl_sharedmem.h
/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#ifndef __KGSL_SHAREDMEM_H
#define __KGSL_SHAREDMEM_H

#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/iommu.h>

#include "kgsl_mmu.h"
#include "kgsl_log.h"
struct kgsl_device;
struct kgsl_process_private;

#define KGSL_CACHE_OP_INV	0x01
#define KGSL_CACHE_OP_FLUSH	0x02
#define KGSL_CACHE_OP_CLEAN	0x03

extern struct kgsl_memdesc_ops kgsl_page_alloc_ops;

int kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable, size_t size);

int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size);

int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);

int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size);

int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size);

void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);

int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			uint32_t *dst,
			unsigned int offsetbytes);

int kgsl_sharedmem_writel(struct kgsl_device *device,
			const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes,
			uint32_t src);

int kgsl_sharedmem_set(struct kgsl_device *device,
			const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes, unsigned int value,
			unsigned int sizebytes);

void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);

int kgsl_process_init_sysfs(struct kgsl_device *device,
			struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);

int kgsl_sharedmem_init_sysfs(void);
void kgsl_sharedmem_uninit_sysfs(void);
/*
 * kgsl_memdesc_get_align - Get alignment flags from a memdesc
 * @memdesc: the memdesc
 *
 * Returns the requested alignment, as a power of 2 exponent.
 */
static inline int
kgsl_memdesc_get_align(const struct kgsl_memdesc *memdesc)
{
	return (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
}

/*
 * kgsl_memdesc_get_cachemode - Get cache mode of a memdesc
 * @memdesc: the memdesc
 *
 * Returns a KGSL_CACHEMODE* value.
 */
static inline int
kgsl_memdesc_get_cachemode(const struct kgsl_memdesc *memdesc)
{
	return (memdesc->flags & KGSL_CACHEMODE_MASK) >> KGSL_CACHEMODE_SHIFT;
}
/*
 * kgsl_memdesc_set_align - Set alignment flags of a memdesc
 * @memdesc: the memdesc
 * @align: requested alignment, as a power of 2 exponent
 *
 * Exponents larger than 32 are clamped to 32. Returns 0.
 */
static inline int
kgsl_memdesc_set_align(struct kgsl_memdesc *memdesc, unsigned int align)
{
	if (align > 32) {
		KGSL_CORE_ERR("Alignment too big, restricting to 2^32\n");
		align = 32;
	}

	memdesc->flags &= ~KGSL_MEMALIGN_MASK;
	memdesc->flags |= (align << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK;
	return 0;
}
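/*
 * Illustrative usage (a sketch, not part of this header's API): request
 * 64 KB alignment by storing the power-of-2 exponent, then read it back.
 *
 *	kgsl_memdesc_set_align(&memdesc, ilog2(SZ_64K));  // exponent 16
 *	align = kgsl_memdesc_get_align(&memdesc);         // returns 16
 */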
static inline unsigned int kgsl_get_sg_pa(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address() first to support ion carveout
	 * regions, which do not work with sg_phys().
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

int
kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
			const struct kgsl_memdesc *memdesc);
/*
 * For relatively small sglists, it is preferable to use kzalloc
 * rather than going down the vmalloc rat hole. If the sglist fits
 * in a page, use kzalloc; otherwise fall back to vmalloc.
 */
static inline void *kgsl_sg_alloc(unsigned int sglen)
{
	/* Reject empty lists and sizes that would overflow the multiply */
	if ((sglen == 0) || (sglen >= ULONG_MAX / sizeof(struct scatterlist)))
		return NULL;

	if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
		return kzalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL);
	else
		return vmalloc(sglen * sizeof(struct scatterlist));
}

static inline void kgsl_sg_free(void *ptr, unsigned int sglen)
{
	/* Mirror the size test in kgsl_sg_alloc() to pick kfree() or vfree() */
	if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}
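/*
 * Illustrative pairing (a sketch; `nents` is a caller-chosen entry count):
 *
 *	struct scatterlist *sg = kgsl_sg_alloc(nents);
 *
 *	if (sg != NULL) {
 *		sg_init_table(sg, nents);
 *		// ... fill in the entries ...
 *		kgsl_sg_free(sg, nents);
 *	}
 *
 * The same sglen must be passed to both calls so that kgsl_sg_free()
 * picks the release path (kfree vs. vfree) matching kgsl_sg_alloc().
 */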
/*
 * memdesc_sg_phys - build a single-entry sglist for a contiguous region
 * @memdesc: the memdesc
 * @physaddr: physical address of the region
 * @size: size of the region in bytes
 */
static inline int
memdesc_sg_phys(struct kgsl_memdesc *memdesc,
		phys_addr_t physaddr, unsigned int size)
{
	memdesc->sg = kgsl_sg_alloc(1);
	if (memdesc->sg == NULL)
		return -ENOMEM;

	kmemleak_not_leak(memdesc->sg);

	memdesc->sglen = 1;
	sg_init_table(memdesc->sg, 1);
	memdesc->sg[0].length = size;
	memdesc->sg[0].offset = 0;
	memdesc->sg[0].dma_address = physaddr;
	return 0;
}
/*
 * kgsl_memdesc_is_global - is this a globally mapped buffer?
 * @memdesc: the memdesc
 *
 * Returns nonzero if this is a global mapping, 0 otherwise
 */
static inline int kgsl_memdesc_is_global(const struct kgsl_memdesc *memdesc)
{
	return (memdesc->priv & KGSL_MEMDESC_GLOBAL) != 0;
}

/*
 * kgsl_memdesc_has_guard_page - is the last page a guard page?
 * @memdesc: the memdesc
 *
 * Returns nonzero if there is a guard page, 0 otherwise
 */
static inline int
kgsl_memdesc_has_guard_page(const struct kgsl_memdesc *memdesc)
{
	return (memdesc->priv & KGSL_MEMDESC_GUARD_PAGE) != 0;
}

/*
 * kgsl_memdesc_protflags - get mmu protection flags
 * @memdesc: the memdesc
 *
 * Returns a mask of GSL_PT_PAGE* or IOMMU* values based
 * on the memdesc flags.
 */
static inline unsigned int
kgsl_memdesc_protflags(const struct kgsl_memdesc *memdesc)
{
	unsigned int protflags = 0;
	enum kgsl_mmutype mmutype = kgsl_mmu_get_mmutype();

	if (mmutype == KGSL_MMU_TYPE_GPU) {
		protflags = GSL_PT_PAGE_RV;
		if (!(memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY))
			protflags |= GSL_PT_PAGE_WV;
	} else if (mmutype == KGSL_MMU_TYPE_IOMMU) {
		protflags = IOMMU_READ;
		if (!(memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY))
			protflags |= IOMMU_WRITE;
	}
	return protflags;
}
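/*
 * Worked example (illustrative): on an IOMMU target, a memdesc with
 * KGSL_MEMFLAGS_GPUREADONLY set maps with IOMMU_READ only, while the
 * default maps with IOMMU_READ | IOMMU_WRITE.
 */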
/*
 * kgsl_memdesc_use_cpu_map - use the same virtual mapping on CPU and GPU?
 * @memdesc: the memdesc
 *
 * Returns nonzero if KGSL_MEMFLAGS_USE_CPU_MAP is set, 0 otherwise
 */
static inline int
kgsl_memdesc_use_cpu_map(const struct kgsl_memdesc *memdesc)
{
	return (memdesc->flags & KGSL_MEMFLAGS_USE_CPU_MAP) != 0;
}

/*
 * kgsl_memdesc_mmapsize - get the size of the mmap region
 * @memdesc: the memdesc
 *
 * The entire memdesc must be mapped. Additionally, if the
 * CPU mapping is going to be mirrored on the GPU, there must
 * be room for the guard page to be mapped so that the two
 * address spaces match up.
 */
static inline unsigned int
kgsl_memdesc_mmapsize(const struct kgsl_memdesc *memdesc)
{
	unsigned int size = memdesc->size;

	if (kgsl_memdesc_use_cpu_map(memdesc) &&
		kgsl_memdesc_has_guard_page(memdesc))
		size += SZ_4K;

	return size;
}
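/*
 * Worked example (illustrative): a 16 KB memdesc with both
 * KGSL_MEMFLAGS_USE_CPU_MAP and KGSL_MEMDESC_GUARD_PAGE set reports an
 * mmap size of 16 KB + 4 KB = 20 KB, keeping the guard page at the same
 * offset in the CPU and GPU address spaces.
 */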
/*
 * kgsl_allocate - allocate kernel-type GPU memory and map it
 * @memdesc: the memdesc
 * @pagetable: pagetable to map the allocation into
 * @size: requested size in bytes
 */
static inline int
kgsl_allocate(struct kgsl_memdesc *memdesc,
		struct kgsl_pagetable *pagetable, size_t size)
{
	int ret;

	memdesc->priv |= (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);

	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
		return kgsl_sharedmem_ebimem(memdesc, pagetable, size);

	/* Allocate backing pages, reserve a GPU address, then map it */
	ret = kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
	if (ret)
		return ret;

	ret = kgsl_mmu_get_gpuaddr(pagetable, memdesc);
	if (ret) {
		kgsl_sharedmem_free(memdesc);
		return ret;
	}

	ret = kgsl_mmu_map(pagetable, memdesc);
	if (ret)
		kgsl_sharedmem_free(memdesc);

	return ret;
}
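/*
 * Illustrative usage (a sketch; `pagetable` is assumed to come from the
 * caller's context, e.g. a device or process pagetable):
 *
 *	struct kgsl_memdesc md = { 0 };
 *
 *	if (kgsl_allocate(&md, pagetable, SZ_8K) == 0) {
 *		// ... use md.gpuaddr ...
 *		kgsl_sharedmem_free(&md);
 *	}
 */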
static inline int
kgsl_allocate_user(struct kgsl_memdesc *memdesc,
		struct kgsl_pagetable *pagetable,
		size_t size, unsigned int flags)
{
	int ret;

	if (size == 0)
		return -EINVAL;

	memdesc->flags = flags;

	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
		ret = kgsl_sharedmem_ebimem_user(memdesc, pagetable, size);
	else
		ret = kgsl_sharedmem_page_alloc_user(memdesc, pagetable, size);

	return ret;
}

static inline int
kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
{
	int ret = kgsl_sharedmem_alloc_coherent(memdesc, size);

	if (!ret && (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE))
		memdesc->gpuaddr = memdesc->physaddr;

	memdesc->flags |= (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);
	return ret;
}

#endif /* __KGSL_SHAREDMEM_H */