kgsl_sharedmem.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992
  1. /* Copyright (c) 2002,2007-2014,2017, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #include <linux/export.h>
  14. #include <linux/vmalloc.h>
  15. #include <linux/memory_alloc.h>
  16. #include <asm/cacheflush.h>
  17. #include <linux/slab.h>
  18. #include <linux/kmemleak.h>
  19. #include <linux/highmem.h>
  20. #include "kgsl.h"
  21. #include "kgsl_sharedmem.h"
  22. #include "kgsl_cffdump.h"
  23. #include "kgsl_device.h"
/* Serializes kernel map/unmap of memdesc->hostptr and hostptr_count */
DEFINE_MUTEX(kernel_map_global_lock);

/* An attribute for showing per-process memory statistics */
struct kgsl_mem_entry_attribute {
	struct attribute attr;
	/* Index into kgsl_process_private->stats[] (a KGSL_MEM_ENTRY_* value) */
	int memtype;
	/* Formats the statistic for this memtype into buf; returns bytes written */
	ssize_t (*show)(struct kgsl_process_private *priv,
		int type, char *buf);
};

#define to_mem_entry_attr(a) \
	container_of(a, struct kgsl_mem_entry_attribute, attr)

/* Build a read-only (0444) sysfs attribute bound to one memtype and show fn */
#define __MEM_ENTRY_ATTR(_type, _name, _show) \
{ \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.memtype = _type, \
	.show = _show, \
}

/*
 * A structure to hold the attributes for a particular memory type.
 * For each memory type in each process we store the current and maximum
 * memory usage and display the counts in sysfs. This structure and
 * the following macro allow us to simplify the definition for those
 * adding new memory types
 */
struct mem_entry_stats {
	int memtype;
	struct kgsl_mem_entry_attribute attr;		/* current usage */
	struct kgsl_mem_entry_attribute max_attr;	/* high-water mark */
};

/* Declare the "<name>" / "<name>_max" sysfs attribute pair for a memtype */
#define MEM_ENTRY_STAT(_type, _name) \
{ \
	.memtype = _type, \
	.attr = __MEM_ENTRY_ATTR(_type, _name, mem_entry_show), \
	.max_attr = __MEM_ENTRY_ATTR(_type, _name##_max, \
		mem_entry_max_show), \
}
  59. /**
  60. * Show the current amount of memory allocated for the given memtype
  61. */
  62. static ssize_t
  63. mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
  64. {
  65. return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].cur);
  66. }
  67. /**
  68. * Show the maximum memory allocated for the given memtype through the life of
  69. * the process
  70. */
  71. static ssize_t
  72. mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf)
  73. {
  74. return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].max);
  75. }
/*
 * No-op kobject release: the kobject is embedded in kgsl_process_private,
 * whose lifetime is managed by its own refcount (see kgsl_process_init_sysfs).
 */
static void mem_entry_sysfs_release(struct kobject *kobj)
{
}
  79. static ssize_t mem_entry_sysfs_show(struct kobject *kobj,
  80. struct attribute *attr, char *buf)
  81. {
  82. struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr);
  83. struct kgsl_process_private *priv;
  84. ssize_t ret;
  85. /*
  86. * 1. sysfs_remove_file waits for reads to complete before the node
  87. * is deleted.
  88. * 2. kgsl_process_init_sysfs takes a refcount to the process_private,
  89. * which is put at the end of kgsl_process_uninit_sysfs.
  90. * These two conditions imply that priv will not be freed until this
  91. * function completes, and no further locking is needed.
  92. */
  93. priv = kobj ? container_of(kobj, struct kgsl_process_private, kobj) :
  94. NULL;
  95. if (priv && pattr->show)
  96. ret = pattr->show(priv, pattr->memtype, buf);
  97. else
  98. ret = -EIO;
  99. return ret;
  100. }
static const struct sysfs_ops mem_entry_sysfs_ops = {
	.show = mem_entry_sysfs_show,
};

/* kobject type for the per-process directory under kgsl_driver.prockobj */
static struct kobj_type ktype_mem_entry = {
	.sysfs_ops = &mem_entry_sysfs_ops,
	.default_attrs = NULL,
	.release = mem_entry_sysfs_release
};

/* One current/max attribute pair per tracked memory source */
static struct mem_entry_stats mem_stats[] = {
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel),
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_PMEM, pmem),
#ifdef CONFIG_ASHMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ASHMEM, ashmem),
#endif
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user),
#ifdef CONFIG_ION
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ION, ion),
#endif
};
  120. void
  121. kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
  122. {
  123. int i;
  124. for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
  125. sysfs_remove_file(&private->kobj, &mem_stats[i].attr.attr);
  126. sysfs_remove_file(&private->kobj,
  127. &mem_stats[i].max_attr.attr);
  128. }
  129. kobject_put(&private->kobj);
  130. /* Put the refcount we got in kgsl_process_init_sysfs */
  131. kgsl_process_private_put(private);
  132. }
  133. /**
  134. * kgsl_process_init_sysfs() - Initialize and create sysfs files for a process
  135. *
  136. * @device: Pointer to kgsl device struct
  137. * @private: Pointer to the structure for the process
  138. *
  139. * @returns: 0 on success, error code otherwise
  140. *
  141. * kgsl_process_init_sysfs() is called at the time of creating the
  142. * process struct when a process opens the kgsl device for the first time.
  143. * This function creates the sysfs files for the process.
  144. */
  145. int
  146. kgsl_process_init_sysfs(struct kgsl_device *device,
  147. struct kgsl_process_private *private)
  148. {
  149. unsigned char name[16];
  150. int i, ret = 0;
  151. snprintf(name, sizeof(name), "%d", pid_nr(private->pid));
  152. ret = kobject_init_and_add(&private->kobj, &ktype_mem_entry,
  153. kgsl_driver.prockobj, name);
  154. if (ret)
  155. return ret;
  156. for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
  157. /* We need to check the value of sysfs_create_file, but we
  158. * don't really care if it passed or not */
  159. ret = sysfs_create_file(&private->kobj,
  160. &mem_stats[i].attr.attr);
  161. ret = sysfs_create_file(&private->kobj,
  162. &mem_stats[i].max_attr.attr);
  163. }
  164. /* Keep private valid until the sysfs enries are removed. */
  165. if (!ret)
  166. kgsl_process_private_get(private);
  167. return ret;
  168. }
  169. static int kgsl_drv_memstat_show(struct device *dev,
  170. struct device_attribute *attr,
  171. char *buf)
  172. {
  173. unsigned int val = 0;
  174. if (!strncmp(attr->attr.name, "vmalloc", 7))
  175. val = atomic_read(&kgsl_driver.stats.vmalloc);
  176. else if (!strncmp(attr->attr.name, "vmalloc_max", 11))
  177. val = atomic_read(&kgsl_driver.stats.vmalloc_max);
  178. else if (!strncmp(attr->attr.name, "page_alloc", 10))
  179. val = atomic_read(&kgsl_driver.stats.page_alloc);
  180. else if (!strncmp(attr->attr.name, "page_alloc_max", 14))
  181. val = atomic_read(&kgsl_driver.stats.page_alloc_max);
  182. else if (!strncmp(attr->attr.name, "coherent", 8))
  183. val = atomic_read(&kgsl_driver.stats.coherent);
  184. else if (!strncmp(attr->attr.name, "coherent_max", 12))
  185. val = atomic_read(&kgsl_driver.stats.coherent_max);
  186. else if (!strncmp(attr->attr.name, "mapped", 6))
  187. val = atomic_read(&kgsl_driver.stats.mapped);
  188. else if (!strncmp(attr->attr.name, "mapped_max", 10))
  189. val = atomic_read(&kgsl_driver.stats.mapped_max);
  190. return snprintf(buf, PAGE_SIZE, "%u\n", val);
  191. }
  192. static int kgsl_drv_full_cache_threshold_store(struct device *dev,
  193. struct device_attribute *attr,
  194. const char *buf, size_t count)
  195. {
  196. int ret;
  197. unsigned int thresh = 0;
  198. ret = kgsl_sysfs_store(buf, &thresh);
  199. if (ret)
  200. return ret;
  201. kgsl_driver.full_cache_threshold = thresh;
  202. return count;
  203. }
  204. static int kgsl_drv_full_cache_threshold_show(struct device *dev,
  205. struct device_attribute *attr,
  206. char *buf)
  207. {
  208. return snprintf(buf, PAGE_SIZE, "%d\n",
  209. kgsl_driver.full_cache_threshold);
  210. }
/* Read-only driver-wide memory statistics, one sysfs file per counter */
DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
/* Writable tuning knob for the full cache flush heuristic */
DEVICE_ATTR(full_cache_threshold, 0644,
	kgsl_drv_full_cache_threshold_show,
	kgsl_drv_full_cache_threshold_store);

/* NULL-terminated list consumed by kgsl_{create,remove}_device_sysfs_files */
static const struct device_attribute *drv_attr_list[] = {
	&dev_attr_vmalloc,
	&dev_attr_vmalloc_max,
	&dev_attr_page_alloc,
	&dev_attr_page_alloc_max,
	&dev_attr_coherent,
	&dev_attr_coherent_max,
	&dev_attr_mapped,
	&dev_attr_mapped_max,
	&dev_attr_full_cache_threshold,
	NULL
};
/* Remove the driver-wide statistics files from the kgsl virtual device */
void
kgsl_sharedmem_uninit_sysfs(void)
{
	kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
}

/* Create the driver-wide statistics files; returns 0 or an error code */
int
kgsl_sharedmem_init_sysfs(void)
{
	return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
		drv_attr_list);
}
  245. static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
  246. struct vm_area_struct *vma,
  247. struct vm_fault *vmf)
  248. {
  249. int i, pgoff;
  250. struct scatterlist *s = memdesc->sg;
  251. unsigned int offset;
  252. offset = ((unsigned long) vmf->virtual_address - vma->vm_start);
  253. if (offset >= memdesc->size)
  254. return VM_FAULT_SIGBUS;
  255. pgoff = offset >> PAGE_SHIFT;
  256. /*
  257. * The sglist might be comprised of mixed blocks of memory depending
  258. * on how many 64K pages were allocated. This means we have to do math
  259. * to find the actual 4K page to map in user space
  260. */
  261. for (i = 0; i < memdesc->sglen; i++) {
  262. int npages = s->length >> PAGE_SHIFT;
  263. if (pgoff < npages) {
  264. struct page *page = sg_page(s);
  265. page = nth_page(page, pgoff);
  266. get_page(page);
  267. vmf->page = page;
  268. return 0;
  269. }
  270. pgoff -= npages;
  271. s = sg_next(s);
  272. }
  273. return VM_FAULT_SIGBUS;
  274. }
  275. static int kgsl_page_alloc_vmflags(struct kgsl_memdesc *memdesc)
  276. {
  277. return VM_RESERVED | VM_DONTEXPAND | VM_DONTCOPY;
  278. }
  279. /*
  280. * kgsl_page_alloc_unmap_kernel() - Unmap the memory in memdesc
  281. *
  282. * @memdesc: The memory descriptor which contains information about the memory
  283. *
  284. * Unmaps the memory mapped into kernel address space
  285. */
  286. static void kgsl_page_alloc_unmap_kernel(struct kgsl_memdesc *memdesc)
  287. {
  288. mutex_lock(&kernel_map_global_lock);
  289. if (!memdesc->hostptr) {
  290. BUG_ON(memdesc->hostptr_count);
  291. goto done;
  292. }
  293. memdesc->hostptr_count--;
  294. if (memdesc->hostptr_count)
  295. goto done;
  296. vunmap(memdesc->hostptr);
  297. atomic_sub(memdesc->size, &kgsl_driver.stats.vmalloc);
  298. memdesc->hostptr = NULL;
  299. done:
  300. mutex_unlock(&kernel_map_global_lock);
  301. }
  302. static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
  303. {
  304. int i = 0;
  305. struct scatterlist *sg;
  306. int sglen = memdesc->sglen;
  307. atomic_sub(memdesc->size, &kgsl_driver.stats.page_alloc);
  308. kgsl_page_alloc_unmap_kernel(memdesc);
  309. /* we certainly do not expect the hostptr to still be mapped */
  310. BUG_ON(memdesc->hostptr);
  311. if (memdesc->sg)
  312. for_each_sg(memdesc->sg, sg, sglen, i)
  313. __free_pages(sg_page(sg), get_order(sg->length));
  314. }
  315. static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
  316. {
  317. return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTCOPY;
  318. }
  319. /*
  320. * kgsl_page_alloc_map_kernel - Map the memory in memdesc to kernel address
  321. * space
  322. *
  323. * @memdesc - The memory descriptor which contains information about the memory
  324. *
  325. * Return: 0 on success else error code
  326. */
  327. static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
  328. {
  329. int ret = 0;
  330. mutex_lock(&kernel_map_global_lock);
  331. if (!memdesc->hostptr) {
  332. pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
  333. struct page **pages = NULL;
  334. struct scatterlist *sg;
  335. int npages = PAGE_ALIGN(memdesc->size) >> PAGE_SHIFT;
  336. int sglen = memdesc->sglen;
  337. int i, count = 0;
  338. /* create a list of pages to call vmap */
  339. pages = vmalloc(npages * sizeof(struct page *));
  340. if (!pages) {
  341. KGSL_CORE_ERR("vmalloc(%d) failed\n",
  342. npages * sizeof(struct page *));
  343. ret = -ENOMEM;
  344. goto done;
  345. }
  346. for_each_sg(memdesc->sg, sg, sglen, i) {
  347. struct page *page = sg_page(sg);
  348. int j;
  349. for (j = 0; j < sg->length >> PAGE_SHIFT; j++)
  350. pages[count++] = page++;
  351. }
  352. memdesc->hostptr = vmap(pages, count,
  353. VM_IOREMAP, page_prot);
  354. if (memdesc->hostptr)
  355. KGSL_STATS_ADD(memdesc->size,
  356. &kgsl_driver.stats.vmalloc,
  357. &kgsl_driver.stats.vmalloc_max);
  358. else
  359. ret = -ENOMEM;
  360. vfree(pages);
  361. }
  362. if (memdesc->hostptr)
  363. memdesc->hostptr_count++;
  364. done:
  365. mutex_unlock(&kernel_map_global_lock);
  366. return ret;
  367. }
  368. static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
  369. struct vm_area_struct *vma,
  370. struct vm_fault *vmf)
  371. {
  372. unsigned long offset, pfn;
  373. int ret;
  374. offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
  375. PAGE_SHIFT;
  376. pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
  377. ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);
  378. if (ret == -ENOMEM || ret == -EAGAIN)
  379. return VM_FAULT_OOM;
  380. else if (ret == -EFAULT)
  381. return VM_FAULT_SIGBUS;
  382. return VM_FAULT_NOPAGE;
  383. }
  384. static void kgsl_ebimem_unmap_kernel(struct kgsl_memdesc *memdesc)
  385. {
  386. mutex_lock(&kernel_map_global_lock);
  387. if (!memdesc->hostptr) {
  388. BUG_ON(memdesc->hostptr_count);
  389. goto done;
  390. }
  391. memdesc->hostptr_count--;
  392. if (memdesc->hostptr_count)
  393. goto done;
  394. iounmap(memdesc->hostptr);
  395. memdesc->hostptr = NULL;
  396. done:
  397. mutex_unlock(&kernel_map_global_lock);
  398. }
  399. static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
  400. {
  401. atomic_sub(memdesc->size,
  402. &kgsl_driver.stats.coherent);
  403. kgsl_ebimem_unmap_kernel(memdesc);
  404. /* we certainly do not expect the hostptr to still be mapped */
  405. BUG_ON(memdesc->hostptr);
  406. free_contiguous_memory_by_paddr(memdesc->physaddr);
  407. }
  408. static int kgsl_ebimem_map_kernel(struct kgsl_memdesc *memdesc)
  409. {
  410. int ret = 0;
  411. mutex_lock(&kernel_map_global_lock);
  412. if (!memdesc->hostptr) {
  413. memdesc->hostptr = ioremap(memdesc->physaddr, memdesc->size);
  414. if (!memdesc->hostptr) {
  415. KGSL_CORE_ERR("ioremap failed, addr:0x%pK, size:0x%x\n",
  416. memdesc->hostptr, memdesc->size);
  417. ret = -ENOMEM;
  418. goto done;
  419. }
  420. }
  421. memdesc->hostptr_count++;
  422. done:
  423. mutex_unlock(&kernel_map_global_lock);
  424. return ret;
  425. }
  426. static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
  427. {
  428. atomic_sub(memdesc->size,
  429. &kgsl_driver.stats.coherent);
  430. dma_free_coherent(NULL, memdesc->size,
  431. memdesc->hostptr, memdesc->physaddr);
  432. }
/* Global - also used by kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_page_alloc_ops = {
	.free = kgsl_page_alloc_free,
	.vmflags = kgsl_page_alloc_vmflags,
	.vmfault = kgsl_page_alloc_vmfault,
	.map_kernel = kgsl_page_alloc_map_kernel,
	.unmap_kernel = kgsl_page_alloc_unmap_kernel,
};
EXPORT_SYMBOL(kgsl_page_alloc_ops);

/* Ops for physically contiguous EBI memory (ioremap-based kernel mapping) */
static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
	.free = kgsl_ebimem_free,
	.vmflags = kgsl_contiguous_vmflags,
	.vmfault = kgsl_contiguous_vmfault,
	.map_kernel = kgsl_ebimem_map_kernel,
	.unmap_kernel = kgsl_ebimem_unmap_kernel,
};

/* Ops for dma_alloc_coherent() memory; hostptr is set at allocation time */
static struct kgsl_memdesc_ops kgsl_coherent_ops = {
	.free = kgsl_coherent_free,
};
  452. void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
  453. {
  454. /*
  455. * If the buffer is mapped in the kernel operate on that address
  456. * otherwise use the user address
  457. */
  458. void *addr = (memdesc->hostptr) ?
  459. memdesc->hostptr : (void *) memdesc->useraddr;
  460. int size = memdesc->size;
  461. if (addr != NULL) {
  462. switch (op) {
  463. case KGSL_CACHE_OP_FLUSH:
  464. dmac_flush_range(addr, addr + size);
  465. break;
  466. case KGSL_CACHE_OP_CLEAN:
  467. dmac_clean_range(addr, addr + size);
  468. break;
  469. case KGSL_CACHE_OP_INV:
  470. dmac_inv_range(addr, addr + size);
  471. break;
  472. }
  473. }
  474. }
  475. EXPORT_SYMBOL(kgsl_cache_range_op);
  476. static int
  477. _kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
  478. struct kgsl_pagetable *pagetable,
  479. size_t size)
  480. {
  481. int ret = 0;
  482. int len, page_size, sglen_alloc, sglen = 0;
  483. unsigned int align;
  484. size = PAGE_ALIGN(size);
  485. if (size == 0 || size > UINT_MAX)
  486. return -EINVAL;
  487. align = (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
  488. page_size = (align >= ilog2(SZ_64K) && size >= SZ_64K)
  489. ? SZ_64K : PAGE_SIZE;
  490. /*
  491. * The alignment cannot be less than the intended page size - it can be
  492. * larger however to accomodate hardware quirks
  493. */
  494. if (ilog2(align) < page_size)
  495. kgsl_memdesc_set_align(memdesc, ilog2(page_size));
  496. /*
  497. * There needs to be enough room in the sg structure to be able to
  498. * service the allocation entirely with PAGE_SIZE sized chunks
  499. */
  500. sglen_alloc = PAGE_ALIGN(size) >> PAGE_SHIFT;
  501. memdesc->pagetable = pagetable;
  502. memdesc->ops = &kgsl_page_alloc_ops;
  503. memdesc->sglen_alloc = sglen_alloc;
  504. memdesc->sg = kgsl_sg_alloc(memdesc->sglen_alloc);
  505. if (memdesc->sg == NULL) {
  506. ret = -ENOMEM;
  507. goto done;
  508. }
  509. sg_init_table(memdesc->sg, memdesc->sglen_alloc);
  510. len = size;
  511. while (len > 0) {
  512. struct page *page;
  513. unsigned int gfp_mask = __GFP_HIGHMEM;
  514. int j;
  515. /* don't waste space at the end of the allocation*/
  516. if (len < page_size)
  517. page_size = PAGE_SIZE;
  518. /*
  519. * Don't do some of the more aggressive memory recovery
  520. * techniques for large order allocations
  521. */
  522. if (page_size != PAGE_SIZE)
  523. gfp_mask |= __GFP_COMP | __GFP_NORETRY |
  524. __GFP_NO_KSWAPD | __GFP_NOWARN;
  525. else
  526. gfp_mask |= GFP_KERNEL;
  527. page = alloc_pages(gfp_mask, get_order(page_size));
  528. if (page == NULL) {
  529. if (page_size != PAGE_SIZE) {
  530. page_size = PAGE_SIZE;
  531. continue;
  532. }
  533. /*
  534. * Update sglen and memdesc size,as requested allocation
  535. * not served fully. So that they can be correctly freed
  536. * in kgsl_sharedmem_free().
  537. */
  538. memdesc->sglen = sglen;
  539. memdesc->size = (size - len);
  540. KGSL_CORE_ERR(
  541. "Out of memory: only allocated %dKB of %dKB requested\n",
  542. (size - len) >> 10, size >> 10);
  543. ret = -ENOMEM;
  544. goto done;
  545. }
  546. /*
  547. * All memory that goes to the user has to be zeroed out before it gets
  548. * exposed to userspace. This means that the memory has to be mapped in
  549. * the kernel, zeroed (memset) and then unmapped. This also means that
  550. * the dcache has to be flushed to ensure coherency between the kernel
  551. * and user pages. We used to pass __GFP_ZERO to alloc_page which mapped
  552. * zeroed and unmaped each individual page, and then we had to turn
  553. * around and call flush_dcache_page() on that page to clear the caches.
  554. * Since __GFP_ZERO will kmap_atomic;clear_page;kunmap_atomic it is faster
  555. * to do everything at once here making things faster for all buffer sizes.
  556. */
  557. for (j = 0; j < page_size >> PAGE_SHIFT; j++) {
  558. struct page *p = nth_page(page, j);
  559. void *kaddr = kmap_atomic(p);
  560. clear_page(kaddr);
  561. dmac_flush_range(kaddr, kaddr + PAGE_SIZE);
  562. kunmap_atomic(kaddr);
  563. }
  564. sg_set_page(&memdesc->sg[sglen++], page, page_size, 0);
  565. len -= page_size;
  566. }
  567. memdesc->sglen = sglen;
  568. memdesc->size = size;
  569. if (sglen > 0)
  570. sg_mark_end(&memdesc->sg[sglen - 1]);
  571. done:
  572. KGSL_STATS_ADD(memdesc->size, &kgsl_driver.stats.page_alloc,
  573. &kgsl_driver.stats.page_alloc_max);
  574. if (ret)
  575. kgsl_sharedmem_free(memdesc);
  576. return ret;
  577. }
  578. int
  579. kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
  580. struct kgsl_pagetable *pagetable, size_t size)
  581. {
  582. int ret = 0;
  583. BUG_ON(size == 0);
  584. size = ALIGN(size, PAGE_SIZE * 2);
  585. if (size == 0)
  586. return -EINVAL;
  587. ret = _kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
  588. if (!ret)
  589. ret = kgsl_page_alloc_map_kernel(memdesc);
  590. if (ret)
  591. kgsl_sharedmem_free(memdesc);
  592. return ret;
  593. }
  594. EXPORT_SYMBOL(kgsl_sharedmem_page_alloc);
  595. int
  596. kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
  597. struct kgsl_pagetable *pagetable,
  598. size_t size)
  599. {
  600. size = PAGE_ALIGN(size);
  601. if (size == 0)
  602. return -EINVAL;
  603. return _kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
  604. }
  605. EXPORT_SYMBOL(kgsl_sharedmem_page_alloc_user);
  606. int
  607. kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
  608. {
  609. int result = 0;
  610. size = ALIGN(size, PAGE_SIZE);
  611. if (size == 0)
  612. return -EINVAL;
  613. memdesc->size = size;
  614. memdesc->ops = &kgsl_coherent_ops;
  615. memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
  616. GFP_KERNEL);
  617. if (memdesc->hostptr == NULL) {
  618. KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
  619. result = -ENOMEM;
  620. goto err;
  621. }
  622. result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
  623. if (result)
  624. goto err;
  625. /* Record statistics */
  626. KGSL_STATS_ADD(size, &kgsl_driver.stats.coherent,
  627. &kgsl_driver.stats.coherent_max);
  628. err:
  629. if (result)
  630. kgsl_sharedmem_free(memdesc);
  631. return result;
  632. }
  633. EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);
  634. void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
  635. {
  636. if (memdesc == NULL || memdesc->size == 0)
  637. return;
  638. if (memdesc->gpuaddr) {
  639. kgsl_mmu_unmap(memdesc->pagetable, memdesc);
  640. kgsl_mmu_put_gpuaddr(memdesc->pagetable, memdesc);
  641. }
  642. if (memdesc->ops && memdesc->ops->free)
  643. memdesc->ops->free(memdesc);
  644. kgsl_sg_free(memdesc->sg, memdesc->sglen_alloc);
  645. memset(memdesc, 0, sizeof(*memdesc));
  646. }
  647. EXPORT_SYMBOL(kgsl_sharedmem_free);
  648. static int
  649. _kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
  650. struct kgsl_pagetable *pagetable, size_t size)
  651. {
  652. int result = 0;
  653. memdesc->size = size;
  654. memdesc->pagetable = pagetable;
  655. memdesc->ops = &kgsl_ebimem_ops;
  656. memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);
  657. if (memdesc->physaddr == 0) {
  658. KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n",
  659. size);
  660. return -ENOMEM;
  661. }
  662. result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
  663. if (result)
  664. goto err;
  665. KGSL_STATS_ADD(size, &kgsl_driver.stats.coherent,
  666. &kgsl_driver.stats.coherent_max);
  667. err:
  668. if (result)
  669. kgsl_sharedmem_free(memdesc);
  670. return result;
  671. }
  672. int
  673. kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
  674. struct kgsl_pagetable *pagetable,
  675. size_t size)
  676. {
  677. size = ALIGN(size, PAGE_SIZE);
  678. if (size == 0)
  679. return -EINVAL;
  680. return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
  681. }
  682. EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);
  683. int
  684. kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
  685. struct kgsl_pagetable *pagetable, size_t size)
  686. {
  687. int result;
  688. size = ALIGN(size, 8192);
  689. if (size == 0)
  690. return -EINVAL;
  691. result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
  692. if (result)
  693. return result;
  694. result = kgsl_ebimem_map_kernel(memdesc);
  695. if (result) {
  696. KGSL_CORE_ERR("hostptr mapping failed\n");
  697. kgsl_sharedmem_free(memdesc);
  698. return result;
  699. }
  700. return 0;
  701. }
  702. EXPORT_SYMBOL(kgsl_sharedmem_ebimem);
  703. int
  704. kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
  705. uint32_t *dst,
  706. unsigned int offsetbytes)
  707. {
  708. uint32_t *src;
  709. BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
  710. WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
  711. if (offsetbytes % sizeof(uint32_t) != 0)
  712. return -EINVAL;
  713. WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
  714. if (offsetbytes + sizeof(uint32_t) > memdesc->size)
  715. return -ERANGE;
  716. rmb();
  717. src = (uint32_t *)(memdesc->hostptr + offsetbytes);
  718. *dst = *src;
  719. return 0;
  720. }
  721. EXPORT_SYMBOL(kgsl_sharedmem_readl);
  722. int
  723. kgsl_sharedmem_writel(struct kgsl_device *device,
  724. const struct kgsl_memdesc *memdesc,
  725. unsigned int offsetbytes,
  726. uint32_t src)
  727. {
  728. uint32_t *dst;
  729. BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
  730. WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
  731. if (offsetbytes % sizeof(uint32_t) != 0)
  732. return -EINVAL;
  733. WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
  734. if (offsetbytes + sizeof(uint32_t) > memdesc->size)
  735. return -ERANGE;
  736. kgsl_cffdump_setmem(device,
  737. memdesc->gpuaddr + offsetbytes,
  738. src, sizeof(uint32_t));
  739. dst = (uint32_t *)(memdesc->hostptr + offsetbytes);
  740. *dst = src;
  741. wmb();
  742. return 0;
  743. }
  744. EXPORT_SYMBOL(kgsl_sharedmem_writel);
/*
 * Fill @sizebytes of the buffer at @offsetbytes with @value via the kernel
 * mapping, mirroring the write into the CFF dump stream. The range must lie
 * entirely inside the buffer (BUG otherwise). Always returns 0.
 */
int
kgsl_sharedmem_set(struct kgsl_device *device,
		const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
		unsigned int value, unsigned int sizebytes)
{
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	BUG_ON(offsetbytes + sizebytes > memdesc->size);

	kgsl_cffdump_setmem(device,
		memdesc->gpuaddr + offsetbytes, value,
		sizebytes);
	memset(memdesc->hostptr + offsetbytes, value, sizebytes);
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_set);
  759. /*
  760. * kgsl_sharedmem_map_vma - Map a user vma to physical memory
  761. *
  762. * @vma - The user vma to map
  763. * @memdesc - The memory descriptor which contains information about the
  764. * physical memory
  765. *
  766. * Return: 0 on success else error code
  767. */
  768. int
  769. kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
  770. const struct kgsl_memdesc *memdesc)
  771. {
  772. unsigned long addr = vma->vm_start;
  773. unsigned long size = vma->vm_end - vma->vm_start;
  774. int ret, i = 0;
  775. if (!memdesc->sg || (size != memdesc->size) ||
  776. (memdesc->sglen != (size / PAGE_SIZE)))
  777. return -EINVAL;
  778. for (; addr < vma->vm_end; addr += PAGE_SIZE, i++) {
  779. ret = vm_insert_page(vma, addr, sg_page(&memdesc->sg[i]));
  780. if (ret)
  781. return ret;
  782. }
  783. return 0;
  784. }
  785. EXPORT_SYMBOL(kgsl_sharedmem_map_vma);
/* Human-readable names for the KGSL_MEMTYPE_* usage hints, indexed by type */
static const char * const memtype_str[] = {
	[KGSL_MEMTYPE_OBJECTANY] = "any(0)",
	[KGSL_MEMTYPE_FRAMEBUFFER] = "framebuffer",
	[KGSL_MEMTYPE_RENDERBUFFER] = "renderbuffer",
	[KGSL_MEMTYPE_ARRAYBUFFER] = "arraybuffer",
	[KGSL_MEMTYPE_ELEMENTARRAYBUFFER] = "elementarraybuffer",
	[KGSL_MEMTYPE_VERTEXARRAYBUFFER] = "vertexarraybuffer",
	[KGSL_MEMTYPE_TEXTURE] = "texture",
	[KGSL_MEMTYPE_SURFACE] = "surface",
	[KGSL_MEMTYPE_EGL_SURFACE] = "egl_surface",
	[KGSL_MEMTYPE_GL] = "gl",
	[KGSL_MEMTYPE_CL] = "cl",
	[KGSL_MEMTYPE_CL_BUFFER_MAP] = "cl_buffer_map",
	[KGSL_MEMTYPE_CL_BUFFER_NOMAP] = "cl_buffer_nomap",
	[KGSL_MEMTYPE_CL_IMAGE_MAP] = "cl_image_map",
	[KGSL_MEMTYPE_CL_IMAGE_NOMAP] = "cl_image_nomap",
	[KGSL_MEMTYPE_CL_KERNEL_STACK] = "cl_kernel_stack",
	[KGSL_MEMTYPE_COMMAND] = "command",
	[KGSL_MEMTYPE_2D] = "2d",
	[KGSL_MEMTYPE_EGL_IMAGE] = "egl_image",
	[KGSL_MEMTYPE_EGL_SHADOW] = "egl_shadow",
	[KGSL_MEMTYPE_MULTISAMPLE] = "egl_multisample",
	/* KGSL_MEMTYPE_KERNEL handled below, to avoid huge array */
};
  810. void kgsl_get_memory_usage(char *name, size_t name_size, unsigned int memflags)
  811. {
  812. unsigned char type;
  813. type = (memflags & KGSL_MEMTYPE_MASK) >> KGSL_MEMTYPE_SHIFT;
  814. if (type == KGSL_MEMTYPE_KERNEL)
  815. strlcpy(name, "kernel", name_size);
  816. else if (type < ARRAY_SIZE(memtype_str) && memtype_str[type] != NULL)
  817. strlcpy(name, memtype_str[type], name_size);
  818. else
  819. snprintf(name, name_size, "unknown(%3d)", type);
  820. }
  821. EXPORT_SYMBOL(kgsl_get_memory_usage);