12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992 |
- /* Copyright (c) 2002,2007-2014,2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
- #include <linux/export.h>
- #include <linux/vmalloc.h>
- #include <linux/memory_alloc.h>
- #include <asm/cacheflush.h>
- #include <linux/slab.h>
- #include <linux/kmemleak.h>
- #include <linux/highmem.h>
- #include "kgsl.h"
- #include "kgsl_sharedmem.h"
- #include "kgsl_cffdump.h"
- #include "kgsl_device.h"
/* Serializes kernel map/unmap of memdescs (guards hostptr/hostptr_count) */
DEFINE_MUTEX(kernel_map_global_lock);

/* An attribute for showing per-process memory statistics */
struct kgsl_mem_entry_attribute {
	struct attribute attr;	/* underlying sysfs attribute (name/mode) */
	int memtype;		/* KGSL_MEM_ENTRY_* index into priv->stats[] */
	ssize_t (*show)(struct kgsl_process_private *priv,
		int type, char *buf);	/* formatter for this statistic */
};

#define to_mem_entry_attr(a) \
container_of(a, struct kgsl_mem_entry_attribute, attr)

/* Build a read-only (0444) attribute bound to one memtype and show fn */
#define __MEM_ENTRY_ATTR(_type, _name, _show) \
{ \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.memtype = _type, \
	.show = _show, \
}

/*
 * A structure to hold the attributes for a particular memory type.
 * For each memory type in each process we store the current and maximum
 * memory usage and display the counts in sysfs. This structure and
 * the following macro allow us to simplify the definition for those
 * adding new memory types
 */
struct mem_entry_stats {
	int memtype;				/* KGSL_MEM_ENTRY_* id */
	struct kgsl_mem_entry_attribute attr;		/* current usage node */
	struct kgsl_mem_entry_attribute max_attr;	/* high-water mark node */
};

/* Declare the current/max sysfs node pair for one memory type */
#define MEM_ENTRY_STAT(_type, _name) \
{ \
	.memtype = _type, \
	.attr = __MEM_ENTRY_ATTR(_type, _name, mem_entry_show), \
	.max_attr = __MEM_ENTRY_ATTR(_type, _name##_max, \
		mem_entry_max_show), \
}
- /**
- * Show the current amount of memory allocated for the given memtype
- */
- static ssize_t
- mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
- {
- return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].cur);
- }
- /**
- * Show the maximum memory allocated for the given memtype through the life of
- * the process
- */
- static ssize_t
- mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf)
- {
- return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].max);
- }
/*
 * No-op kobject release: the kobject is embedded in kgsl_process_private,
 * whose lifetime is managed by its own refcount, not by the kobject.
 */
static void mem_entry_sysfs_release(struct kobject *kobj)
{
}
- static ssize_t mem_entry_sysfs_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
- {
- struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr);
- struct kgsl_process_private *priv;
- ssize_t ret;
- /*
- * 1. sysfs_remove_file waits for reads to complete before the node
- * is deleted.
- * 2. kgsl_process_init_sysfs takes a refcount to the process_private,
- * which is put at the end of kgsl_process_uninit_sysfs.
- * These two conditions imply that priv will not be freed until this
- * function completes, and no further locking is needed.
- */
- priv = kobj ? container_of(kobj, struct kgsl_process_private, kobj) :
- NULL;
- if (priv && pattr->show)
- ret = pattr->show(priv, pattr->memtype, buf);
- else
- ret = -EIO;
- return ret;
- }
/* Read-only sysfs ops: per-process stat nodes have no store method */
static const struct sysfs_ops mem_entry_sysfs_ops = {
	.show = mem_entry_sysfs_show,
};

static struct kobj_type ktype_mem_entry = {
	.sysfs_ops = &mem_entry_sysfs_ops,
	.default_attrs = NULL,
	.release = mem_entry_sysfs_release
};

/* One current/max attribute pair per tracked memory entry type */
static struct mem_entry_stats mem_stats[] = {
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel),
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_PMEM, pmem),
#ifdef CONFIG_ASHMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ASHMEM, ashmem),
#endif
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user),
#ifdef CONFIG_ION
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ION, ion),
#endif
};
- void
- kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
- {
- int i;
- for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
- sysfs_remove_file(&private->kobj, &mem_stats[i].attr.attr);
- sysfs_remove_file(&private->kobj,
- &mem_stats[i].max_attr.attr);
- }
- kobject_put(&private->kobj);
- /* Put the refcount we got in kgsl_process_init_sysfs */
- kgsl_process_private_put(private);
- }
- /**
- * kgsl_process_init_sysfs() - Initialize and create sysfs files for a process
- *
- * @device: Pointer to kgsl device struct
- * @private: Pointer to the structure for the process
- *
- * @returns: 0 on success, error code otherwise
- *
- * kgsl_process_init_sysfs() is called at the time of creating the
- * process struct when a process opens the kgsl device for the first time.
- * This function creates the sysfs files for the process.
- */
- int
- kgsl_process_init_sysfs(struct kgsl_device *device,
- struct kgsl_process_private *private)
- {
- unsigned char name[16];
- int i, ret = 0;
- snprintf(name, sizeof(name), "%d", pid_nr(private->pid));
- ret = kobject_init_and_add(&private->kobj, &ktype_mem_entry,
- kgsl_driver.prockobj, name);
- if (ret)
- return ret;
- for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
- /* We need to check the value of sysfs_create_file, but we
- * don't really care if it passed or not */
- ret = sysfs_create_file(&private->kobj,
- &mem_stats[i].attr.attr);
- ret = sysfs_create_file(&private->kobj,
- &mem_stats[i].max_attr.attr);
- }
- /* Keep private valid until the sysfs enries are removed. */
- if (!ret)
- kgsl_process_private_get(private);
- return ret;
- }
- static int kgsl_drv_memstat_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- unsigned int val = 0;
- if (!strncmp(attr->attr.name, "vmalloc", 7))
- val = atomic_read(&kgsl_driver.stats.vmalloc);
- else if (!strncmp(attr->attr.name, "vmalloc_max", 11))
- val = atomic_read(&kgsl_driver.stats.vmalloc_max);
- else if (!strncmp(attr->attr.name, "page_alloc", 10))
- val = atomic_read(&kgsl_driver.stats.page_alloc);
- else if (!strncmp(attr->attr.name, "page_alloc_max", 14))
- val = atomic_read(&kgsl_driver.stats.page_alloc_max);
- else if (!strncmp(attr->attr.name, "coherent", 8))
- val = atomic_read(&kgsl_driver.stats.coherent);
- else if (!strncmp(attr->attr.name, "coherent_max", 12))
- val = atomic_read(&kgsl_driver.stats.coherent_max);
- else if (!strncmp(attr->attr.name, "mapped", 6))
- val = atomic_read(&kgsl_driver.stats.mapped);
- else if (!strncmp(attr->attr.name, "mapped_max", 10))
- val = atomic_read(&kgsl_driver.stats.mapped_max);
- return snprintf(buf, PAGE_SIZE, "%u\n", val);
- }
- static int kgsl_drv_full_cache_threshold_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- int ret;
- unsigned int thresh = 0;
- ret = kgsl_sysfs_store(buf, &thresh);
- if (ret)
- return ret;
- kgsl_driver.full_cache_threshold = thresh;
- return count;
- }
- static int kgsl_drv_full_cache_threshold_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- return snprintf(buf, PAGE_SIZE, "%d\n",
- kgsl_driver.full_cache_threshold);
- }
/* Driver-wide memory statistic nodes; all read-only except the threshold */
DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(full_cache_threshold, 0644,
	kgsl_drv_full_cache_threshold_show,
	kgsl_drv_full_cache_threshold_store);

/* NULL-terminated list consumed by kgsl_create_device_sysfs_files() */
static const struct device_attribute *drv_attr_list[] = {
	&dev_attr_vmalloc,
	&dev_attr_vmalloc_max,
	&dev_attr_page_alloc,
	&dev_attr_page_alloc_max,
	&dev_attr_coherent,
	&dev_attr_coherent_max,
	&dev_attr_mapped,
	&dev_attr_mapped_max,
	&dev_attr_full_cache_threshold,
	NULL
};
- void
- kgsl_sharedmem_uninit_sysfs(void)
- {
- kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
- }
- int
- kgsl_sharedmem_init_sysfs(void)
- {
- return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
- drv_attr_list);
- }
/*
 * Fault handler for page_alloc-backed user mappings: walk the scatterlist
 * to find the 4K page backing the faulting address and hand it to the VM.
 * Returns 0 with vmf->page populated, or VM_FAULT_SIGBUS when the fault
 * lies outside the buffer.
 */
static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	int i, pgoff;
	struct scatterlist *s = memdesc->sg;
	unsigned int offset;

	/* Byte offset of the fault within the mapping */
	offset = ((unsigned long) vmf->virtual_address - vma->vm_start);

	if (offset >= memdesc->size)
		return VM_FAULT_SIGBUS;

	pgoff = offset >> PAGE_SHIFT;

	/*
	 * The sglist might be comprised of mixed blocks of memory depending
	 * on how many 64K pages were allocated. This means we have to do math
	 * to find the actual 4K page to map in user space
	 */
	for (i = 0; i < memdesc->sglen; i++) {
		int npages = s->length >> PAGE_SHIFT;

		if (pgoff < npages) {
			struct page *page = sg_page(s);

			/* pgoff is now relative to this sg entry */
			page = nth_page(page, pgoff);

			/* Take a reference for the VM before handing it over */
			get_page(page);
			vmf->page = page;

			return 0;
		}

		pgoff -= npages;
		s = sg_next(s);
	}

	return VM_FAULT_SIGBUS;
}
- static int kgsl_page_alloc_vmflags(struct kgsl_memdesc *memdesc)
- {
- return VM_RESERVED | VM_DONTEXPAND | VM_DONTCOPY;
- }
- /*
- * kgsl_page_alloc_unmap_kernel() - Unmap the memory in memdesc
- *
- * @memdesc: The memory descriptor which contains information about the memory
- *
- * Unmaps the memory mapped into kernel address space
- */
- static void kgsl_page_alloc_unmap_kernel(struct kgsl_memdesc *memdesc)
- {
- mutex_lock(&kernel_map_global_lock);
- if (!memdesc->hostptr) {
- BUG_ON(memdesc->hostptr_count);
- goto done;
- }
- memdesc->hostptr_count--;
- if (memdesc->hostptr_count)
- goto done;
- vunmap(memdesc->hostptr);
- atomic_sub(memdesc->size, &kgsl_driver.stats.vmalloc);
- memdesc->hostptr = NULL;
- done:
- mutex_unlock(&kernel_map_global_lock);
- }
- static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
- {
- int i = 0;
- struct scatterlist *sg;
- int sglen = memdesc->sglen;
- atomic_sub(memdesc->size, &kgsl_driver.stats.page_alloc);
- kgsl_page_alloc_unmap_kernel(memdesc);
- /* we certainly do not expect the hostptr to still be mapped */
- BUG_ON(memdesc->hostptr);
- if (memdesc->sg)
- for_each_sg(memdesc->sg, sg, sglen, i)
- __free_pages(sg_page(sg), get_order(sg->length));
- }
- static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
- {
- return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTCOPY;
- }
- /*
- * kgsl_page_alloc_map_kernel - Map the memory in memdesc to kernel address
- * space
- *
- * @memdesc - The memory descriptor which contains information about the memory
- *
- * Return: 0 on success else error code
- */
- static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
- {
- int ret = 0;
- mutex_lock(&kernel_map_global_lock);
- if (!memdesc->hostptr) {
- pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
- struct page **pages = NULL;
- struct scatterlist *sg;
- int npages = PAGE_ALIGN(memdesc->size) >> PAGE_SHIFT;
- int sglen = memdesc->sglen;
- int i, count = 0;
- /* create a list of pages to call vmap */
- pages = vmalloc(npages * sizeof(struct page *));
- if (!pages) {
- KGSL_CORE_ERR("vmalloc(%d) failed\n",
- npages * sizeof(struct page *));
- ret = -ENOMEM;
- goto done;
- }
- for_each_sg(memdesc->sg, sg, sglen, i) {
- struct page *page = sg_page(sg);
- int j;
- for (j = 0; j < sg->length >> PAGE_SHIFT; j++)
- pages[count++] = page++;
- }
- memdesc->hostptr = vmap(pages, count,
- VM_IOREMAP, page_prot);
- if (memdesc->hostptr)
- KGSL_STATS_ADD(memdesc->size,
- &kgsl_driver.stats.vmalloc,
- &kgsl_driver.stats.vmalloc_max);
- else
- ret = -ENOMEM;
- vfree(pages);
- }
- if (memdesc->hostptr)
- memdesc->hostptr_count++;
- done:
- mutex_unlock(&kernel_map_global_lock);
- return ret;
- }
/*
 * Fault handler for contiguous (PFN-mapped) buffers: insert the PFN that
 * backs the faulting address directly into the user page tables.
 */
static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset, pfn;
	int ret;

	/* Page index of the fault within the mapping */
	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
	ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);

	/* Translate vm_insert_pfn() errnos into VM fault codes */
	if (ret == -ENOMEM || ret == -EAGAIN)
		return VM_FAULT_OOM;
	else if (ret == -EFAULT)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}
- static void kgsl_ebimem_unmap_kernel(struct kgsl_memdesc *memdesc)
- {
- mutex_lock(&kernel_map_global_lock);
- if (!memdesc->hostptr) {
- BUG_ON(memdesc->hostptr_count);
- goto done;
- }
- memdesc->hostptr_count--;
- if (memdesc->hostptr_count)
- goto done;
- iounmap(memdesc->hostptr);
- memdesc->hostptr = NULL;
- done:
- mutex_unlock(&kernel_map_global_lock);
- }
- static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
- {
- atomic_sub(memdesc->size,
- &kgsl_driver.stats.coherent);
- kgsl_ebimem_unmap_kernel(memdesc);
- /* we certainly do not expect the hostptr to still be mapped */
- BUG_ON(memdesc->hostptr);
- free_contiguous_memory_by_paddr(memdesc->physaddr);
- }
- static int kgsl_ebimem_map_kernel(struct kgsl_memdesc *memdesc)
- {
- int ret = 0;
- mutex_lock(&kernel_map_global_lock);
- if (!memdesc->hostptr) {
- memdesc->hostptr = ioremap(memdesc->physaddr, memdesc->size);
- if (!memdesc->hostptr) {
- KGSL_CORE_ERR("ioremap failed, addr:0x%pK, size:0x%x\n",
- memdesc->hostptr, memdesc->size);
- ret = -ENOMEM;
- goto done;
- }
- }
- memdesc->hostptr_count++;
- done:
- mutex_unlock(&kernel_map_global_lock);
- return ret;
- }
- static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
- {
- atomic_sub(memdesc->size,
- &kgsl_driver.stats.coherent);
- dma_free_coherent(NULL, memdesc->size,
- memdesc->hostptr, memdesc->physaddr);
- }
/* Global - also used by kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_page_alloc_ops = {
	.free = kgsl_page_alloc_free,
	.vmflags = kgsl_page_alloc_vmflags,
	.vmfault = kgsl_page_alloc_vmfault,
	.map_kernel = kgsl_page_alloc_map_kernel,
	.unmap_kernel = kgsl_page_alloc_unmap_kernel,
};
EXPORT_SYMBOL(kgsl_page_alloc_ops);

/* Ops for contiguous EBI-backed allocations */
static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
	.free = kgsl_ebimem_free,
	.vmflags = kgsl_contiguous_vmflags,
	.vmfault = kgsl_contiguous_vmfault,
	.map_kernel = kgsl_ebimem_map_kernel,
	.unmap_kernel = kgsl_ebimem_unmap_kernel,
};

/* DMA-coherent buffers are permanently kernel mapped; only free is needed */
static struct kgsl_memdesc_ops kgsl_coherent_ops = {
	.free = kgsl_coherent_free,
};
/*
 * Perform a CPU cache maintenance operation (flush/clean/invalidate) over
 * the whole buffer described by @memdesc. Silently does nothing when the
 * buffer has neither a kernel nor a user mapping.
 */
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
{
	/*
	 * If the buffer is mapped in the kernel operate on that address
	 * otherwise use the user address
	 */
	void *addr = (memdesc->hostptr) ?
		memdesc->hostptr : (void *) memdesc->useraddr;

	int size = memdesc->size;

	if (addr != NULL) {
		switch (op) {
		case KGSL_CACHE_OP_FLUSH:
			/* Write back dirty lines and invalidate */
			dmac_flush_range(addr, addr + size);
			break;
		case KGSL_CACHE_OP_CLEAN:
			/* Write back dirty lines, keep them cached */
			dmac_clean_range(addr, addr + size);
			break;
		case KGSL_CACHE_OP_INV:
			/* Discard cached lines without writing back */
			dmac_inv_range(addr, addr + size);
			break;
		}
	}
}
EXPORT_SYMBOL(kgsl_cache_range_op);
- static int
- _kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
- struct kgsl_pagetable *pagetable,
- size_t size)
- {
- int ret = 0;
- int len, page_size, sglen_alloc, sglen = 0;
- unsigned int align;
- size = PAGE_ALIGN(size);
- if (size == 0 || size > UINT_MAX)
- return -EINVAL;
- align = (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
- page_size = (align >= ilog2(SZ_64K) && size >= SZ_64K)
- ? SZ_64K : PAGE_SIZE;
- /*
- * The alignment cannot be less than the intended page size - it can be
- * larger however to accomodate hardware quirks
- */
- if (ilog2(align) < page_size)
- kgsl_memdesc_set_align(memdesc, ilog2(page_size));
- /*
- * There needs to be enough room in the sg structure to be able to
- * service the allocation entirely with PAGE_SIZE sized chunks
- */
- sglen_alloc = PAGE_ALIGN(size) >> PAGE_SHIFT;
- memdesc->pagetable = pagetable;
- memdesc->ops = &kgsl_page_alloc_ops;
- memdesc->sglen_alloc = sglen_alloc;
- memdesc->sg = kgsl_sg_alloc(memdesc->sglen_alloc);
- if (memdesc->sg == NULL) {
- ret = -ENOMEM;
- goto done;
- }
- sg_init_table(memdesc->sg, memdesc->sglen_alloc);
- len = size;
- while (len > 0) {
- struct page *page;
- unsigned int gfp_mask = __GFP_HIGHMEM;
- int j;
- /* don't waste space at the end of the allocation*/
- if (len < page_size)
- page_size = PAGE_SIZE;
- /*
- * Don't do some of the more aggressive memory recovery
- * techniques for large order allocations
- */
- if (page_size != PAGE_SIZE)
- gfp_mask |= __GFP_COMP | __GFP_NORETRY |
- __GFP_NO_KSWAPD | __GFP_NOWARN;
- else
- gfp_mask |= GFP_KERNEL;
- page = alloc_pages(gfp_mask, get_order(page_size));
- if (page == NULL) {
- if (page_size != PAGE_SIZE) {
- page_size = PAGE_SIZE;
- continue;
- }
- /*
- * Update sglen and memdesc size,as requested allocation
- * not served fully. So that they can be correctly freed
- * in kgsl_sharedmem_free().
- */
- memdesc->sglen = sglen;
- memdesc->size = (size - len);
- KGSL_CORE_ERR(
- "Out of memory: only allocated %dKB of %dKB requested\n",
- (size - len) >> 10, size >> 10);
- ret = -ENOMEM;
- goto done;
- }
- /*
- * All memory that goes to the user has to be zeroed out before it gets
- * exposed to userspace. This means that the memory has to be mapped in
- * the kernel, zeroed (memset) and then unmapped. This also means that
- * the dcache has to be flushed to ensure coherency between the kernel
- * and user pages. We used to pass __GFP_ZERO to alloc_page which mapped
- * zeroed and unmaped each individual page, and then we had to turn
- * around and call flush_dcache_page() on that page to clear the caches.
- * Since __GFP_ZERO will kmap_atomic;clear_page;kunmap_atomic it is faster
- * to do everything at once here making things faster for all buffer sizes.
- */
- for (j = 0; j < page_size >> PAGE_SHIFT; j++) {
- struct page *p = nth_page(page, j);
- void *kaddr = kmap_atomic(p);
- clear_page(kaddr);
- dmac_flush_range(kaddr, kaddr + PAGE_SIZE);
- kunmap_atomic(kaddr);
- }
- sg_set_page(&memdesc->sg[sglen++], page, page_size, 0);
- len -= page_size;
- }
- memdesc->sglen = sglen;
- memdesc->size = size;
- if (sglen > 0)
- sg_mark_end(&memdesc->sg[sglen - 1]);
- done:
- KGSL_STATS_ADD(memdesc->size, &kgsl_driver.stats.page_alloc,
- &kgsl_driver.stats.page_alloc_max);
- if (ret)
- kgsl_sharedmem_free(memdesc);
- return ret;
- }
- int
- kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
- struct kgsl_pagetable *pagetable, size_t size)
- {
- int ret = 0;
- BUG_ON(size == 0);
- size = ALIGN(size, PAGE_SIZE * 2);
- if (size == 0)
- return -EINVAL;
- ret = _kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
- if (!ret)
- ret = kgsl_page_alloc_map_kernel(memdesc);
- if (ret)
- kgsl_sharedmem_free(memdesc);
- return ret;
- }
- EXPORT_SYMBOL(kgsl_sharedmem_page_alloc);
- int
- kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
- struct kgsl_pagetable *pagetable,
- size_t size)
- {
- size = PAGE_ALIGN(size);
- if (size == 0)
- return -EINVAL;
- return _kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
- }
- EXPORT_SYMBOL(kgsl_sharedmem_page_alloc_user);
- int
- kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
- {
- int result = 0;
- size = ALIGN(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
- memdesc->size = size;
- memdesc->ops = &kgsl_coherent_ops;
- memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
- GFP_KERNEL);
- if (memdesc->hostptr == NULL) {
- KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
- result = -ENOMEM;
- goto err;
- }
- result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
- if (result)
- goto err;
- /* Record statistics */
- KGSL_STATS_ADD(size, &kgsl_driver.stats.coherent,
- &kgsl_driver.stats.coherent_max);
- err:
- if (result)
- kgsl_sharedmem_free(memdesc);
- return result;
- }
- EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);
/*
 * Tear down a memdesc: unmap it from the GPU, release the backing memory
 * through its ops, free the scatterlist, and zero the descriptor.
 * Safe to call on NULL or already-freed descriptors.
 */
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
{
	/* size == 0 covers descriptors already zeroed by a previous free */
	if (memdesc == NULL || memdesc->size == 0)
		return;

	/* Remove the GPU mapping before the backing store goes away */
	if (memdesc->gpuaddr) {
		kgsl_mmu_unmap(memdesc->pagetable, memdesc);
		kgsl_mmu_put_gpuaddr(memdesc->pagetable, memdesc);
	}

	if (memdesc->ops && memdesc->ops->free)
		memdesc->ops->free(memdesc);

	kgsl_sg_free(memdesc->sg, memdesc->sglen_alloc);

	/* Zeroing makes any accidental double free a harmless no-op */
	memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);
- static int
- _kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
- struct kgsl_pagetable *pagetable, size_t size)
- {
- int result = 0;
- memdesc->size = size;
- memdesc->pagetable = pagetable;
- memdesc->ops = &kgsl_ebimem_ops;
- memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);
- if (memdesc->physaddr == 0) {
- KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n",
- size);
- return -ENOMEM;
- }
- result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
- if (result)
- goto err;
- KGSL_STATS_ADD(size, &kgsl_driver.stats.coherent,
- &kgsl_driver.stats.coherent_max);
- err:
- if (result)
- kgsl_sharedmem_free(memdesc);
- return result;
- }
- int
- kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
- struct kgsl_pagetable *pagetable,
- size_t size)
- {
- size = ALIGN(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
- return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
- }
- EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);
- int
- kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
- struct kgsl_pagetable *pagetable, size_t size)
- {
- int result;
- size = ALIGN(size, 8192);
- if (size == 0)
- return -EINVAL;
- result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
- if (result)
- return result;
- result = kgsl_ebimem_map_kernel(memdesc);
- if (result) {
- KGSL_CORE_ERR("hostptr mapping failed\n");
- kgsl_sharedmem_free(memdesc);
- return result;
- }
- return 0;
- }
- EXPORT_SYMBOL(kgsl_sharedmem_ebimem);
/*
 * Read one aligned 32-bit word from a kernel-mapped memdesc.
 * Returns 0 with *dst filled in, -EINVAL on misalignment, -ERANGE when
 * the word would fall outside the buffer.
 */
int
kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			uint32_t *dst,
			unsigned int offsetbytes)
{
	uint32_t *src;

	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
	if (offsetbytes + sizeof(uint32_t) > memdesc->size)
		return -ERANGE;

	/* Read barrier: order this load after any prior device writes */
	rmb();
	src = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = *src;
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_readl);
/*
 * Write one aligned 32-bit word into a kernel-mapped memdesc, mirroring
 * the write into the CFF dump stream for replay.
 * Returns 0 on success, -EINVAL on misalignment, -ERANGE when the word
 * would fall outside the buffer.
 */
int
kgsl_sharedmem_writel(struct kgsl_device *device,
			const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes,
			uint32_t src)
{
	uint32_t *dst;

	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
	if (offsetbytes + sizeof(uint32_t) > memdesc->size)
		return -ERANGE;

	kgsl_cffdump_setmem(device,
		memdesc->gpuaddr + offsetbytes,
		src, sizeof(uint32_t));

	dst = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = src;

	/* Write barrier: make the store visible before any later writes */
	wmb();

	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_writel);
- int
- kgsl_sharedmem_set(struct kgsl_device *device,
- const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
- unsigned int value, unsigned int sizebytes)
- {
- BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
- BUG_ON(offsetbytes + sizebytes > memdesc->size);
- kgsl_cffdump_setmem(device,
- memdesc->gpuaddr + offsetbytes, value,
- sizebytes);
- memset(memdesc->hostptr + offsetbytes, value, sizebytes);
- return 0;
- }
- EXPORT_SYMBOL(kgsl_sharedmem_set);
- /*
- * kgsl_sharedmem_map_vma - Map a user vma to physical memory
- *
- * @vma - The user vma to map
- * @memdesc - The memory descriptor which contains information about the
- * physical memory
- *
- * Return: 0 on success else error code
- */
- int
- kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
- const struct kgsl_memdesc *memdesc)
- {
- unsigned long addr = vma->vm_start;
- unsigned long size = vma->vm_end - vma->vm_start;
- int ret, i = 0;
- if (!memdesc->sg || (size != memdesc->size) ||
- (memdesc->sglen != (size / PAGE_SIZE)))
- return -EINVAL;
- for (; addr < vma->vm_end; addr += PAGE_SIZE, i++) {
- ret = vm_insert_page(vma, addr, sg_page(&memdesc->sg[i]));
- if (ret)
- return ret;
- }
- return 0;
- }
- EXPORT_SYMBOL(kgsl_sharedmem_map_vma);
/* Human-readable names indexed by KGSL_MEMTYPE_*; gaps print as unknown */
static const char * const memtype_str[] = {
	[KGSL_MEMTYPE_OBJECTANY] = "any(0)",
	[KGSL_MEMTYPE_FRAMEBUFFER] = "framebuffer",
	[KGSL_MEMTYPE_RENDERBUFFER] = "renderbuffer",
	[KGSL_MEMTYPE_ARRAYBUFFER] = "arraybuffer",
	[KGSL_MEMTYPE_ELEMENTARRAYBUFFER] = "elementarraybuffer",
	[KGSL_MEMTYPE_VERTEXARRAYBUFFER] = "vertexarraybuffer",
	[KGSL_MEMTYPE_TEXTURE] = "texture",
	[KGSL_MEMTYPE_SURFACE] = "surface",
	[KGSL_MEMTYPE_EGL_SURFACE] = "egl_surface",
	[KGSL_MEMTYPE_GL] = "gl",
	[KGSL_MEMTYPE_CL] = "cl",
	[KGSL_MEMTYPE_CL_BUFFER_MAP] = "cl_buffer_map",
	[KGSL_MEMTYPE_CL_BUFFER_NOMAP] = "cl_buffer_nomap",
	[KGSL_MEMTYPE_CL_IMAGE_MAP] = "cl_image_map",
	[KGSL_MEMTYPE_CL_IMAGE_NOMAP] = "cl_image_nomap",
	[KGSL_MEMTYPE_CL_KERNEL_STACK] = "cl_kernel_stack",
	[KGSL_MEMTYPE_COMMAND] = "command",
	[KGSL_MEMTYPE_2D] = "2d",
	[KGSL_MEMTYPE_EGL_IMAGE] = "egl_image",
	[KGSL_MEMTYPE_EGL_SHADOW] = "egl_shadow",
	[KGSL_MEMTYPE_MULTISAMPLE] = "egl_multisample",
	/* KGSL_MEMTYPE_KERNEL handled below, to avoid huge array */
};
- void kgsl_get_memory_usage(char *name, size_t name_size, unsigned int memflags)
- {
- unsigned char type;
- type = (memflags & KGSL_MEMTYPE_MASK) >> KGSL_MEMTYPE_SHIFT;
- if (type == KGSL_MEMTYPE_KERNEL)
- strlcpy(name, "kernel", name_size);
- else if (type < ARRAY_SIZE(memtype_str) && memtype_str[type] != NULL)
- strlcpy(name, memtype_str[type], name_size);
- else
- snprintf(name, name_size, "unknown(%3d)", type);
- }
- EXPORT_SYMBOL(kgsl_get_memory_usage);
|