- /*
- * drivers/gpu/ion/ion.c
- *
- * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
- #include <linux/atomic.h>
- #include <linux/device.h>
- #include <linux/err.h>
- #include <linux/file.h>
- #include <linux/freezer.h>
- #include <linux/fs.h>
- #include <linux/anon_inodes.h>
- #include <linux/ion.h>
- #include <linux/kthread.h>
- #include <linux/list.h>
- #include <linux/list_sort.h>
- #include <linux/memblock.h>
- #include <linux/miscdevice.h>
- #include <linux/export.h>
- #include <linux/mm.h>
- #include <linux/mm_types.h>
- #include <linux/rbtree.h>
- #include <linux/slab.h>
- #include <linux/seq_file.h>
- #include <linux/uaccess.h>
- #include <linux/debugfs.h>
- #include <linux/dma-buf.h>
- #include <linux/idr.h>
- #include <linux/msm_ion.h>
- #include <trace/events/kmem.h>
- #include "ion_priv.h"
- /**
-  * struct ion_device - the metadata of the ion device node
-  * @dev: the actual misc device
-  * @buffers: an rb tree of all the existing buffers
-  * @buffer_lock: lock protecting the tree of buffers
-  * @lock: rwsem protecting the tree of heaps and clients
-  * @heaps: list of all the heaps in the system
-  * @custom_ioctl: device-specific ioctl hook
-  * @clients: an rb tree of all the clients in the system
-  * @debug_root: debugfs root directory for ion
-  * @heaps_debug_root: debugfs directory holding the per-heap entries
-  * @clients_debug_root: debugfs directory holding the per-client entries
-  */
- struct ion_device {
- struct miscdevice dev;
- struct rb_root buffers;
- struct mutex buffer_lock;
- struct rw_semaphore lock;
- struct plist_head heaps;
- long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
- unsigned long arg);
- struct rb_root clients;
- struct dentry *debug_root;
- struct dentry *heaps_debug_root;
- struct dentry *clients_debug_root;
- };
- /**
-  * struct ion_client - a process/hw block local address space
-  * @node: node in the tree of all clients
-  * @dev: backpointer to ion device
-  * @handles: an rb tree of all the handles in this client
-  * @idr: an idr space for allocating handle ids
-  * @lock: lock protecting the tree of handles
-  * @name: used for debugging
-  * @task: used for debugging
-  * @pid: pid of the client task, used for debugging
-  * @debug_root: this client's entry in the clients debugfs directory
-  *
-  * A client represents a list of buffers this client may access.
-  * The mutex stored here is used to protect both the tree of handles
-  * and the handles themselves, and should be held while modifying either.
-  */
- struct ion_client {
- struct rb_node node;
- struct ion_device *dev;
- struct rb_root handles;
- struct idr idr;
- struct mutex lock;
- char *name;
- struct task_struct *task;
- pid_t pid;
- struct dentry *debug_root;
- };
- /**
-  * ion_handle - a client local reference to a buffer
-  * @ref: reference count
-  * @user_ref_count: number of references userspace holds on this handle
-  * @client: back pointer to the client the buffer resides in
-  * @buffer: pointer to the buffer
-  * @node: node in the client's handle rbtree
-  * @kmap_cnt: count of times this client has mapped to kernel
-  * @id: client-unique id allocated by client->idr
-  *
-  * Modifications to node, kmap_cnt or mapping should be protected by the
-  * lock in the client. Other fields are never changed after initialization.
-  */
- struct ion_handle {
- struct kref ref;
- unsigned int user_ref_count;
- struct ion_client *client;
- struct ion_buffer *buffer;
- struct rb_node node;
- unsigned int kmap_cnt;
- int id;
- };
- bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
- {
- return ((buffer->flags & ION_FLAG_CACHED) &&
- !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
- }
- bool ion_buffer_cached(struct ion_buffer *buffer)
- {
- return !!(buffer->flags & ION_FLAG_CACHED);
- }
- /* this function should only be called while dev->lock is held */
- static void ion_buffer_add(struct ion_device *dev,
- struct ion_buffer *buffer)
- {
- struct rb_node **p = &dev->buffers.rb_node;
- struct rb_node *parent = NULL;
- struct ion_buffer *entry;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_buffer, node);
- if (buffer < entry) {
- p = &(*p)->rb_left;
- } else if (buffer > entry) {
- p = &(*p)->rb_right;
- } else {
- pr_err("%s: buffer already found.", __func__);
- BUG();
- }
- }
- rb_link_node(&buffer->node, parent, p);
- rb_insert_color(&buffer->node, &dev->buffers);
- }
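The tree is keyed by the buffer's address. For illustration, the matching lookup (not present in this file; a sketch only) would walk the same comparisons:

    static struct ion_buffer *ion_buffer_find(struct ion_device *dev,
                                              struct ion_buffer *buffer)
    {
            struct rb_node *n = dev->buffers.rb_node;

            while (n) {
                    struct ion_buffer *entry =
                            rb_entry(n, struct ion_buffer, node);

                    if (buffer < entry)
                            n = n->rb_left;
                    else if (buffer > entry)
                            n = n->rb_right;
                    else
                            return entry;
            }
            return NULL;
    }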
- static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
- /* this function should only be called while dev->lock is held */
- static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
- struct ion_device *dev,
- unsigned long len,
- unsigned long align,
- unsigned long flags)
- {
- struct ion_buffer *buffer;
- struct sg_table *table;
- struct scatterlist *sg;
- int i, ret;
- buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
- if (!buffer)
- return ERR_PTR(-ENOMEM);
- buffer->heap = heap;
- buffer->flags = flags;
- kref_init(&buffer->ref);
- ret = heap->ops->allocate(heap, buffer, len, align, flags);
- if (ret) {
- if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
- goto err2;
- ion_heap_freelist_drain(heap, 0);
- ret = heap->ops->allocate(heap, buffer, len, align,
- flags);
- if (ret)
- goto err2;
- }
- buffer->dev = dev;
- buffer->size = len;
- table = heap->ops->map_dma(heap, buffer);
- if (IS_ERR_OR_NULL(table)) {
- heap->ops->free(buffer);
- kfree(buffer);
- return ERR_PTR(PTR_ERR(table));
- }
- buffer->sg_table = table;
- if (ion_buffer_fault_user_mappings(buffer)) {
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
- i) {
- if (sg_dma_len(sg) == PAGE_SIZE)
- continue;
- pr_err("%s: cached mappings that will be faulted in "
- "must have pagewise sg_lists\n", __func__);
- ret = -EINVAL;
- goto err;
- }
- ret = ion_buffer_alloc_dirty(buffer);
- if (ret)
- goto err;
- }
- buffer->dev = dev;
- buffer->size = len;
- INIT_LIST_HEAD(&buffer->vmas);
- mutex_init(&buffer->lock);
- /* this will set up dma addresses for the sglist -- it is not
- technically correct as per the dma api -- a specific
- device isn't really taking ownership here. However, in practice on
- our systems the only dma_address space is physical addresses.
- Additionally, we can't afford the overhead of invalidating every
- allocation via dma_map_sg. The implicit contract here is that
- memory coming from the heaps is ready for dma, i.e. if it has a
- cached mapping that mapping has been invalidated */
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
- if (sg_dma_address(sg) == 0)
- sg_dma_address(sg) = sg_phys(sg);
- }
- mutex_lock(&dev->buffer_lock);
- ion_buffer_add(dev, buffer);
- mutex_unlock(&dev->buffer_lock);
- return buffer;
- err:
- heap->ops->unmap_dma(heap, buffer);
- heap->ops->free(buffer);
- err2:
- kfree(buffer);
- return ERR_PTR(ret);
- }
- static void ion_delayed_unsecure(struct ion_buffer *buffer)
- {
- if (buffer->heap->ops->unsecure_buffer)
- buffer->heap->ops->unsecure_buffer(buffer, 1);
- }
- void ion_buffer_destroy(struct ion_buffer *buffer)
- {
- if (WARN_ON(buffer->kmap_cnt > 0))
- buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
- buffer->heap->ops->unmap_dma(buffer->heap, buffer);
- ion_delayed_unsecure(buffer);
- buffer->heap->ops->free(buffer);
- if (buffer->flags & ION_FLAG_CACHED)
- kfree(buffer->dirty);
- kfree(buffer);
- }
- static void _ion_buffer_destroy(struct kref *kref)
- {
- struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
- struct ion_heap *heap = buffer->heap;
- struct ion_device *dev = buffer->dev;
- mutex_lock(&dev->buffer_lock);
- rb_erase(&buffer->node, &dev->buffers);
- mutex_unlock(&dev->buffer_lock);
- if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
- ion_heap_freelist_add(heap, buffer);
- else
- ion_buffer_destroy(buffer);
- }
- static void ion_buffer_get(struct ion_buffer *buffer)
- {
- kref_get(&buffer->ref);
- }
- static int ion_buffer_put(struct ion_buffer *buffer)
- {
- return kref_put(&buffer->ref, _ion_buffer_destroy);
- }
- static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
- {
- mutex_lock(&buffer->lock);
- buffer->handle_count++;
- mutex_unlock(&buffer->lock);
- }
- static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
- {
- /*
- * When a buffer is removed from its last handle, copy the task comm
- * and the pid of the process it is being removed from into the
- * buffer. From that point on the buffer exists only as a dma_buf
- * file descriptor and there is no way to track which processes are
- * using it; the saved comm and pid provide a debugging hint as to
- * where the fd ended up in the system.
- */
- mutex_lock(&buffer->lock);
- buffer->handle_count--;
- BUG_ON(buffer->handle_count < 0);
- if (!buffer->handle_count) {
- struct task_struct *task;
- task = current->group_leader;
- get_task_comm(buffer->task_comm, task);
- buffer->pid = task_pid_nr(task);
- }
- mutex_unlock(&buffer->lock);
- }
- static struct ion_handle *ion_handle_create(struct ion_client *client,
- struct ion_buffer *buffer)
- {
- struct ion_handle *handle;
- handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
- if (!handle)
- return ERR_PTR(-ENOMEM);
- kref_init(&handle->ref);
- rb_init_node(&handle->node);
- handle->client = client;
- ion_buffer_get(buffer);
- ion_buffer_add_to_handle(buffer);
- handle->buffer = buffer;
- return handle;
- }
- static void ion_handle_kmap_put(struct ion_handle *);
- static void ion_handle_destroy(struct kref *kref)
- {
- struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
- struct ion_client *client = handle->client;
- struct ion_buffer *buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- while (handle->kmap_cnt)
- ion_handle_kmap_put(handle);
- mutex_unlock(&buffer->lock);
- idr_remove(&client->idr, handle->id);
- if (!RB_EMPTY_NODE(&handle->node))
- rb_erase(&handle->node, &client->handles);
- ion_buffer_remove_from_handle(buffer);
- ion_buffer_put(buffer);
- kfree(handle);
- }
- struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
- {
- return handle->buffer;
- }
- static void ion_handle_get(struct ion_handle *handle)
- {
- kref_get(&handle->ref);
- }
- /* Must hold the client lock */
- static struct ion_handle* ion_handle_get_check_overflow(struct ion_handle *handle)
- {
- if (atomic_read(&handle->ref.refcount) + 1 == 0)
- return ERR_PTR(-EOVERFLOW);
- ion_handle_get(handle);
- return handle;
- }
- static int ion_handle_put_nolock(struct ion_handle *handle)
- {
- int ret;
- ret = kref_put(&handle->ref, ion_handle_destroy);
- return ret;
- }
- int ion_handle_put(struct ion_handle *handle)
- {
- struct ion_client *client = handle->client;
- int ret;
- mutex_lock(&client->lock);
- ret = ion_handle_put_nolock(handle);
- mutex_unlock(&client->lock);
- return ret;
- }
- /* Must hold the client lock */
- static void user_ion_handle_get(struct ion_handle *handle)
- {
- if (handle->user_ref_count++ == 0)
- kref_get(&handle->ref);
- }
- /* Must hold the client lock */
- static struct ion_handle *user_ion_handle_get_check_overflow(
- struct ion_handle *handle)
- {
- if (handle->user_ref_count + 1 == 0)
- return ERR_PTR(-EOVERFLOW);
- user_ion_handle_get(handle);
- return handle;
- }
- /* passes a kref to the user ref count.
- * We know we're holding a kref to the object before and
- * after this call, so no need to reverify handle. */
- static struct ion_handle *pass_to_user(struct ion_handle *handle)
- {
- struct ion_client *client = handle->client;
- struct ion_handle *ret;
- mutex_lock(&client->lock);
- ret = user_ion_handle_get_check_overflow(handle);
- ion_handle_put_nolock(handle);
- mutex_unlock(&client->lock);
- return ret;
- }
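For concreteness, a sketch of the kref value along the ION_IOC_ALLOC path (handled in ion_ioctl below):

    /*
     * handle = __ion_alloc(..., true);  kref = 2 (creation ref + grabbed ref)
     * pass_to_user(handle);             kref = 2 (first user ref taken, the
     *                                            passed-in ref dropped)
     * ion_handle_put(handle);           kref = 1 (only the user ref is left;
     *                                            ION_IOC_FREE drops it later)
     */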
- /* Must hold the client lock */
- static int user_ion_handle_put_nolock(struct ion_handle *handle)
- {
- int ret = 0;
- if (--handle->user_ref_count == 0)
- ret = ion_handle_put_nolock(handle);
- return ret;
- }
- static struct ion_handle *ion_handle_lookup(struct ion_client *client,
- struct ion_buffer *buffer)
- {
- struct rb_node *n;
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- if (handle->buffer == buffer)
- return handle;
- }
- return NULL;
- }
- static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
- int id)
- {
- struct ion_handle *handle;
- handle = idr_find(&client->idr, id);
- if (handle)
- return ion_handle_get_check_overflow(handle);
- return ERR_PTR(-EINVAL);
- }
- struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id)
- {
- struct ion_handle *handle;
- mutex_lock(&client->lock);
- handle = ion_handle_get_by_id_nolock(client, id);
- mutex_unlock(&client->lock);
- return handle;
- }
- static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
- {
- WARN_ON(!mutex_is_locked(&client->lock));
- return (idr_find(&client->idr, handle->id) == handle);
- }
- static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
- {
- int rc;
- struct rb_node **p = &client->handles.rb_node;
- struct rb_node *parent = NULL;
- struct ion_handle *entry;
- do {
- int id;
- rc = idr_pre_get(&client->idr, GFP_KERNEL);
- if (!rc)
- return -ENOMEM;
- rc = idr_get_new_above(&client->idr, handle, 1, &id);
- handle->id = id;
- } while (rc == -EAGAIN);
- if (rc < 0)
- return rc;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_handle, node);
- if (handle < entry)
- p = &(*p)->rb_left;
- else if (handle > entry)
- p = &(*p)->rb_right;
- else
- WARN(1, "%s: buffer already found.", __func__);
- }
- rb_link_node(&handle->node, parent, p);
- rb_insert_color(&handle->node, &client->handles);
- return 0;
- }
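For reference, kernels v3.9 and later replace the idr_pre_get()/idr_get_new_above() retry loop with a single idr_alloc() call; an equivalent sketch under the newer API (not applicable to this tree as written):

    int id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);

    if (id < 0)
            return id;
    handle->id = id;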
- struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_id_mask,
- unsigned int flags, bool grab_handle)
- {
- struct ion_handle *handle;
- struct ion_device *dev = client->dev;
- struct ion_buffer *buffer = NULL;
- struct ion_heap *heap;
- int ret;
- unsigned long secure_allocation = flags & ION_FLAG_SECURE;
- const unsigned int MAX_DBG_STR_LEN = 64;
- char dbg_str[MAX_DBG_STR_LEN];
- unsigned int dbg_str_idx = 0;
- dbg_str[0] = '\0';
- /*
- * For now, we don't want to fault in pages individually since
- * clients are already doing manual cache maintenance. In
- * other words, the implicit caching infrastructure is in
- * place (in code) but should not be used.
- */
- flags |= ION_FLAG_CACHED_NEEDS_SYNC;
- pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
- len, align, heap_id_mask, flags);
- /*
- * Traverse the list of heaps available in this system in priority
- * order. If the heap type is supported by the client and matches the
- * request of the caller, allocate from it. Repeat until an allocation
- * has succeeded or all heaps have been tried.
- */
- if (WARN_ON(!len))
- return ERR_PTR(-EINVAL);
- len = PAGE_ALIGN(len);
- down_read(&dev->lock);
- plist_for_each_entry(heap, &dev->heaps, node) {
- /* if the caller didn't specify this heap id */
- if (!((1 << heap->id) & heap_id_mask))
- continue;
- /* Do not allow un-secure heap if secure is specified */
- if (secure_allocation &&
- !ion_heap_allow_secure_allocation(heap->type))
- continue;
- trace_ion_alloc_buffer_start(client->name, heap->name, len,
- heap_id_mask, flags);
- buffer = ion_buffer_create(heap, dev, len, align, flags);
- trace_ion_alloc_buffer_end(client->name, heap->name, len,
- heap_id_mask, flags);
- if (!IS_ERR_OR_NULL(buffer))
- break;
- trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
- heap_id_mask, flags,
- PTR_ERR(buffer));
- if (dbg_str_idx < MAX_DBG_STR_LEN) {
- unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
- int ret_value = snprintf(&dbg_str[dbg_str_idx],
- len_left, "%s ", heap->name);
- if (ret_value >= len_left) {
- /* overflow */
- dbg_str[MAX_DBG_STR_LEN-1] = '\0';
- dbg_str_idx = MAX_DBG_STR_LEN;
- } else if (ret_value >= 0) {
- dbg_str_idx += ret_value;
- } else {
- /* error */
- dbg_str[MAX_DBG_STR_LEN-1] = '\0';
- }
- }
- }
- up_read(&dev->lock);
- if (buffer == NULL) {
- trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
- heap_id_mask, flags, -ENODEV);
- return ERR_PTR(-ENODEV);
- }
- if (IS_ERR(buffer)) {
- trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
- heap_id_mask, flags,
- PTR_ERR(buffer));
- pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
- "0x%x) from heap(s) %sfor client %s\n",
- len, align, dbg_str, client->name);
- return ERR_PTR(PTR_ERR(buffer));
- }
- handle = ion_handle_create(client, buffer);
- /*
- * ion_buffer_create will create a buffer with a ref_cnt of 1,
- * and ion_handle_create will take a second reference, drop one here
- */
- ion_buffer_put(buffer);
- if (IS_ERR(handle))
- return handle;
- mutex_lock(&client->lock);
- if (grab_handle)
- ion_handle_get(handle);
- ret = ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- if (ret) {
- ion_handle_put(handle);
- handle = ERR_PTR(ret);
- }
- return handle;
- }
- struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_id_mask,
- unsigned int flags)
- {
- return __ion_alloc(client, len, align, heap_id_mask, flags, false);
- }
- EXPORT_SYMBOL(ion_alloc);
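Combined with ion_client_create() and ion_free() below, kernel-side usage looks roughly like this; a sketch in which my_ion_device and the heap id are placeholders:

    struct ion_client *client;
    struct ion_handle *handle;
    void *vaddr;

    client = ion_client_create(my_ion_device, "example");
    if (IS_ERR(client))
            return PTR_ERR(client);

    /* one page, page aligned, from a placeholder heap id, uncached */
    handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, ION_HEAP(0), 0);
    if (IS_ERR(handle)) {
            ion_client_destroy(client);
            return PTR_ERR(handle);
    }

    vaddr = ion_map_kernel(client, handle);
    if (!IS_ERR_OR_NULL(vaddr)) {
            memset(vaddr, 0, PAGE_SIZE);
            ion_unmap_kernel(client, handle);
    }

    ion_free(client, handle);
    ion_client_destroy(client);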
- static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
- {
- bool valid_handle;
- BUG_ON(client != handle->client);
- valid_handle = ion_handle_validate(client, handle);
- if (!valid_handle) {
- WARN(1, "%s: invalid handle passed to free.\n", __func__);
- return;
- }
- ion_handle_put_nolock(handle);
- }
- static void user_ion_free_nolock(struct ion_client *client,
- struct ion_handle *handle)
- {
- bool valid_handle;
- BUG_ON(client != handle->client);
- valid_handle = ion_handle_validate(client, handle);
- if (!valid_handle) {
- WARN(1, "%s: invalid handle passed to free.\n", __func__);
- return;
- }
- if (handle->user_ref_count == 0) {
- WARN(1, "%s: User does not have access!\n", __func__);
- return;
- }
- user_ion_handle_put_nolock(handle);
- }
- void ion_free(struct ion_client *client, struct ion_handle *handle)
- {
- BUG_ON(client != handle->client);
- mutex_lock(&client->lock);
- ion_free_nolock(client, handle);
- mutex_unlock(&client->lock);
- }
- EXPORT_SYMBOL(ion_free);
- int ion_phys(struct ion_client *client, struct ion_handle *handle,
- ion_phys_addr_t *addr, size_t *len)
- {
- struct ion_buffer *buffer;
- int ret;
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
- buffer = handle->buffer;
- if (!buffer->heap->ops->phys) {
- pr_err("%s: ion_phys is not implemented by this heap.\n",
- __func__);
- mutex_unlock(&client->lock);
- return -ENODEV;
- }
- mutex_unlock(&client->lock);
- ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
- return ret;
- }
- EXPORT_SYMBOL(ion_phys);
- static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
- {
- void *vaddr;
- if (buffer->kmap_cnt) {
- buffer->kmap_cnt++;
- return buffer->vaddr;
- }
- vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
- if (IS_ERR_OR_NULL(vaddr))
- return vaddr;
- buffer->vaddr = vaddr;
- buffer->kmap_cnt++;
- return vaddr;
- }
- static void *ion_handle_kmap_get(struct ion_handle *handle)
- {
- struct ion_buffer *buffer = handle->buffer;
- void *vaddr;
- if (handle->kmap_cnt) {
- handle->kmap_cnt++;
- return buffer->vaddr;
- }
- vaddr = ion_buffer_kmap_get(buffer);
- if (IS_ERR_OR_NULL(vaddr))
- return vaddr;
- handle->kmap_cnt++;
- return vaddr;
- }
- static void ion_buffer_kmap_put(struct ion_buffer *buffer)
- {
- buffer->kmap_cnt--;
- if (!buffer->kmap_cnt) {
- buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
- buffer->vaddr = NULL;
- }
- }
- static void ion_handle_kmap_put(struct ion_handle *handle)
- {
- struct ion_buffer *buffer = handle->buffer;
- handle->kmap_cnt--;
- if (!handle->kmap_cnt)
- ion_buffer_kmap_put(buffer);
- }
- void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
- {
- struct ion_buffer *buffer;
- void *vaddr;
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_kernel.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
- }
- buffer = handle->buffer;
- if (!handle->buffer->heap->ops->map_kernel) {
- pr_err("%s: map_kernel is not implemented by this heap.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-ENODEV);
- }
- mutex_lock(&buffer->lock);
- vaddr = ion_handle_kmap_get(handle);
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return vaddr;
- }
- EXPORT_SYMBOL(ion_map_kernel);
- void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
- {
- struct ion_buffer *buffer;
- mutex_lock(&client->lock);
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- ion_handle_kmap_put(handle);
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- }
- EXPORT_SYMBOL(ion_unmap_kernel);
- static int ion_debug_client_show(struct seq_file *s, void *unused)
- {
- struct ion_client *client = s->private;
- struct rb_node *n;
- seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n",
- "heap_name", "size_in_bytes", "handle refcount",
- "buffer");
- mutex_lock(&client->lock);
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- seq_printf(s, "%16.16s: %16x : %16d : %12pK",
- handle->buffer->heap->name,
- handle->buffer->size,
- atomic_read(&handle->ref.refcount),
- handle->buffer);
- seq_printf(s, "\n");
- }
- mutex_unlock(&client->lock);
- return 0;
- }
- static int ion_debug_client_open(struct inode *inode, struct file *file)
- {
- return single_open(file, ion_debug_client_show, inode->i_private);
- }
- static const struct file_operations debug_client_fops = {
- .open = ion_debug_client_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
- static bool startswith(const char *string, const char *prefix)
- {
- /*
-  * Compare against the full prefix: limiting the comparison to
-  * min(strlen(string), strlen(prefix)) would wrongly report a match
-  * whenever string is a proper prefix of prefix.
-  */
- return strncmp(string, prefix, strlen(prefix)) == 0;
- }
- static int ion_get_client_serial(const struct rb_root *root,
- const unsigned char *name)
- {
- int serial = -1;
- struct rb_node *node;
- for (node = rb_first(root); node; node = rb_next(node)) {
- int n;
- char *serial_string;
- struct ion_client *client = rb_entry(node, struct ion_client,
- node);
- if (!startswith(client->name, name))
- continue;
- serial_string = strrchr(client->name, '-');
- if (!serial_string)
- continue;
- serial_string++;
- sscanf(serial_string, "%d", &n);
- serial = max(serial, n);
- }
- return serial + 1;
- }
- struct ion_client *ion_client_create(struct ion_device *dev,
- const char *name)
- {
- struct ion_client *client;
- struct task_struct *task;
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct ion_client *entry;
- pid_t pid;
- int name_len;
- int client_serial;
- if (!name) {
- pr_err("%s: Name cannot be null\n", __func__);
- return ERR_PTR(-EINVAL);
- }
- name_len = strnlen(name, 64);
- /* add some space to accommodate the serial number suffix */
- name_len = min(64, name_len + 11);
- get_task_struct(current->group_leader);
- task_lock(current->group_leader);
- pid = task_pid_nr(current->group_leader);
- /* don't bother to store task struct for kernel threads,
- they can't be killed anyway */
- if (current->group_leader->flags & PF_KTHREAD) {
- put_task_struct(current->group_leader);
- task = NULL;
- } else {
- task = current->group_leader;
- }
- task_unlock(current->group_leader);
- client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
- if (!client) {
- if (task)
- put_task_struct(current->group_leader);
- return ERR_PTR(-ENOMEM);
- }
- client->dev = dev;
- client->handles = RB_ROOT;
- idr_init(&client->idr);
- mutex_init(&client->lock);
- client->name = kzalloc(name_len+1, GFP_KERNEL);
- if (!client->name) {
- put_task_struct(current->group_leader);
- kfree(client);
- return ERR_PTR(-ENOMEM);
- }
- client->task = task;
- client->pid = pid;
- down_write(&dev->lock);
- client_serial = ion_get_client_serial(&dev->clients, name);
- snprintf(client->name, name_len, "%s-%d", name, client_serial);
- p = &dev->clients.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_client, node);
- if (client < entry)
- p = &(*p)->rb_left;
- else if (client > entry)
- p = &(*p)->rb_right;
- }
- rb_link_node(&client->node, parent, p);
- rb_insert_color(&client->node, &dev->clients);
- client->debug_root = debugfs_create_file(client->name, 0664,
- dev->clients_debug_root,
- client, &debug_client_fops);
- if (!client->debug_root) {
- char buf[256], *path;
- path = dentry_path(dev->clients_debug_root, buf, 256);
- pr_err("Failed to created client debugfs at %s/%s\n",
- path, client->name);
- }
- up_write(&dev->lock);
- return client;
- }
- EXPORT_SYMBOL(ion_client_create);
- void ion_client_destroy(struct ion_client *client)
- {
- struct ion_device *dev = client->dev;
- struct rb_node *n;
- pr_debug("%s: %d\n", __func__, __LINE__);
- while ((n = rb_first(&client->handles))) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- ion_handle_destroy(&handle->ref);
- }
- idr_remove_all(&client->idr);
- idr_destroy(&client->idr);
- down_write(&dev->lock);
- if (client->task)
- put_task_struct(client->task);
- rb_erase(&client->node, &dev->clients);
- debugfs_remove_recursive(client->debug_root);
- up_write(&dev->lock);
- kfree(client->name);
- kfree(client);
- }
- EXPORT_SYMBOL(ion_client_destroy);
- int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
- unsigned long *flags)
- {
- struct ion_buffer *buffer;
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to %s.\n",
- __func__, __func__);
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- *flags = buffer->flags;
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return 0;
- }
- EXPORT_SYMBOL(ion_handle_get_flags);
- int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
- unsigned long *size)
- {
- struct ion_buffer *buffer;
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to %s.\n",
- __func__, __func__);
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- *size = buffer->size;
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return 0;
- }
- EXPORT_SYMBOL(ion_handle_get_size);
- struct sg_table *ion_sg_table(struct ion_client *client,
- struct ion_handle *handle)
- {
- struct ion_buffer *buffer;
- struct sg_table *table;
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_dma.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
- }
- buffer = handle->buffer;
- table = buffer->sg_table;
- mutex_unlock(&client->lock);
- return table;
- }
- EXPORT_SYMBOL(ion_sg_table);
- struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
- size_t chunk_size, size_t total_size)
- {
- struct sg_table *table;
- int i, n_chunks, ret;
- struct scatterlist *sg;
- table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!table)
- return ERR_PTR(-ENOMEM);
- n_chunks = DIV_ROUND_UP(total_size, chunk_size);
- pr_debug("creating sg_table with %d chunks\n", n_chunks);
- ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
- if (ret)
- goto err0;
- for_each_sg(table->sgl, sg, table->nents, i) {
- dma_addr_t addr = buffer_base + i * chunk_size;
- sg_dma_address(sg) = addr;
- sg_dma_len(sg) = chunk_size;
- }
- return table;
- err0:
- kfree(table);
- return ERR_PTR(ret);
- }
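A usage sketch (hypothetical caller; base is a placeholder address): describe a physically contiguous 16 MiB carveout as sixteen 1 MiB chunks:

    struct sg_table *table;

    table = ion_create_chunked_sg_table(base, SZ_1M, SZ_16M);
    if (IS_ERR(table))
            return PTR_ERR(table);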
- static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
- struct device *dev,
- enum dma_data_direction direction);
- static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction direction)
- {
- struct dma_buf *dmabuf = attachment->dmabuf;
- struct ion_buffer *buffer = dmabuf->priv;
- ion_buffer_sync_for_device(buffer, attachment->dev, direction);
- return buffer->sg_table;
- }
- static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *table,
- enum dma_data_direction direction)
- {
- }
- static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
- {
- unsigned long pages = buffer->sg_table->nents;
- unsigned long length = BITS_TO_LONGS(pages);
- buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
- if (!buffer->dirty)
- return -ENOMEM;
- return 0;
- }
- struct ion_vma_list {
- struct list_head list;
- struct vm_area_struct *vma;
- };
- static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
- struct device *dev,
- enum dma_data_direction dir)
- {
- struct scatterlist *sg;
- int i;
- struct ion_vma_list *vma_list;
- pr_debug("%s: syncing for device %s\n", __func__,
- dev ? dev_name(dev) : "null");
- if (!ion_buffer_fault_user_mappings(buffer))
- return;
- mutex_lock(&buffer->lock);
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
- if (!test_bit(i, buffer->dirty))
- continue;
- dma_sync_sg_for_device(dev, sg, 1, dir);
- clear_bit(i, buffer->dirty);
- }
- list_for_each_entry(vma_list, &buffer->vmas, list) {
- struct vm_area_struct *vma = vma_list->vma;
- zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
- NULL);
- }
- mutex_unlock(&buffer->lock);
- }
- int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
- {
- struct ion_buffer *buffer = vma->vm_private_data;
- struct scatterlist *sg;
- int i;
- mutex_lock(&buffer->lock);
- set_bit(vmf->pgoff, buffer->dirty);
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
- if (i != vmf->pgoff)
- continue;
- dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
- vm_insert_page(vma, (unsigned long)vmf->virtual_address,
- sg_page(sg));
- break;
- }
- mutex_unlock(&buffer->lock);
- return VM_FAULT_NOPAGE;
- }
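Taken together, ion_vm_fault() and ion_buffer_sync_for_device() above implement a page-granular ownership handoff; in outline:

    /*
     * CPU touches page i -> ion_vm_fault(): mark page i dirty, sync it for
     *                       the CPU, and map it into the faulting vma.
     * device DMA starts  -> ion_buffer_sync_for_device(): sync each dirty
     *                       page for the device, clear the bits, and zap
     *                       the user mappings so the next CPU access
     *                       faults (and syncs) again.
     */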
- static void ion_vm_open(struct vm_area_struct *vma)
- {
- struct ion_buffer *buffer = vma->vm_private_data;
- struct ion_vma_list *vma_list;
- vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
- if (!vma_list)
- return;
- vma_list->vma = vma;
- mutex_lock(&buffer->lock);
- list_add(&vma_list->list, &buffer->vmas);
- mutex_unlock(&buffer->lock);
- pr_debug("%s: adding %pK\n", __func__, vma);
- }
- static void ion_vm_close(struct vm_area_struct *vma)
- {
- struct ion_buffer *buffer = vma->vm_private_data;
- struct ion_vma_list *vma_list, *tmp;
- pr_debug("%s\n", __func__);
- mutex_lock(&buffer->lock);
- list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
- if (vma_list->vma != vma)
- continue;
- list_del(&vma_list->list);
- kfree(vma_list);
- pr_debug("%s: deleting %pK\n", __func__, vma);
- break;
- }
- mutex_unlock(&buffer->lock);
- if (buffer->heap->ops->unmap_user)
- buffer->heap->ops->unmap_user(buffer->heap, buffer);
- }
- struct vm_operations_struct ion_vma_ops = {
- .open = ion_vm_open,
- .close = ion_vm_close,
- .fault = ion_vm_fault,
- };
- static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
- {
- struct ion_buffer *buffer = dmabuf->priv;
- int ret = 0;
- if (!buffer->heap->ops->map_user) {
- pr_err("%s: this heap does not define a method for mapping "
- "to userspace\n", __func__);
- return -EINVAL;
- }
- if (ion_buffer_fault_user_mappings(buffer)) {
- vma->vm_private_data = buffer;
- vma->vm_ops = &ion_vma_ops;
- vma->vm_flags |= VM_MIXEDMAP;
- ion_vm_open(vma);
- return 0;
- }
- if (!(buffer->flags & ION_FLAG_CACHED))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- mutex_lock(&buffer->lock);
- /* now map it to userspace */
- ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
- mutex_unlock(&buffer->lock);
- if (ret)
- pr_err("%s: failure mapping buffer to userspace\n",
- __func__);
- return ret;
- }
- static void ion_dma_buf_release(struct dma_buf *dmabuf)
- {
- struct ion_buffer *buffer = dmabuf->priv;
- ion_buffer_put(buffer);
- }
- static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
- {
- struct ion_buffer *buffer = dmabuf->priv;
- void *vaddr;
- if (!buffer->heap->ops->map_kernel) {
- pr_err("%s: map kernel is not implemented by this heap.\n",
- __func__);
- return ERR_PTR(-ENOTTY);
- }
- mutex_lock(&buffer->lock);
- vaddr = ion_buffer_kmap_get(buffer);
- mutex_unlock(&buffer->lock);
- if (IS_ERR(vaddr))
- return vaddr;
- if (!vaddr)
- return ERR_PTR(-ENOMEM);
- return vaddr + offset * PAGE_SIZE;
- }
- static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
- void *ptr)
- {
- struct ion_buffer *buffer = dmabuf->priv;
- if (buffer->heap->ops->map_kernel) {
- mutex_lock(&buffer->lock);
- ion_buffer_kmap_put(buffer);
- mutex_unlock(&buffer->lock);
- }
- }
- static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
- size_t len,
- enum dma_data_direction direction)
- {
- return 0;
- }
- static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
- size_t len,
- enum dma_data_direction direction)
- {
- }
- struct dma_buf_ops dma_buf_ops = {
- .map_dma_buf = ion_map_dma_buf,
- .unmap_dma_buf = ion_unmap_dma_buf,
- .mmap = ion_mmap,
- .release = ion_dma_buf_release,
- .begin_cpu_access = ion_dma_buf_begin_cpu_access,
- .end_cpu_access = ion_dma_buf_end_cpu_access,
- .kmap_atomic = ion_dma_buf_kmap,
- .kunmap_atomic = ion_dma_buf_kunmap,
- .kmap = ion_dma_buf_kmap,
- .kunmap = ion_dma_buf_kunmap,
- };
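Any driver can consume the exported buffer through the generic dma-buf API; an importer-side sketch (the device pointer and fd are placeholders):

    static int example_import(struct device *dev, int fd)
    {
            struct dma_buf *dmabuf;
            struct dma_buf_attachment *attach;
            struct sg_table *sgt;

            dmabuf = dma_buf_get(fd);
            if (IS_ERR(dmabuf))
                    return PTR_ERR(dmabuf);

            attach = dma_buf_attach(dmabuf, dev);
            if (IS_ERR(attach)) {
                    dma_buf_put(dmabuf);
                    return PTR_ERR(attach);
            }

            /* ends up in ion_map_dma_buf() above */
            sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
            if (IS_ERR(sgt)) {
                    dma_buf_detach(dmabuf, attach);
                    dma_buf_put(dmabuf);
                    return PTR_ERR(sgt);
            }

            /* ... program the device with sgt ... */

            dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
            dma_buf_detach(dmabuf, attach);
            dma_buf_put(dmabuf);
            return 0;
    }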
- static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle,
- bool lock_client)
- {
- struct ion_buffer *buffer;
- struct dma_buf *dmabuf;
- bool valid_handle;
- if (lock_client)
- mutex_lock(&client->lock);
- valid_handle = ion_handle_validate(client, handle);
- if (!valid_handle) {
- WARN(1, "%s: invalid handle passed to share.\n", __func__);
- if (lock_client)
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
- }
- buffer = handle->buffer;
- ion_buffer_get(buffer);
- if (lock_client)
- mutex_unlock(&client->lock);
- dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
- if (IS_ERR(dmabuf)) {
- ion_buffer_put(buffer);
- return dmabuf;
- }
- return dmabuf;
- }
- struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle)
- {
- return __ion_share_dma_buf(client, handle, true);
- }
- EXPORT_SYMBOL(ion_share_dma_buf);
- static int __ion_share_dma_buf_fd(struct ion_client *client,
- struct ion_handle *handle, bool lock_client)
- {
- struct dma_buf *dmabuf;
- int fd;
- dmabuf = __ion_share_dma_buf(client, handle, lock_client);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
- fd = dma_buf_fd(dmabuf, O_CLOEXEC);
- if (fd < 0)
- dma_buf_put(dmabuf);
- return fd;
- }
- int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
- {
- return __ion_share_dma_buf_fd(client, handle, true);
- }
- EXPORT_SYMBOL(ion_share_dma_buf_fd);
- static int ion_share_dma_buf_fd_nolock(struct ion_client *client,
- struct ion_handle *handle)
- {
- return __ion_share_dma_buf_fd(client, handle, false);
- }
- struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
- {
- struct dma_buf *dmabuf;
- struct ion_buffer *buffer;
- struct ion_handle *handle;
- int ret;
- dmabuf = dma_buf_get(fd);
- if (IS_ERR_OR_NULL(dmabuf))
- return ERR_PTR(PTR_ERR(dmabuf));
- /* if this memory came from ion */
- if (dmabuf->ops != &dma_buf_ops) {
- pr_err("%s: can not import dmabuf from another exporter\n",
- __func__);
- dma_buf_put(dmabuf);
- return ERR_PTR(-EINVAL);
- }
- buffer = dmabuf->priv;
- mutex_lock(&client->lock);
- /* if a handle exists for this buffer just take a reference to it */
- handle = ion_handle_lookup(client, buffer);
- if (!IS_ERR_OR_NULL(handle)) {
- handle = ion_handle_get_check_overflow(handle);
- mutex_unlock(&client->lock);
- goto end;
- }
- mutex_unlock(&client->lock);
- handle = ion_handle_create(client, buffer);
- if (IS_ERR_OR_NULL(handle))
- goto end;
- mutex_lock(&client->lock);
- ret = ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- if (ret) {
- ion_handle_put(handle);
- handle = ERR_PTR(ret);
- }
- end:
- dma_buf_put(dmabuf);
- return handle;
- }
- EXPORT_SYMBOL(ion_import_dma_buf);
- static int ion_sync_for_device(struct ion_client *client, int fd)
- {
- struct dma_buf *dmabuf;
- struct ion_buffer *buffer;
- dmabuf = dma_buf_get(fd);
- if (IS_ERR_OR_NULL(dmabuf))
- return PTR_ERR(dmabuf);
- /* if this memory came from ion */
- if (dmabuf->ops != &dma_buf_ops) {
- pr_err("%s: can not sync dmabuf from another exporter\n",
- __func__);
- dma_buf_put(dmabuf);
- return -EINVAL;
- }
- buffer = dmabuf->priv;
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_BIDIRECTIONAL);
- dma_buf_put(dmabuf);
- return 0;
- }
- static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
- {
- struct ion_client *client = filp->private_data;
- switch (cmd) {
- case ION_IOC_ALLOC:
- {
- struct ion_allocation_data data;
- struct ion_handle *handle;
- if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
- return -EFAULT;
- handle = __ion_alloc(client, data.len, data.align,
- data.heap_mask, data.flags, true);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- data.handle = (ion_user_handle_t)handle->id;
- pass_to_user(handle);
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- mutex_lock(&client->lock);
- user_ion_free_nolock(client, handle);
- mutex_unlock(&client->lock);
- ion_handle_put(handle);
- return -EFAULT;
- }
- ion_handle_put(handle);
- break;
- }
- case ION_IOC_FREE:
- {
- struct ion_handle_data data;
- struct ion_handle *handle;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_handle_data)))
- return -EFAULT;
- mutex_lock(&client->lock);
- handle = ion_handle_get_by_id_nolock(client, (int)data.handle);
- if (IS_ERR(handle)) {
- mutex_unlock(&client->lock);
- return PTR_ERR(handle);
- }
- user_ion_free_nolock(client, handle);
- ion_handle_put_nolock(handle);
- mutex_unlock(&client->lock);
- break;
- }
- case ION_IOC_SHARE:
- case ION_IOC_MAP:
- {
- struct ion_fd_data data;
- struct ion_handle *handle;
- if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
- return -EFAULT;
- mutex_lock(&client->lock);
- handle = ion_handle_get_by_id_nolock(client, (int)data.handle);
- if (IS_ERR(handle)) {
- mutex_unlock(&client->lock);
- return PTR_ERR(handle);
- }
- data.fd = ion_share_dma_buf_fd_nolock(client, handle);
- ion_handle_put_nolock(handle);
- mutex_unlock(&client->lock);
- if (copy_to_user((void __user *)arg, &data, sizeof(data)))
- return -EFAULT;
- if (data.fd < 0)
- return data.fd;
- break;
- }
- case ION_IOC_IMPORT:
- {
- struct ion_fd_data data;
- struct ion_handle *handle;
- int ret = 0;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_fd_data)))
- return -EFAULT;
- handle = ion_import_dma_buf(client, data.fd);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- } else {
- data.handle = (ion_user_handle_t)handle->id;
- handle = pass_to_user(handle);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- data.handle = 0;
- }
- }
- if (copy_to_user((void __user *)arg, &data,
- sizeof(struct ion_fd_data)))
- return -EFAULT;
- if (ret < 0)
- return ret;
- break;
- }
- case ION_IOC_SYNC:
- {
- struct ion_fd_data data;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_fd_data)))
- return -EFAULT;
- ion_sync_for_device(client, data.fd);
- break;
- }
- case ION_IOC_CUSTOM:
- {
- struct ion_device *dev = client->dev;
- struct ion_custom_data data;
- if (!dev->custom_ioctl)
- return -ENOTTY;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_custom_data)))
- return -EFAULT;
- return dev->custom_ioctl(client, data.cmd, data.arg);
- }
- case ION_IOC_CLEAN_CACHES:
- case ION_IOC_INV_CACHES:
- case ION_IOC_CLEAN_INV_CACHES:
- /* guard like ION_IOC_CUSTOM: custom_ioctl may be absent */
- if (!client->dev->custom_ioctl)
- return -ENOTTY;
- return client->dev->custom_ioctl(client, cmd, arg);
- default:
- return -ENOTTY;
- }
- return 0;
- }
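From userspace the flow mirrors the handler above; a minimal sketch assuming the vendor-exported ion uapi header (the header path and heap id are placeholders):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/ion.h>

    int ion_example(void)
    {
            int ion_fd = open("/dev/ion", O_RDONLY);
            struct ion_allocation_data alloc = {
                    .len = 4096,
                    .align = 4096,
                    .heap_mask = 1 << 0,    /* placeholder heap id */
                    .flags = 0,
            };
            struct ion_fd_data share = { 0 };
            struct ion_handle_data hd = { 0 };
            void *p;

            if (ion_fd < 0)
                    return -1;
            if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)   /* sets alloc.handle */
                    goto out;
            share.handle = alloc.handle;
            if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)   /* sets share.fd */
                    goto free_handle;
            p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                     share.fd, 0);
            if (p != MAP_FAILED)
                    munmap(p, 4096);
            close(share.fd);
    free_handle:
            hd.handle = alloc.handle;
            ioctl(ion_fd, ION_IOC_FREE, &hd);       /* drop the user ref */
    out:
            close(ion_fd);
            return 0;
    }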
- static int ion_release(struct inode *inode, struct file *file)
- {
- struct ion_client *client = file->private_data;
- pr_debug("%s: %d\n", __func__, __LINE__);
- ion_client_destroy(client);
- return 0;
- }
- static int ion_open(struct inode *inode, struct file *file)
- {
- struct miscdevice *miscdev = file->private_data;
- struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
- struct ion_client *client;
- char debug_name[64];
- pr_debug("%s: %d\n", __func__, __LINE__);
- snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
- client = ion_client_create(dev, debug_name);
- if (IS_ERR_OR_NULL(client))
- return PTR_ERR(client);
- file->private_data = client;
- return 0;
- }
- static const struct file_operations ion_fops = {
- .owner = THIS_MODULE,
- .open = ion_open,
- .release = ion_release,
- .unlocked_ioctl = ion_ioctl,
- };
- static size_t ion_debug_heap_total(struct ion_client *client,
- unsigned int id)
- {
- size_t size = 0;
- struct rb_node *n;
- mutex_lock(&client->lock);
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n,
- struct ion_handle,
- node);
- if (handle->buffer->heap->id == id)
- size += handle->buffer->size;
- }
- mutex_unlock(&client->lock);
- return size;
- }
- /**
- * Create a mem_map of the heap.
- * @param s seq_file to log error message to.
- * @param heap The heap to create mem_map for.
- * @param mem_map The mem map to be created.
- */
- void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
- struct list_head *mem_map)
- {
- struct ion_device *dev = heap->dev;
- struct rb_node *cnode;
- size_t size;
- struct ion_client *client;
- if (!heap->ops->phys)
- return;
- down_read(&dev->lock);
- for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) {
- struct rb_node *hnode;
- client = rb_entry(cnode, struct ion_client, node);
- mutex_lock(&client->lock);
- for (hnode = rb_first(&client->handles);
- hnode;
- hnode = rb_next(hnode)) {
- struct ion_handle *handle = rb_entry(
- hnode, struct ion_handle, node);
- if (handle->buffer->heap == heap) {
- struct mem_map_data *data =
- kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- goto inner_error;
- heap->ops->phys(heap, handle->buffer,
- &(data->addr), &size);
- data->size = (unsigned long) size;
- data->addr_end = data->addr + data->size - 1;
- data->client_name = kstrdup(client->name,
- GFP_KERNEL);
- if (!data->client_name) {
- kfree(data);
- goto inner_error;
- }
- list_add(&data->node, mem_map);
- }
- }
- mutex_unlock(&client->lock);
- }
- up_read(&dev->lock);
- return;
- inner_error:
- seq_puts(s,
- "ERROR: out of memory. Part of memory map will not be logged\n");
- mutex_unlock(&client->lock);
- up_read(&dev->lock);
- }
- /**
- * Free the memory allocated by ion_debug_mem_map_create
- * @param mem_map The mem map to free.
- */
- static void ion_debug_mem_map_destroy(struct list_head *mem_map)
- {
- if (mem_map) {
- struct mem_map_data *data, *tmp;
- list_for_each_entry_safe(data, tmp, mem_map, node) {
- list_del(&data->node);
- kfree(data->client_name);
- kfree(data);
- }
- }
- }
- static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b)
- {
- struct mem_map_data *d1, *d2;
- d1 = list_entry(a, struct mem_map_data, node);
- d2 = list_entry(b, struct mem_map_data, node);
- /* compare explicitly: a subtraction could overflow the int return */
- if (d1->addr == d2->addr)
- return (d1->size > d2->size) - (d1->size < d2->size);
- return (d1->addr > d2->addr) ? 1 : -1;
- }
- /**
- * Print heap debug information.
- * @param s seq_file to log message to.
- * @param heap pointer to heap that we will print debug information for.
- */
- static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
- {
- if (heap->ops->print_debug) {
- struct list_head mem_map = LIST_HEAD_INIT(mem_map);
- ion_debug_mem_map_create(s, heap, &mem_map);
- list_sort(NULL, &mem_map, mem_map_cmp);
- heap->ops->print_debug(heap, s, &mem_map);
- ion_debug_mem_map_destroy(&mem_map);
- }
- }
- static int ion_debug_heap_show(struct seq_file *s, void *unused)
- {
- struct ion_heap *heap = s->private;
- struct ion_device *dev = heap->dev;
- struct rb_node *n;
- size_t total_size = 0;
- size_t total_orphaned_size = 0;
- seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
- seq_printf(s, "----------------------------------------------------\n");
- down_read(&dev->lock);
- for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
- struct ion_client *client = rb_entry(n, struct ion_client,
- node);
- size_t size = ion_debug_heap_total(client, heap->id);
- if (!size)
- continue;
- if (client->task) {
- char task_comm[TASK_COMM_LEN];
- get_task_comm(task_comm, client->task);
- seq_printf(s, "%16.s %16u %16u\n", task_comm,
- client->pid, size);
- } else {
- seq_printf(s, "%16.s %16u %16u\n", client->name,
- client->pid, size);
- }
- }
- up_read(&dev->lock);
- seq_printf(s, "----------------------------------------------------\n");
- seq_printf(s, "orphaned allocations (info is from last known client):"
- "\n");
- mutex_lock(&dev->buffer_lock);
- for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
- struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
- node);
- if (buffer->heap->id != heap->id)
- continue;
- total_size += buffer->size;
- if (!buffer->handle_count) {
- seq_printf(s, "%16.s %16u %16u %d %d\n", buffer->task_comm,
- buffer->pid, buffer->size, buffer->kmap_cnt,
- atomic_read(&buffer->ref.refcount));
- total_orphaned_size += buffer->size;
- }
- }
- mutex_unlock(&dev->buffer_lock);
- seq_printf(s, "----------------------------------------------------\n");
- seq_printf(s, "%16.s %16u\n", "total orphaned",
- total_orphaned_size);
- seq_printf(s, "%16.s %16u\n", "total ", total_size);
- seq_printf(s, "----------------------------------------------------\n");
- if (heap->debug_show)
- heap->debug_show(heap, s, unused);
- ion_heap_print_debug(s, heap);
- return 0;
- }
- static int ion_debug_heap_open(struct inode *inode, struct file *file)
- {
- return single_open(file, ion_debug_heap_show, inode->i_private);
- }
- static const struct file_operations debug_heap_fops = {
- .open = ion_debug_heap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
- #ifdef DEBUG_HEAP_SHRINKER
- static int debug_shrink_set(void *data, u64 val)
- {
- struct ion_heap *heap = data;
- struct shrink_control sc;
- int objs;
- sc.gfp_mask = -1;
- sc.nr_to_scan = 0;
- if (!val)
- return 0;
- objs = heap->shrinker.shrink(&heap->shrinker, &sc);
- sc.nr_to_scan = objs;
- heap->shrinker.shrink(&heap->shrinker, &sc);
- return 0;
- }
- static int debug_shrink_get(void *data, u64 *val)
- {
- struct ion_heap *heap = data;
- struct shrink_control sc;
- int objs;
- sc.gfp_mask = -1;
- sc.nr_to_scan = 0;
- objs = heap->shrinker.shrink(&heap->shrinker, &sc);
- *val = objs;
- return 0;
- }
- DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
- debug_shrink_set, "%llu\n");
- #endif
- void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
- {
- struct dentry *debug_file;
- if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
- !heap->ops->unmap_dma)
- pr_err("%s: can not add heap with invalid ops struct.\n",
- __func__);
- if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
- ion_heap_init_deferred_free(heap);
- heap->dev = dev;
- down_write(&dev->lock);
- /* use negative heap->id to reverse the priority -- when traversing
- the list later attempt higher id numbers first */
- plist_node_init(&heap->node, -heap->id);
- plist_add(&heap->node, &dev->heaps);
- debug_file = debugfs_create_file(heap->name, 0664,
- dev->heaps_debug_root, heap,
- &debug_heap_fops);
- if (!debug_file) {
- char buf[256], *path;
- path = dentry_path(dev->heaps_debug_root, buf, 256);
- pr_err("Failed to created heap debugfs at %s/%s\n",
- path, heap->name);
- }
- #ifdef DEBUG_HEAP_SHRINKER
- if (heap->shrinker.shrink) {
- char debug_name[64];
- snprintf(debug_name, 64, "%s_shrink", heap->name);
- debug_file = debugfs_create_file(
- debug_name, 0644, dev->heaps_debug_root, heap,
- &debug_shrink_fops);
- if (!debug_file) {
- char buf[256], *path;
- path = dentry_path(dev->heaps_debug_root, buf, 256);
- pr_err("Failed to created heap shrinker debugfs at %s/%s\n",
- path, debug_name);
- }
- }
- #endif
- up_write(&dev->lock);
- }
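Because the plist node is keyed by -heap->id, allocation walks higher heap ids first; a registration sketch in which idev and the heap objects are placeholders:

    ion_device_add_heap(idev, carveout_heap);   /* id 8: tried first */
    ion_device_add_heap(idev, system_heap);     /* id 0: fallback */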
- int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
- int version, void *data, int flags)
- {
- int ret = -EINVAL;
- struct ion_heap *heap;
- struct ion_buffer *buffer;
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- WARN(1, "%s: invalid handle passed to secure.\n", __func__);
- goto out_unlock;
- }
- buffer = handle->buffer;
- heap = buffer->heap;
- if (!ion_heap_allow_handle_secure(heap->type)) {
- pr_err("%s: cannot secure buffer from non secure heap\n",
- __func__);
- goto out_unlock;
- }
- BUG_ON(!buffer->heap->ops->secure_buffer);
- /*
- * Protect the handle via the client lock to ensure we aren't
- * racing with free
- */
- ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);
- out_unlock:
- mutex_unlock(&client->lock);
- return ret;
- }
- int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
- {
- int ret = -EINVAL;
- struct ion_heap *heap;
- struct ion_buffer *buffer;
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- WARN(1, "%s: invalid handle passed to secure.\n", __func__);
- goto out_unlock;
- }
- buffer = handle->buffer;
- heap = buffer->heap;
- if (!ion_heap_allow_handle_secure(heap->type)) {
- pr_err("%s: cannot secure buffer from non secure heap\n",
- __func__);
- goto out_unlock;
- }
- BUG_ON(!buffer->heap->ops->unsecure_buffer);
- /*
- * Protect the handle via the client lock to ensure we aren't
- * racing with free
- */
- ret = buffer->heap->ops->unsecure_buffer(buffer, 0);
- out_unlock:
- mutex_unlock(&client->lock);
- return ret;
- }
- int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
- void *data)
- {
- int ret_val = 0;
- struct ion_heap *heap;
- /*
- * traverse the list of heaps available in this system
- * and find the heap that is specified.
- */
- down_write(&dev->lock);
- plist_for_each_entry(heap, &dev->heaps, node) {
- if (!ion_heap_allow_heap_secure(heap->type))
- continue;
- if (ION_HEAP(heap->id) != heap_id)
- continue;
- if (heap->ops->secure_heap)
- ret_val = heap->ops->secure_heap(heap, version, data);
- else
- ret_val = -EINVAL;
- break;
- }
- up_write(&dev->lock);
- return ret_val;
- }
- EXPORT_SYMBOL(ion_secure_heap);
- int ion_walk_heaps(struct ion_client *client, int heap_id, void *data,
- int (*f)(struct ion_heap *heap, void *data))
- {
- int ret_val = -EINVAL;
- struct ion_heap *heap;
- struct ion_device *dev = client->dev;
- /*
- * traverse the list of heaps available in this system
- * and find the heap that is specified.
- */
- down_write(&dev->lock);
- plist_for_each_entry(heap, &dev->heaps, node) {
- if (ION_HEAP(heap->id) != heap_id)
- continue;
- ret_val = f(heap, data);
- break;
- }
- up_write(&dev->lock);
- return ret_val;
- }
- EXPORT_SYMBOL(ion_walk_heaps);
- int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
- void *data)
- {
- int ret_val = 0;
- struct ion_heap *heap;
- /*
- * traverse the list of heaps available in this system
- * and find the heap that is specified.
- */
- down_write(&dev->lock);
- plist_for_each_entry(heap, &dev->heaps, node) {
- if (!ion_heap_allow_heap_secure(heap->type))
- continue;
- if (ION_HEAP(heap->id) != heap_id)
- continue;
- if (heap->ops->unsecure_heap)
- ret_val = heap->ops->unsecure_heap(heap, version, data);
- else
- ret_val = -EINVAL;
- break;
- }
- up_write(&dev->lock);
- return ret_val;
- }
- EXPORT_SYMBOL(ion_unsecure_heap);
- struct ion_device *ion_device_create(long (*custom_ioctl)
- (struct ion_client *client,
- unsigned int cmd,
- unsigned long arg))
- {
- struct ion_device *idev;
- int ret;
- idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
- if (!idev)
- return ERR_PTR(-ENOMEM);
- idev->dev.minor = MISC_DYNAMIC_MINOR;
- idev->dev.name = "ion";
- idev->dev.fops = &ion_fops;
- idev->dev.parent = NULL;
- ret = misc_register(&idev->dev);
- if (ret) {
- pr_err("ion: failed to register misc device.\n");
- kfree(idev);
- return ERR_PTR(ret);
- }
- idev->debug_root = debugfs_create_dir("ion", NULL);
- if (!idev->debug_root) {
- pr_err("ion: failed to create debugfs root directory.\n");
- goto debugfs_done;
- }
- idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
- if (!idev->heaps_debug_root) {
- pr_err("ion: failed to create debugfs heaps directory.\n");
- goto debugfs_done;
- }
- idev->clients_debug_root = debugfs_create_dir("clients",
- idev->debug_root);
- if (!idev->clients_debug_root)
- pr_err("ion: failed to create debugfs clients directory.\n");
- debugfs_done:
- idev->custom_ioctl = custom_ioctl;
- idev->buffers = RB_ROOT;
- mutex_init(&idev->buffer_lock);
- init_rwsem(&idev->lock);
- plist_head_init(&idev->heaps);
- idev->clients = RB_ROOT;
- return idev;
- }
- void ion_device_destroy(struct ion_device *dev)
- {
- misc_deregister(&dev->dev);
- debugfs_remove_recursive(dev->debug_root);
- /* XXX need to free the heaps and clients ? */
- kfree(dev);
- }
- void __init ion_reserve(struct ion_platform_data *data)
- {
- int i;
- for (i = 0; i < data->nr; i++) {
- if (data->heaps[i].size == 0)
- continue;
- if (data->heaps[i].base == 0) {
- phys_addr_t paddr;
- paddr = memblock_alloc_base(data->heaps[i].size,
- data->heaps[i].align,
- MEMBLOCK_ALLOC_ANYWHERE);
- if (!paddr) {
- pr_err("%s: error allocating memblock for "
- "heap %d\n",
- __func__, i);
- continue;
- }
- data->heaps[i].base = paddr;
- } else {
- int ret = memblock_reserve(data->heaps[i].base,
- data->heaps[i].size);
- if (ret)
- pr_err("memblock reserve of %x@%pa failed\n",
- data->heaps[i].size,
- &data->heaps[i].base);
- }
- pr_info("%s: %s reserved base %pa size %d\n", __func__,
- data->heaps[i].name,
- &data->heaps[i].base,
- data->heaps[i].size);
- }
- }
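A board file would typically call this from its machine reserve hook with platform data shaped like the following; names, ids and sizes are illustrative only:

    static struct ion_platform_heap example_heaps[] = {
            {
                    .id    = 8,     /* placeholder heap id */
                    .type  = ION_HEAP_TYPE_CARVEOUT,
                    .name  = "example-carveout",
                    .size  = SZ_8M, /* base left 0: memblock places it */
                    .align = SZ_1M,
            },
    };

    static struct ion_platform_data example_ion_pdata = {
            .nr    = ARRAY_SIZE(example_heaps),
            .heaps = example_heaps,
    };

    /* from the machine's .reserve callback */
    ion_reserve(&example_ion_pdata);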