ion.c

  1. /*
  2. * drivers/gpu/ion/ion.c
  3. *
  4. * Copyright (C) 2011 Google, Inc.
  5. * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  6. *
  7. * This software is licensed under the terms of the GNU General Public
  8. * License version 2, as published by the Free Software Foundation, and
  9. * may be copied, distributed, and modified under those terms.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. */
  17. #include <linux/atomic.h>
  18. #include <linux/device.h>
  19. #include <linux/err.h>
  20. #include <linux/file.h>
  21. #include <linux/freezer.h>
  22. #include <linux/fs.h>
  23. #include <linux/anon_inodes.h>
  24. #include <linux/ion.h>
  25. #include <linux/kthread.h>
  26. #include <linux/list.h>
  27. #include <linux/list_sort.h>
  28. #include <linux/memblock.h>
  29. #include <linux/miscdevice.h>
  30. #include <linux/export.h>
  31. #include <linux/mm.h>
  32. #include <linux/mm_types.h>
  33. #include <linux/rbtree.h>
  34. #include <linux/slab.h>
  35. #include <linux/seq_file.h>
  36. #include <linux/uaccess.h>
  37. #include <linux/debugfs.h>
  38. #include <linux/dma-buf.h>
  39. #include <linux/idr.h>
  40. #include <linux/msm_ion.h>
  41. #include <trace/events/kmem.h>
  42. #include "ion_priv.h"
  43. /**
  44. * struct ion_device - the metadata of the ion device node
  45. * @dev: the actual misc device
  46. * @buffers: an rb tree of all the existing buffers
  47. * @buffer_lock: lock protecting the tree of buffers
  48. * @lock: rwsem protecting the tree of heaps and clients
  49. * @heaps: list of all the heaps in the system
  50. * @clients: an rb tree of all the existing clients
  51. */
  52. struct ion_device {
  53. struct miscdevice dev;
  54. struct rb_root buffers;
  55. struct mutex buffer_lock;
  56. struct rw_semaphore lock;
  57. struct plist_head heaps;
  58. long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
  59. unsigned long arg);
  60. struct rb_root clients;
  61. struct dentry *debug_root;
  62. struct dentry *heaps_debug_root;
  63. struct dentry *clients_debug_root;
  64. };
  65. /**
  66. * struct ion_client - a process/hw block local address space
  67. * @node: node in the tree of all clients
  68. * @dev: backpointer to ion device
  69. * @handles: an rb tree of all the handles in this client
  70. * @idr: an idr space for allocating handle ids
  71. * @lock: lock protecting the tree of handles
  72. * @name: used for debugging
  73. * @task: used for debugging
  74. *
  75. * A client represents a list of buffers this client may access.
  76. * The mutex stored here is used to protect both the tree of handles
  77. * and the handles themselves, and should be held while modifying either.
  78. */
  79. struct ion_client {
  80. struct rb_node node;
  81. struct ion_device *dev;
  82. struct rb_root handles;
  83. struct idr idr;
  84. struct mutex lock;
  85. char *name;
  86. struct task_struct *task;
  87. pid_t pid;
  88. struct dentry *debug_root;
  89. };
  90. /**
  91. * ion_handle - a client local reference to a buffer
  92. * @ref: reference count
  93. * @client: back pointer to the client the buffer resides in
  94. * @buffer: pointer to the buffer
  95. * @node: node in the client's handle rbtree
  96. * @kmap_cnt: count of times this client has mapped to kernel
  97. * @id: client-unique id allocated by client->idr
  98. *
  99. * Modifications to node or kmap_cnt should be protected by the
  100. * lock in the client. Other fields are never changed after initialization.
  101. */
  102. struct ion_handle {
  103. struct kref ref;
  104. unsigned int user_ref_count;
  105. struct ion_client *client;
  106. struct ion_buffer *buffer;
  107. struct rb_node node;
  108. unsigned int kmap_cnt;
  109. int id;
  110. };
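/*
 * Overview of the object graph built from the structures above: an
 * ion_device tracks every client in dev->clients and every buffer in
 * dev->buffers; each ion_client tracks its handles in client->handles
 * (and in client->idr for id lookup); each ion_handle pins exactly one
 * ion_buffer.  Three reference counts cooperate: buffer->ref keeps the
 * backing memory alive, handle->ref keeps the client-local handle alive,
 * and handle->user_ref_count counts the references userspace holds so
 * that user-triggered frees can never drop kernel-owned references.
 */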
  111. bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
  112. {
  113. return ((buffer->flags & ION_FLAG_CACHED) &&
  114. !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
  115. }
  116. bool ion_buffer_cached(struct ion_buffer *buffer)
  117. {
  118. return !!(buffer->flags & ION_FLAG_CACHED);
  119. }
  120. /* this function should only be called while dev->lock is held */
  121. static void ion_buffer_add(struct ion_device *dev,
  122. struct ion_buffer *buffer)
  123. {
  124. struct rb_node **p = &dev->buffers.rb_node;
  125. struct rb_node *parent = NULL;
  126. struct ion_buffer *entry;
  127. while (*p) {
  128. parent = *p;
  129. entry = rb_entry(parent, struct ion_buffer, node);
  130. if (buffer < entry) {
  131. p = &(*p)->rb_left;
  132. } else if (buffer > entry) {
  133. p = &(*p)->rb_right;
  134. } else {
  135. pr_err("%s: buffer already found.", __func__);
  136. BUG();
  137. }
  138. }
  139. rb_link_node(&buffer->node, parent, p);
  140. rb_insert_color(&buffer->node, &dev->buffers);
  141. }
  142. static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
  143. /* this function should only be called while dev->lock is held */
  144. static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
  145. struct ion_device *dev,
  146. unsigned long len,
  147. unsigned long align,
  148. unsigned long flags)
  149. {
  150. struct ion_buffer *buffer;
  151. struct sg_table *table;
  152. struct scatterlist *sg;
  153. int i, ret;
  154. buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
  155. if (!buffer)
  156. return ERR_PTR(-ENOMEM);
  157. buffer->heap = heap;
  158. buffer->flags = flags;
  159. kref_init(&buffer->ref);
  160. ret = heap->ops->allocate(heap, buffer, len, align, flags);
  161. if (ret) {
  162. if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
  163. goto err2;
  164. ion_heap_freelist_drain(heap, 0);
  165. ret = heap->ops->allocate(heap, buffer, len, align,
  166. flags);
  167. if (ret)
  168. goto err2;
  169. }
  170. buffer->dev = dev;
  171. buffer->size = len;
  172. table = heap->ops->map_dma(heap, buffer);
  173. if (IS_ERR_OR_NULL(table)) {
  174. heap->ops->free(buffer);
  175. kfree(buffer);
  176. return ERR_PTR(PTR_ERR(table));
  177. }
  178. buffer->sg_table = table;
  179. if (ion_buffer_fault_user_mappings(buffer)) {
  180. for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
  181. i) {
  182. if (sg_dma_len(sg) == PAGE_SIZE)
  183. continue;
  184. pr_err("%s: cached mappings that will be faulted in "
  185. "must have pagewise sg_lists\n", __func__);
  186. ret = -EINVAL;
  187. goto err;
  188. }
  189. ret = ion_buffer_alloc_dirty(buffer);
  190. if (ret)
  191. goto err;
  192. }
  193. buffer->dev = dev;
  194. buffer->size = len;
  195. INIT_LIST_HEAD(&buffer->vmas);
  196. mutex_init(&buffer->lock);
  197. /* this will set up dma addresses for the sglist -- it is not
  198. technically correct as per the dma api -- a specific
  199. device isn't really taking ownership here. However, in practice on
  200. our systems the only dma_address space is physical addresses.
  201. Additionally, we can't afford the overhead of invalidating every
  202. allocation via dma_map_sg. The implicit contract here is that
  203. memory coming from the heaps is ready for dma, i.e. if it has a
  204. cached mapping that mapping has been invalidated */
  205. for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
  206. if (sg_dma_address(sg) == 0)
  207. sg_dma_address(sg) = sg_phys(sg);
  208. }
  209. mutex_lock(&dev->buffer_lock);
  210. ion_buffer_add(dev, buffer);
  211. mutex_unlock(&dev->buffer_lock);
  212. return buffer;
  213. err:
  214. heap->ops->unmap_dma(heap, buffer);
  215. heap->ops->free(buffer);
  216. err2:
  217. kfree(buffer);
  218. return ERR_PTR(ret);
  219. }
  220. static void ion_delayed_unsecure(struct ion_buffer *buffer)
  221. {
  222. if (buffer->heap->ops->unsecure_buffer)
  223. buffer->heap->ops->unsecure_buffer(buffer, 1);
  224. }
  225. void ion_buffer_destroy(struct ion_buffer *buffer)
  226. {
  227. if (WARN_ON(buffer->kmap_cnt > 0))
  228. buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
  229. buffer->heap->ops->unmap_dma(buffer->heap, buffer);
  230. ion_delayed_unsecure(buffer);
  231. buffer->heap->ops->free(buffer);
  232. if (buffer->flags & ION_FLAG_CACHED)
  233. kfree(buffer->dirty);
  234. kfree(buffer);
  235. }
  236. static void _ion_buffer_destroy(struct kref *kref)
  237. {
  238. struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
  239. struct ion_heap *heap = buffer->heap;
  240. struct ion_device *dev = buffer->dev;
  241. mutex_lock(&dev->buffer_lock);
  242. rb_erase(&buffer->node, &dev->buffers);
  243. mutex_unlock(&dev->buffer_lock);
  244. if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
  245. ion_heap_freelist_add(heap, buffer);
  246. else
  247. ion_buffer_destroy(buffer);
  248. }
  249. static void ion_buffer_get(struct ion_buffer *buffer)
  250. {
  251. kref_get(&buffer->ref);
  252. }
  253. static int ion_buffer_put(struct ion_buffer *buffer)
  254. {
  255. return kref_put(&buffer->ref, _ion_buffer_destroy);
  256. }
  257. static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
  258. {
  259. mutex_lock(&buffer->lock);
  260. buffer->handle_count++;
  261. mutex_unlock(&buffer->lock);
  262. }
  263. static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
  264. {
  265. /*
  266. * when a buffer is removed from a handle, if it is not in
  267. * any other handles, copy the taskcomm and the pid of the
  268. * process it's being removed from into the buffer. At this
  269. * point there will be no way to track what processes this buffer is
  270. * being used by, it only exists as a dma_buf file descriptor.
  271. * The taskcomm and pid can provide a debug hint as to where this fd
  272. * is in the system
  273. */
  274. mutex_lock(&buffer->lock);
  275. buffer->handle_count--;
  276. BUG_ON(buffer->handle_count < 0);
  277. if (!buffer->handle_count) {
  278. struct task_struct *task;
  279. task = current->group_leader;
  280. get_task_comm(buffer->task_comm, task);
  281. buffer->pid = task_pid_nr(task);
  282. }
  283. mutex_unlock(&buffer->lock);
  284. }
  285. static struct ion_handle *ion_handle_create(struct ion_client *client,
  286. struct ion_buffer *buffer)
  287. {
  288. struct ion_handle *handle;
  289. handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
  290. if (!handle)
  291. return ERR_PTR(-ENOMEM);
  292. kref_init(&handle->ref);
  293. rb_init_node(&handle->node);
  294. handle->client = client;
  295. ion_buffer_get(buffer);
  296. ion_buffer_add_to_handle(buffer);
  297. handle->buffer = buffer;
  298. return handle;
  299. }
  300. static void ion_handle_kmap_put(struct ion_handle *);
  301. static void ion_handle_destroy(struct kref *kref)
  302. {
  303. struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
  304. struct ion_client *client = handle->client;
  305. struct ion_buffer *buffer = handle->buffer;
  306. mutex_lock(&buffer->lock);
  307. while (handle->kmap_cnt)
  308. ion_handle_kmap_put(handle);
  309. mutex_unlock(&buffer->lock);
  310. idr_remove(&client->idr, handle->id);
  311. if (!RB_EMPTY_NODE(&handle->node))
  312. rb_erase(&handle->node, &client->handles);
  313. ion_buffer_remove_from_handle(buffer);
  314. ion_buffer_put(buffer);
  315. kfree(handle);
  316. }
  317. struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
  318. {
  319. return handle->buffer;
  320. }
  321. static void ion_handle_get(struct ion_handle *handle)
  322. {
  323. kref_get(&handle->ref);
  324. }
  325. /* Must hold the client lock */
  326. static struct ion_handle* ion_handle_get_check_overflow(struct ion_handle *handle)
  327. {
  328. if (atomic_read(&handle->ref.refcount) + 1 == 0)
  329. return ERR_PTR(-EOVERFLOW);
  330. ion_handle_get(handle);
  331. return handle;
  332. }
  333. static int ion_handle_put_nolock(struct ion_handle *handle)
  334. {
  335. int ret;
  336. ret = kref_put(&handle->ref, ion_handle_destroy);
  337. return ret;
  338. }
  339. int ion_handle_put(struct ion_handle *handle)
  340. {
  341. struct ion_client *client = handle->client;
  342. int ret;
  343. mutex_lock(&client->lock);
  344. ret = ion_handle_put_nolock(handle);
  345. mutex_unlock(&client->lock);
  346. return ret;
  347. }
  348. /* Must hold the client lock */
  349. static void user_ion_handle_get(struct ion_handle *handle)
  350. {
  351. if (handle->user_ref_count++ == 0)
  352. kref_get(&handle->ref);
  353. }
  354. /* Must hold the client lock */
  355. static struct ion_handle *user_ion_handle_get_check_overflow(
  356. struct ion_handle *handle)
  357. {
  358. if (handle->user_ref_count + 1 == 0)
  359. return ERR_PTR(-EOVERFLOW);
  360. user_ion_handle_get(handle);
  361. return handle;
  362. }
  363. /* passes a kref to the user ref count.
  364. * We know we're holding a kref to the object before and
  365. * after this call, so no need to reverify handle. */
  366. static struct ion_handle *pass_to_user(struct ion_handle *handle)
  367. {
  368. struct ion_client *client = handle->client;
  369. struct ion_handle *ret;
  370. mutex_lock(&client->lock);
  371. ret = user_ion_handle_get_check_overflow(handle);
  372. ion_handle_put_nolock(handle);
  373. mutex_unlock(&client->lock);
  374. return ret;
  375. }
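/*
 * Flow sketch (derived from the ION_IOC_ALLOC path later in this file):
 * the ioctl allocates with __ion_alloc(..., grab_handle = true) so it
 * holds an extra kref, calls pass_to_user() to convert that kref into
 * the first user reference, and finally drops its own reference with
 * ion_handle_put().  If copying the result back to userspace fails, the
 * user reference is undone again with user_ion_free_nolock().
 */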
  376. /* Must hold the client lock */
  377. static int user_ion_handle_put_nolock(struct ion_handle *handle)
  378. {
  379. int ret = 0;
  380. if (--handle->user_ref_count == 0)
  381. ret = ion_handle_put_nolock(handle);
  382. return ret;
  383. }
  384. static struct ion_handle *ion_handle_lookup(struct ion_client *client,
  385. struct ion_buffer *buffer)
  386. {
  387. struct rb_node *n;
  388. for (n = rb_first(&client->handles); n; n = rb_next(n)) {
  389. struct ion_handle *handle = rb_entry(n, struct ion_handle,
  390. node);
  391. if (handle->buffer == buffer)
  392. return handle;
  393. }
  394. return NULL;
  395. }
  396. static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
  397. int id)
  398. {
  399. struct ion_handle *handle;
  400. handle = idr_find(&client->idr, id);
  401. if (handle)
  402. return ion_handle_get_check_overflow(handle);
  403. return ERR_PTR(-EINVAL);
  404. }
  405. struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
  406. int id)
  407. {
  408. struct ion_handle *handle;
  409. mutex_lock(&client->lock);
  410. handle = ion_handle_get_by_id_nolock(client, id);
  411. mutex_unlock(&client->lock);
  412. return handle;
  413. }
  414. static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
  415. {
  416. WARN_ON(!mutex_is_locked(&client->lock));
  417. return (idr_find(&client->idr, handle->id) == handle);
  418. }
  419. static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
  420. {
  421. int rc;
  422. struct rb_node **p = &client->handles.rb_node;
  423. struct rb_node *parent = NULL;
  424. struct ion_handle *entry;
  425. do {
  426. int id;
  427. rc = idr_pre_get(&client->idr, GFP_KERNEL);
  428. if (!rc)
  429. return -ENOMEM;
  430. rc = idr_get_new_above(&client->idr, handle, 1, &id);
  431. handle->id = id;
  432. } while (rc == -EAGAIN);
  433. if (rc < 0)
  434. return rc;
  435. while (*p) {
  436. parent = *p;
  437. entry = rb_entry(parent, struct ion_handle, node);
  438. if (handle < entry)
  439. p = &(*p)->rb_left;
  440. else if (handle > entry)
  441. p = &(*p)->rb_right;
  442. else
  443. WARN(1, "%s: handle already found.", __func__);
  444. }
  445. rb_link_node(&handle->node, parent, p);
  446. rb_insert_color(&handle->node, &client->handles);
  447. return 0;
  448. }
  449. struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
  450. size_t align, unsigned int heap_id_mask,
  451. unsigned int flags, bool grab_handle)
  452. {
  453. struct ion_handle *handle;
  454. struct ion_device *dev = client->dev;
  455. struct ion_buffer *buffer = NULL;
  456. struct ion_heap *heap;
  457. int ret;
  458. unsigned long secure_allocation = flags & ION_FLAG_SECURE;
  459. const unsigned int MAX_DBG_STR_LEN = 64;
  460. char dbg_str[MAX_DBG_STR_LEN];
  461. unsigned int dbg_str_idx = 0;
  462. dbg_str[0] = '\0';
  463. /*
  464. * For now, we don't want to fault in pages individually since
  465. * clients are already doing manual cache maintenance. In
  466. * other words, the implicit caching infrastructure is in
  467. * place (in code) but should not be used.
  468. */
  469. flags |= ION_FLAG_CACHED_NEEDS_SYNC;
  470. pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
  471. len, align, heap_id_mask, flags);
  472. /*
  473. * traverse the list of heaps available in this system in priority
  474. * order. If the heap type is supported by the client, and matches the
  475. request of the caller, allocate from it. Repeat until allocate has
  476. * succeeded or all heaps have been tried
  477. */
  478. if (WARN_ON(!len))
  479. return ERR_PTR(-EINVAL);
  480. len = PAGE_ALIGN(len);
  481. down_read(&dev->lock);
  482. plist_for_each_entry(heap, &dev->heaps, node) {
  483. /* if the caller didn't specify this heap id */
  484. if (!((1 << heap->id) & heap_id_mask))
  485. continue;
  486. /* Do not allow un-secure heap if secure is specified */
  487. if (secure_allocation &&
  488. !ion_heap_allow_secure_allocation(heap->type))
  489. continue;
  490. trace_ion_alloc_buffer_start(client->name, heap->name, len,
  491. heap_id_mask, flags);
  492. buffer = ion_buffer_create(heap, dev, len, align, flags);
  493. trace_ion_alloc_buffer_end(client->name, heap->name, len,
  494. heap_id_mask, flags);
  495. if (!IS_ERR_OR_NULL(buffer))
  496. break;
  497. trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
  498. heap_id_mask, flags,
  499. PTR_ERR(buffer));
  500. if (dbg_str_idx < MAX_DBG_STR_LEN) {
  501. unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
  502. int ret_value = snprintf(&dbg_str[dbg_str_idx],
  503. len_left, "%s ", heap->name);
  504. if (ret_value >= len_left) {
  505. /* overflow */
  506. dbg_str[MAX_DBG_STR_LEN-1] = '\0';
  507. dbg_str_idx = MAX_DBG_STR_LEN;
  508. } else if (ret_value >= 0) {
  509. dbg_str_idx += ret_value;
  510. } else {
  511. /* error */
  512. dbg_str[MAX_DBG_STR_LEN-1] = '\0';
  513. }
  514. }
  515. }
  516. up_read(&dev->lock);
  517. if (buffer == NULL) {
  518. trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
  519. heap_id_mask, flags, -ENODEV);
  520. return ERR_PTR(-ENODEV);
  521. }
  522. if (IS_ERR(buffer)) {
  523. trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
  524. heap_id_mask, flags,
  525. PTR_ERR(buffer));
  526. pr_debug("ION is unable to allocate 0x%zx bytes (alignment: "
  527. "0x%zx) from heap(s) %sfor client %s\n",
  528. len, align, dbg_str, client->name);
  529. return ERR_PTR(PTR_ERR(buffer));
  530. }
  531. handle = ion_handle_create(client, buffer);
  532. /*
  533. * ion_buffer_create will create a buffer with a ref_cnt of 1,
  534. * and ion_handle_create will take a second reference, drop one here
  535. */
  536. ion_buffer_put(buffer);
  537. if (IS_ERR(handle))
  538. return handle;
  539. mutex_lock(&client->lock);
  540. if (grab_handle)
  541. ion_handle_get(handle);
  542. ret = ion_handle_add(client, handle);
  543. mutex_unlock(&client->lock);
  544. if (ret) {
  545. ion_handle_put(handle);
  546. handle = ERR_PTR(ret);
  547. }
  548. return handle;
  549. }
  550. struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
  551. size_t align, unsigned int heap_id_mask,
  552. unsigned int flags)
  553. {
  554. return __ion_alloc(client, len, align, heap_id_mask, flags, false);
  555. }
  556. EXPORT_SYMBOL(ion_alloc);
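/*
 * Usage sketch for in-kernel clients (illustrative only; "idev" is an
 * assumed ion_device pointer, and the ION_HEAP()/ION_SYSTEM_HEAP_ID heap
 * mask is an assumption taken from the msm_ion.h conventions):
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *	handle = ion_alloc(client, SZ_64K, SZ_4K,
 *			   ION_HEAP(ION_SYSTEM_HEAP_ID), ION_FLAG_CACHED);
 *	if (IS_ERR_OR_NULL(handle)) {
 *		ion_client_destroy(client);
 *		return PTR_ERR(handle);
 *	}
 *	...
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */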
  557. static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
  558. {
  559. bool valid_handle;
  560. BUG_ON(client != handle->client);
  561. valid_handle = ion_handle_validate(client, handle);
  562. if (!valid_handle) {
  563. WARN(1, "%s: invalid handle passed to free.\n", __func__);
  564. return;
  565. }
  566. ion_handle_put_nolock(handle);
  567. }
  568. static void user_ion_free_nolock(struct ion_client *client,
  569. struct ion_handle *handle)
  570. {
  571. bool valid_handle;
  572. BUG_ON(client != handle->client);
  573. valid_handle = ion_handle_validate(client, handle);
  574. if (!valid_handle) {
  575. WARN(1, "%s: invalid handle passed to free.\n", __func__);
  576. return;
  577. }
  578. if (handle->user_ref_count == 0) {
  579. WARN(1, "%s: User does not have access!\n", __func__);
  580. return;
  581. }
  582. user_ion_handle_put_nolock(handle);
  583. }
  584. void ion_free(struct ion_client *client, struct ion_handle *handle)
  585. {
  586. BUG_ON(client != handle->client);
  587. mutex_lock(&client->lock);
  588. ion_free_nolock(client, handle);
  589. mutex_unlock(&client->lock);
  590. }
  591. EXPORT_SYMBOL(ion_free);
  592. int ion_phys(struct ion_client *client, struct ion_handle *handle,
  593. ion_phys_addr_t *addr, size_t *len)
  594. {
  595. struct ion_buffer *buffer;
  596. int ret;
  597. mutex_lock(&client->lock);
  598. if (!ion_handle_validate(client, handle)) {
  599. mutex_unlock(&client->lock);
  600. return -EINVAL;
  601. }
  602. buffer = handle->buffer;
  603. if (!buffer->heap->ops->phys) {
  604. pr_err("%s: ion_phys is not implemented by this heap.\n",
  605. __func__);
  606. mutex_unlock(&client->lock);
  607. return -ENODEV;
  608. }
  609. mutex_unlock(&client->lock);
  610. ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
  611. return ret;
  612. }
  613. EXPORT_SYMBOL(ion_phys);
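/*
 * Usage sketch (illustrative): ion_phys() only succeeds for heaps whose
 * ops implement ->phys, i.e. physically contiguous heaps such as
 * carveout-style heaps; other buffers return -ENODEV.  "client" and
 * "handle" are assumed to come from a prior allocation on such a heap.
 *
 *	ion_phys_addr_t pa;
 *	size_t size;
 *
 *	if (!ion_phys(client, handle, &pa, &size))
 *		pr_debug("buffer at 0x%lx, size %zu\n",
 *			 (unsigned long)pa, size);
 */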
  614. static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
  615. {
  616. void *vaddr;
  617. if (buffer->kmap_cnt) {
  618. buffer->kmap_cnt++;
  619. return buffer->vaddr;
  620. }
  621. vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
  622. if (IS_ERR_OR_NULL(vaddr))
  623. return vaddr;
  624. buffer->vaddr = vaddr;
  625. buffer->kmap_cnt++;
  626. return vaddr;
  627. }
  628. static void *ion_handle_kmap_get(struct ion_handle *handle)
  629. {
  630. struct ion_buffer *buffer = handle->buffer;
  631. void *vaddr;
  632. if (handle->kmap_cnt) {
  633. handle->kmap_cnt++;
  634. return buffer->vaddr;
  635. }
  636. vaddr = ion_buffer_kmap_get(buffer);
  637. if (IS_ERR_OR_NULL(vaddr))
  638. return vaddr;
  639. handle->kmap_cnt++;
  640. return vaddr;
  641. }
  642. static void ion_buffer_kmap_put(struct ion_buffer *buffer)
  643. {
  644. buffer->kmap_cnt--;
  645. if (!buffer->kmap_cnt) {
  646. buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
  647. buffer->vaddr = NULL;
  648. }
  649. }
  650. static void ion_handle_kmap_put(struct ion_handle *handle)
  651. {
  652. struct ion_buffer *buffer = handle->buffer;
  653. handle->kmap_cnt--;
  654. if (!handle->kmap_cnt)
  655. ion_buffer_kmap_put(buffer);
  656. }
  657. void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
  658. {
  659. struct ion_buffer *buffer;
  660. void *vaddr;
  661. mutex_lock(&client->lock);
  662. if (!ion_handle_validate(client, handle)) {
  663. pr_err("%s: invalid handle passed to map_kernel.\n",
  664. __func__);
  665. mutex_unlock(&client->lock);
  666. return ERR_PTR(-EINVAL);
  667. }
  668. buffer = handle->buffer;
  669. if (!handle->buffer->heap->ops->map_kernel) {
  670. pr_err("%s: map_kernel is not implemented by this heap.\n",
  671. __func__);
  672. mutex_unlock(&client->lock);
  673. return ERR_PTR(-ENODEV);
  674. }
  675. mutex_lock(&buffer->lock);
  676. vaddr = ion_handle_kmap_get(handle);
  677. mutex_unlock(&buffer->lock);
  678. mutex_unlock(&client->lock);
  679. return vaddr;
  680. }
  681. EXPORT_SYMBOL(ion_map_kernel);
  682. void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
  683. {
  684. struct ion_buffer *buffer;
  685. mutex_lock(&client->lock);
  686. buffer = handle->buffer;
  687. mutex_lock(&buffer->lock);
  688. ion_handle_kmap_put(handle);
  689. mutex_unlock(&buffer->lock);
  690. mutex_unlock(&client->lock);
  691. }
  692. EXPORT_SYMBOL(ion_unmap_kernel);
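/*
 * Usage sketch (illustrative; "len" is the assumed allocation size):
 * kernel mappings are reference counted per handle and per buffer, so
 * every ion_map_kernel() must be paired with an ion_unmap_kernel() on
 * the same handle.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return -ENOMEM;
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);
 */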
  693. static int ion_debug_client_show(struct seq_file *s, void *unused)
  694. {
  695. struct ion_client *client = s->private;
  696. struct rb_node *n;
  697. seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n",
  698. "heap_name", "size_in_bytes", "handle refcount",
  699. "buffer");
  700. mutex_lock(&client->lock);
  701. for (n = rb_first(&client->handles); n; n = rb_next(n)) {
  702. struct ion_handle *handle = rb_entry(n, struct ion_handle,
  703. node);
  704. seq_printf(s, "%16.16s: %16x : %16d : %12pK",
  705. handle->buffer->heap->name,
  706. handle->buffer->size,
  707. atomic_read(&handle->ref.refcount),
  708. handle->buffer);
  709. seq_printf(s, "\n");
  710. }
  711. mutex_unlock(&client->lock);
  712. return 0;
  713. }
  714. static int ion_debug_client_open(struct inode *inode, struct file *file)
  715. {
  716. return single_open(file, ion_debug_client_show, inode->i_private);
  717. }
  718. static const struct file_operations debug_client_fops = {
  719. .open = ion_debug_client_open,
  720. .read = seq_read,
  721. .llseek = seq_lseek,
  722. .release = single_release,
  723. };
  724. static bool startswith(const char *string, const char *prefix)
  725. {
  726. size_t l1 = strlen(string);
  727. size_t l2 = strlen(prefix);
  728. return strncmp(string, prefix, min(l1, l2)) == 0;
  729. }
  730. static int ion_get_client_serial(const struct rb_root *root,
  731. const unsigned char *name)
  732. {
  733. int serial = -1;
  734. struct rb_node *node;
  735. for (node = rb_first(root); node; node = rb_next(node)) {
  736. int n;
  737. char *serial_string;
  738. struct ion_client *client = rb_entry(node, struct ion_client,
  739. node);
  740. if (!startswith(client->name, name))
  741. continue;
  742. serial_string = strrchr(client->name, '-');
  743. if (!serial_string)
  744. continue;
  745. serial_string++;
  746. sscanf(serial_string, "%d", &n);
  747. serial = max(serial, n);
  748. }
  749. return serial + 1;
  750. }
  751. struct ion_client *ion_client_create(struct ion_device *dev,
  752. const char *name)
  753. {
  754. struct ion_client *client;
  755. struct task_struct *task;
  756. struct rb_node **p;
  757. struct rb_node *parent = NULL;
  758. struct ion_client *entry;
  759. pid_t pid;
  760. int name_len;
  761. int client_serial;
  762. if (!name) {
  763. pr_err("%s: Name cannot be null\n", __func__);
  764. return ERR_PTR(-EINVAL);
  765. }
  766. name_len = strnlen(name, 64);
  767. /* add some space to accommodate the serial number suffix */
  768. name_len = min(64, name_len + 11);
  769. get_task_struct(current->group_leader);
  770. task_lock(current->group_leader);
  771. pid = task_pid_nr(current->group_leader);
  772. /* don't bother to store task struct for kernel threads,
  773. they can't be killed anyway */
  774. if (current->group_leader->flags & PF_KTHREAD) {
  775. put_task_struct(current->group_leader);
  776. task = NULL;
  777. } else {
  778. task = current->group_leader;
  779. }
  780. task_unlock(current->group_leader);
  781. client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
  782. if (!client) {
  783. if (task)
  784. put_task_struct(current->group_leader);
  785. return ERR_PTR(-ENOMEM);
  786. }
  787. client->dev = dev;
  788. client->handles = RB_ROOT;
  789. idr_init(&client->idr);
  790. mutex_init(&client->lock);
  791. client->name = kzalloc(name_len+1, GFP_KERNEL);
  792. if (!client->name) {
  793. put_task_struct(current->group_leader);
  794. kfree(client);
  795. return ERR_PTR(-ENOMEM);
  796. }
  797. client->task = task;
  798. client->pid = pid;
  799. down_write(&dev->lock);
  800. client_serial = ion_get_client_serial(&dev->clients, name);
  801. snprintf(client->name, name_len, "%s-%d", name, client_serial);
  802. p = &dev->clients.rb_node;
  803. while (*p) {
  804. parent = *p;
  805. entry = rb_entry(parent, struct ion_client, node);
  806. if (client < entry)
  807. p = &(*p)->rb_left;
  808. else if (client > entry)
  809. p = &(*p)->rb_right;
  810. }
  811. rb_link_node(&client->node, parent, p);
  812. rb_insert_color(&client->node, &dev->clients);
  813. client->debug_root = debugfs_create_file(client->name, 0664,
  814. dev->clients_debug_root,
  815. client, &debug_client_fops);
  816. if (!client->debug_root) {
  817. char buf[256], *path;
  818. path = dentry_path(dev->clients_debug_root, buf, 256);
  819. pr_err("Failed to create client debugfs at %s/%s\n",
  820. path, client->name);
  821. }
  822. up_write(&dev->lock);
  823. return client;
  824. }
  825. EXPORT_SYMBOL(ion_client_create);
  826. void ion_client_destroy(struct ion_client *client)
  827. {
  828. struct ion_device *dev = client->dev;
  829. struct rb_node *n;
  830. pr_debug("%s: %d\n", __func__, __LINE__);
  831. while ((n = rb_first(&client->handles))) {
  832. struct ion_handle *handle = rb_entry(n, struct ion_handle,
  833. node);
  834. ion_handle_destroy(&handle->ref);
  835. }
  836. idr_remove_all(&client->idr);
  837. idr_destroy(&client->idr);
  838. down_write(&dev->lock);
  839. if (client->task)
  840. put_task_struct(client->task);
  841. rb_erase(&client->node, &dev->clients);
  842. debugfs_remove_recursive(client->debug_root);
  843. up_write(&dev->lock);
  844. kfree(client->name);
  845. kfree(client);
  846. }
  847. EXPORT_SYMBOL(ion_client_destroy);
  848. int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
  849. unsigned long *flags)
  850. {
  851. struct ion_buffer *buffer;
  852. mutex_lock(&client->lock);
  853. if (!ion_handle_validate(client, handle)) {
  854. pr_err("%s: invalid handle passed to %s.\n",
  855. __func__, __func__);
  856. mutex_unlock(&client->lock);
  857. return -EINVAL;
  858. }
  859. buffer = handle->buffer;
  860. mutex_lock(&buffer->lock);
  861. *flags = buffer->flags;
  862. mutex_unlock(&buffer->lock);
  863. mutex_unlock(&client->lock);
  864. return 0;
  865. }
  866. EXPORT_SYMBOL(ion_handle_get_flags);
  867. int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
  868. unsigned long *size)
  869. {
  870. struct ion_buffer *buffer;
  871. mutex_lock(&client->lock);
  872. if (!ion_handle_validate(client, handle)) {
  873. pr_err("%s: invalid handle passed to %s.\n",
  874. __func__, __func__);
  875. mutex_unlock(&client->lock);
  876. return -EINVAL;
  877. }
  878. buffer = handle->buffer;
  879. mutex_lock(&buffer->lock);
  880. *size = buffer->size;
  881. mutex_unlock(&buffer->lock);
  882. mutex_unlock(&client->lock);
  883. return 0;
  884. }
  885. EXPORT_SYMBOL(ion_handle_get_size);
  886. struct sg_table *ion_sg_table(struct ion_client *client,
  887. struct ion_handle *handle)
  888. {
  889. struct ion_buffer *buffer;
  890. struct sg_table *table;
  891. mutex_lock(&client->lock);
  892. if (!ion_handle_validate(client, handle)) {
  893. pr_err("%s: invalid handle passed to map_dma.\n",
  894. __func__);
  895. mutex_unlock(&client->lock);
  896. return ERR_PTR(-EINVAL);
  897. }
  898. buffer = handle->buffer;
  899. table = buffer->sg_table;
  900. mutex_unlock(&client->lock);
  901. return table;
  902. }
  903. EXPORT_SYMBOL(ion_sg_table);
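/*
 * Usage sketch (illustrative): the table returned here is owned by the
 * buffer and must not be freed by the caller.  As noted in
 * ion_buffer_create(), sg_dma_address() is prepopulated with the
 * physical address of each chunk.
 *
 *	struct sg_table *table = ion_sg_table(client, handle);
 *	struct scatterlist *sg;
 *	int i;
 *
 *	if (IS_ERR_OR_NULL(table))
 *		return PTR_ERR(table);
 *	for_each_sg(table->sgl, sg, table->nents, i)
 *		pr_debug("chunk %d: addr 0x%lx len %u\n", i,
 *			 (unsigned long)sg_dma_address(sg), sg_dma_len(sg));
 */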
  904. struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
  905. size_t chunk_size, size_t total_size)
  906. {
  907. struct sg_table *table;
  908. int i, n_chunks, ret;
  909. struct scatterlist *sg;
  910. table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
  911. if (!table)
  912. return ERR_PTR(-ENOMEM);
  913. n_chunks = DIV_ROUND_UP(total_size, chunk_size);
  914. pr_debug("creating sg_table with %d chunks\n", n_chunks);
  915. ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
  916. if (ret)
  917. goto err0;
  918. for_each_sg(table->sgl, sg, table->nents, i) {
  919. dma_addr_t addr = buffer_base + i * chunk_size;
  920. sg_dma_address(sg) = addr;
  921. sg_dma_len(sg) = chunk_size;
  922. }
  923. return table;
  924. err0:
  925. kfree(table);
  926. return ERR_PTR(ret);
  927. }
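/*
 * Usage sketch (illustrative; "base" and "len" are assumed to describe a
 * physically contiguous region, chunked into 64K scatterlist entries):
 *
 *	struct sg_table *table;
 *
 *	table = ion_create_chunked_sg_table(base, SZ_64K, len);
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *	...
 *	sg_free_table(table);
 *	kfree(table);
 */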
  928. static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
  929. struct device *dev,
  930. enum dma_data_direction direction);
  931. static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
  932. enum dma_data_direction direction)
  933. {
  934. struct dma_buf *dmabuf = attachment->dmabuf;
  935. struct ion_buffer *buffer = dmabuf->priv;
  936. ion_buffer_sync_for_device(buffer, attachment->dev, direction);
  937. return buffer->sg_table;
  938. }
  939. static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
  940. struct sg_table *table,
  941. enum dma_data_direction direction)
  942. {
  943. }
  944. static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
  945. {
  946. unsigned long pages = buffer->sg_table->nents;
  947. unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
  948. buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
  949. if (!buffer->dirty)
  950. return -ENOMEM;
  951. return 0;
  952. }
  953. struct ion_vma_list {
  954. struct list_head list;
  955. struct vm_area_struct *vma;
  956. };
  957. static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
  958. struct device *dev,
  959. enum dma_data_direction dir)
  960. {
  961. struct scatterlist *sg;
  962. int i;
  963. struct ion_vma_list *vma_list;
  964. pr_debug("%s: syncing for device %s\n", __func__,
  965. dev ? dev_name(dev) : "null");
  966. if (!ion_buffer_fault_user_mappings(buffer))
  967. return;
  968. mutex_lock(&buffer->lock);
  969. for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
  970. if (!test_bit(i, buffer->dirty))
  971. continue;
  972. dma_sync_sg_for_device(dev, sg, 1, dir);
  973. clear_bit(i, buffer->dirty);
  974. }
  975. list_for_each_entry(vma_list, &buffer->vmas, list) {
  976. struct vm_area_struct *vma = vma_list->vma;
  977. zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
  978. NULL);
  979. }
  980. mutex_unlock(&buffer->lock);
  981. }
  982. int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  983. {
  984. struct ion_buffer *buffer = vma->vm_private_data;
  985. struct scatterlist *sg;
  986. int i;
  987. mutex_lock(&buffer->lock);
  988. set_bit(vmf->pgoff, buffer->dirty);
  989. for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
  990. if (i != vmf->pgoff)
  991. continue;
  992. dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
  993. vm_insert_page(vma, (unsigned long)vmf->virtual_address,
  994. sg_page(sg));
  995. break;
  996. }
  997. mutex_unlock(&buffer->lock);
  998. return VM_FAULT_NOPAGE;
  999. }
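/*
 * How the fault-based cache maintenance above fits together: buffers
 * allocated with ION_FLAG_CACHED but without ION_FLAG_CACHED_NEEDS_SYNC
 * are mmapped with VM_MIXEDMAP and no pages pre-inserted.  Each
 * userspace touch faults in exactly one page, which is marked dirty and
 * synced for the CPU.  When a device is about to use the buffer,
 * ion_buffer_sync_for_device() syncs only the dirty pages back to the
 * device, clears their dirty bits, and zaps all userspace mappings, so
 * the next CPU access faults again and the cycle repeats.
 */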
  1000. static void ion_vm_open(struct vm_area_struct *vma)
  1001. {
  1002. struct ion_buffer *buffer = vma->vm_private_data;
  1003. struct ion_vma_list *vma_list;
  1004. vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
  1005. if (!vma_list)
  1006. return;
  1007. vma_list->vma = vma;
  1008. mutex_lock(&buffer->lock);
  1009. list_add(&vma_list->list, &buffer->vmas);
  1010. mutex_unlock(&buffer->lock);
  1011. pr_debug("%s: adding %pK\n", __func__, vma);
  1012. }
  1013. static void ion_vm_close(struct vm_area_struct *vma)
  1014. {
  1015. struct ion_buffer *buffer = vma->vm_private_data;
  1016. struct ion_vma_list *vma_list, *tmp;
  1017. pr_debug("%s\n", __func__);
  1018. mutex_lock(&buffer->lock);
  1019. list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
  1020. if (vma_list->vma != vma)
  1021. continue;
  1022. list_del(&vma_list->list);
  1023. kfree(vma_list);
  1024. pr_debug("%s: deleting %pK\n", __func__, vma);
  1025. break;
  1026. }
  1027. mutex_unlock(&buffer->lock);
  1028. if (buffer->heap->ops->unmap_user)
  1029. buffer->heap->ops->unmap_user(buffer->heap, buffer);
  1030. }
  1031. struct vm_operations_struct ion_vma_ops = {
  1032. .open = ion_vm_open,
  1033. .close = ion_vm_close,
  1034. .fault = ion_vm_fault,
  1035. };
  1036. static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
  1037. {
  1038. struct ion_buffer *buffer = dmabuf->priv;
  1039. int ret = 0;
  1040. if (!buffer->heap->ops->map_user) {
  1041. pr_err("%s: this heap does not define a method for mapping "
  1042. "to userspace\n", __func__);
  1043. return -EINVAL;
  1044. }
  1045. if (ion_buffer_fault_user_mappings(buffer)) {
  1046. vma->vm_private_data = buffer;
  1047. vma->vm_ops = &ion_vma_ops;
  1048. vma->vm_flags |= VM_MIXEDMAP;
  1049. ion_vm_open(vma);
  1050. return 0;
  1051. }
  1052. if (!(buffer->flags & ION_FLAG_CACHED))
  1053. vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
  1054. mutex_lock(&buffer->lock);
  1055. /* now map it to userspace */
  1056. ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
  1057. mutex_unlock(&buffer->lock);
  1058. if (ret)
  1059. pr_err("%s: failure mapping buffer to userspace\n",
  1060. __func__);
  1061. return ret;
  1062. }
  1063. static void ion_dma_buf_release(struct dma_buf *dmabuf)
  1064. {
  1065. struct ion_buffer *buffer = dmabuf->priv;
  1066. ion_buffer_put(buffer);
  1067. }
  1068. static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
  1069. {
  1070. struct ion_buffer *buffer = dmabuf->priv;
  1071. void *vaddr;
  1072. if (!buffer->heap->ops->map_kernel) {
  1073. pr_err("%s: map kernel is not implemented by this heap.\n",
  1074. __func__);
  1075. return ERR_PTR(-ENOTTY);
  1076. }
  1077. mutex_lock(&buffer->lock);
  1078. vaddr = ion_buffer_kmap_get(buffer);
  1079. mutex_unlock(&buffer->lock);
  1080. if (IS_ERR(vaddr))
  1081. return vaddr;
  1082. if (!vaddr)
  1083. return ERR_PTR(-ENOMEM);
  1084. return vaddr + offset * PAGE_SIZE;
  1085. }
  1086. static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
  1087. void *ptr)
  1088. {
  1089. struct ion_buffer *buffer = dmabuf->priv;
  1090. if (buffer->heap->ops->map_kernel) {
  1091. mutex_lock(&buffer->lock);
  1092. ion_buffer_kmap_put(buffer);
  1093. mutex_unlock(&buffer->lock);
  1094. }
  1095. }
  1096. static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
  1097. size_t len,
  1098. enum dma_data_direction direction)
  1099. {
  1100. return 0;
  1101. }
  1102. static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
  1103. size_t len,
  1104. enum dma_data_direction direction)
  1105. {
  1106. }
  1107. struct dma_buf_ops dma_buf_ops = {
  1108. .map_dma_buf = ion_map_dma_buf,
  1109. .unmap_dma_buf = ion_unmap_dma_buf,
  1110. .mmap = ion_mmap,
  1111. .release = ion_dma_buf_release,
  1112. .begin_cpu_access = ion_dma_buf_begin_cpu_access,
  1113. .end_cpu_access = ion_dma_buf_end_cpu_access,
  1114. .kmap_atomic = ion_dma_buf_kmap,
  1115. .kunmap_atomic = ion_dma_buf_kunmap,
  1116. .kmap = ion_dma_buf_kmap,
  1117. .kunmap = ion_dma_buf_kunmap,
  1118. };
  1119. static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
  1120. struct ion_handle *handle,
  1121. bool lock_client)
  1122. {
  1123. struct ion_buffer *buffer;
  1124. struct dma_buf *dmabuf;
  1125. bool valid_handle;
  1126. if (lock_client)
  1127. mutex_lock(&client->lock);
  1128. valid_handle = ion_handle_validate(client, handle);
  1129. if (!valid_handle) {
  1130. WARN(1, "%s: invalid handle passed to share.\n", __func__);
  1131. if (lock_client)
  1132. mutex_unlock(&client->lock);
  1133. return ERR_PTR(-EINVAL);
  1134. }
  1135. buffer = handle->buffer;
  1136. ion_buffer_get(buffer);
  1137. if (lock_client)
  1138. mutex_unlock(&client->lock);
  1139. dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
  1140. if (IS_ERR(dmabuf)) {
  1141. ion_buffer_put(buffer);
  1142. return dmabuf;
  1143. }
  1144. return dmabuf;
  1145. }
  1146. struct dma_buf *ion_share_dma_buf(struct ion_client *client,
  1147. struct ion_handle *handle)
  1148. {
  1149. return __ion_share_dma_buf(client, handle, true);
  1150. }
  1151. EXPORT_SYMBOL(ion_share_dma_buf);
  1152. static int __ion_share_dma_buf_fd(struct ion_client *client,
  1153. struct ion_handle *handle, bool lock_client)
  1154. {
  1155. struct dma_buf *dmabuf;
  1156. int fd;
  1157. dmabuf = __ion_share_dma_buf(client, handle, lock_client);
  1158. if (IS_ERR(dmabuf))
  1159. return PTR_ERR(dmabuf);
  1160. fd = dma_buf_fd(dmabuf, O_CLOEXEC);
  1161. if (fd < 0)
  1162. dma_buf_put(dmabuf);
  1163. return fd;
  1164. }
  1165. int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
  1166. {
  1167. return __ion_share_dma_buf_fd(client, handle, true);
  1168. }
  1169. EXPORT_SYMBOL(ion_share_dma_buf_fd);
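/*
 * Usage sketch (illustrative): the returned fd owns its own reference to
 * the underlying dma_buf, so the original handle may be freed once the
 * fd has been handed to userspace or to another driver.
 *
 *	int fd = ion_share_dma_buf_fd(client, handle);
 *
 *	if (fd < 0)
 *		return fd;
 *	ion_free(client, handle);
 *	...pass fd along; the receiver re-imports it or mmap()s it...
 */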
  1170. static int ion_share_dma_buf_fd_nolock(struct ion_client *client,
  1171. struct ion_handle *handle)
  1172. {
  1173. return __ion_share_dma_buf_fd(client, handle, false);
  1174. }
  1175. struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
  1176. {
  1177. struct dma_buf *dmabuf;
  1178. struct ion_buffer *buffer;
  1179. struct ion_handle *handle;
  1180. int ret;
  1181. dmabuf = dma_buf_get(fd);
  1182. if (IS_ERR_OR_NULL(dmabuf))
  1183. return ERR_PTR(PTR_ERR(dmabuf));
  1184. /* if this memory came from ion */
  1185. if (dmabuf->ops != &dma_buf_ops) {
  1186. pr_err("%s: can not import dmabuf from another exporter\n",
  1187. __func__);
  1188. dma_buf_put(dmabuf);
  1189. return ERR_PTR(-EINVAL);
  1190. }
  1191. buffer = dmabuf->priv;
  1192. mutex_lock(&client->lock);
  1193. /* if a handle exists for this buffer just take a reference to it */
  1194. handle = ion_handle_lookup(client, buffer);
  1195. if (!IS_ERR_OR_NULL(handle)) {
  1196. handle = ion_handle_get_check_overflow(handle);
  1197. mutex_unlock(&client->lock);
  1198. goto end;
  1199. }
  1200. mutex_unlock(&client->lock);
  1201. handle = ion_handle_create(client, buffer);
  1202. if (IS_ERR_OR_NULL(handle))
  1203. goto end;
  1204. mutex_lock(&client->lock);
  1205. ret = ion_handle_add(client, handle);
  1206. mutex_unlock(&client->lock);
  1207. if (ret) {
  1208. ion_handle_put(handle);
  1209. handle = ERR_PTR(ret);
  1210. }
  1211. end:
  1212. dma_buf_put(dmabuf);
  1213. return handle;
  1214. }
  1215. EXPORT_SYMBOL(ion_import_dma_buf);
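/*
 * Usage sketch (illustrative; "shared_fd" is assumed to be an fd
 * exported by ion_share_dma_buf_fd() in this or another process):
 *
 *	struct ion_handle *handle = ion_import_dma_buf(client, shared_fd);
 *
 *	if (IS_ERR_OR_NULL(handle))
 *		return PTR_ERR(handle);
 *	...use the handle exactly like one returned by ion_alloc()...
 */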
  1216. static int ion_sync_for_device(struct ion_client *client, int fd)
  1217. {
  1218. struct dma_buf *dmabuf;
  1219. struct ion_buffer *buffer;
  1220. dmabuf = dma_buf_get(fd);
  1221. if (IS_ERR_OR_NULL(dmabuf))
  1222. return PTR_ERR(dmabuf);
  1223. /* if this memory came from ion */
  1224. if (dmabuf->ops != &dma_buf_ops) {
  1225. pr_err("%s: can not sync dmabuf from another exporter\n",
  1226. __func__);
  1227. dma_buf_put(dmabuf);
  1228. return -EINVAL;
  1229. }
  1230. buffer = dmabuf->priv;
  1231. dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
  1232. buffer->sg_table->nents, DMA_BIDIRECTIONAL);
  1233. dma_buf_put(dmabuf);
  1234. return 0;
  1235. }
  1236. static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  1237. {
  1238. struct ion_client *client = filp->private_data;
  1239. switch (cmd) {
  1240. case ION_IOC_ALLOC:
  1241. {
  1242. struct ion_allocation_data data;
  1243. struct ion_handle *handle;
  1244. if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
  1245. return -EFAULT;
  1246. handle = __ion_alloc(client, data.len, data.align,
  1247. data.heap_mask, data.flags, true);
  1248. if (IS_ERR(handle))
  1249. return PTR_ERR(handle);
  1250. data.handle = (ion_user_handle_t)handle->id;
  1251. pass_to_user(handle);
  1252. if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
  1253. mutex_lock(&client->lock);
  1254. user_ion_free_nolock(client, handle);
  1255. mutex_unlock(&client->lock);
  1256. ion_handle_put(handle);
  1257. return -EFAULT;
  1258. }
  1259. ion_handle_put(handle);
  1260. break;
  1261. }
  1262. case ION_IOC_FREE:
  1263. {
  1264. struct ion_handle_data data;
  1265. struct ion_handle *handle;
  1266. if (copy_from_user(&data, (void __user *)arg,
  1267. sizeof(struct ion_handle_data)))
  1268. return -EFAULT;
  1269. mutex_lock(&client->lock);
  1270. handle = ion_handle_get_by_id_nolock(client, (int)data.handle);
  1271. if (IS_ERR(handle)) {
  1272. mutex_unlock(&client->lock);
  1273. return PTR_ERR(handle);
  1274. }
  1275. user_ion_free_nolock(client, handle);
  1276. ion_handle_put_nolock(handle);
  1277. mutex_unlock(&client->lock);
  1278. break;
  1279. }
  1280. case ION_IOC_SHARE:
  1281. case ION_IOC_MAP:
  1282. {
  1283. struct ion_fd_data data;
  1284. struct ion_handle *handle;
  1285. if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
  1286. return -EFAULT;
  1287. mutex_lock(&client->lock);
  1288. handle = ion_handle_get_by_id_nolock(client, (int)data.handle);
  1289. if (IS_ERR(handle)) {
  1290. mutex_unlock(&client->lock);
  1291. return PTR_ERR(handle);
  1292. }
  1293. data.fd = ion_share_dma_buf_fd_nolock(client, handle);
  1294. ion_handle_put_nolock(handle);
  1295. mutex_unlock(&client->lock);
  1296. if (copy_to_user((void __user *)arg, &data, sizeof(data)))
  1297. return -EFAULT;
  1298. if (data.fd < 0)
  1299. return data.fd;
  1300. break;
  1301. }
  1302. case ION_IOC_IMPORT:
  1303. {
  1304. struct ion_fd_data data;
  1305. struct ion_handle *handle;
  1306. int ret = 0;
  1307. if (copy_from_user(&data, (void __user *)arg,
  1308. sizeof(struct ion_fd_data)))
  1309. return -EFAULT;
  1310. handle = ion_import_dma_buf(client, data.fd);
  1311. if (IS_ERR(handle)) {
  1312. ret = PTR_ERR(handle);
  1313. } else {
  1314. data.handle = (ion_user_handle_t)handle->id;
  1315. handle = pass_to_user(handle);
  1316. if (IS_ERR(handle)) {
  1317. ret = PTR_ERR(handle);
  1318. data.handle = 0;
  1319. }
  1320. }
  1321. if (copy_to_user((void __user *)arg, &data,
  1322. sizeof(struct ion_fd_data)))
  1323. return -EFAULT;
  1324. if (ret < 0)
  1325. return ret;
  1326. break;
  1327. }
  1328. case ION_IOC_SYNC:
  1329. {
  1330. struct ion_fd_data data;
  1331. if (copy_from_user(&data, (void __user *)arg,
  1332. sizeof(struct ion_fd_data)))
  1333. return -EFAULT;
  1334. ion_sync_for_device(client, data.fd);
  1335. break;
  1336. }
  1337. case ION_IOC_CUSTOM:
  1338. {
  1339. struct ion_device *dev = client->dev;
  1340. struct ion_custom_data data;
  1341. if (!dev->custom_ioctl)
  1342. return -ENOTTY;
  1343. if (copy_from_user(&data, (void __user *)arg,
  1344. sizeof(struct ion_custom_data)))
  1345. return -EFAULT;
  1346. return dev->custom_ioctl(client, data.cmd, data.arg);
  1347. }
  1348. case ION_IOC_CLEAN_CACHES:
  1349. return client->dev->custom_ioctl(client,
  1350. ION_IOC_CLEAN_CACHES, arg);
  1351. case ION_IOC_INV_CACHES:
  1352. return client->dev->custom_ioctl(client,
  1353. ION_IOC_INV_CACHES, arg);
  1354. case ION_IOC_CLEAN_INV_CACHES:
  1355. return client->dev->custom_ioctl(client,
  1356. ION_IOC_CLEAN_INV_CACHES, arg);
  1357. default:
  1358. return -ENOTTY;
  1359. }
  1360. return 0;
  1361. }
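/*
 * Userspace usage sketch (illustrative; the device node path, open flags
 * and heap mask are assumptions, and the struct layouts come from the
 * ion uapi headers):
 *
 *	int ionfd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID),
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data free_data;
 *
 *	ioctl(ionfd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ionfd, ION_IOC_SHARE, &share);
 *	ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   share.fd, 0);
 *	...
 *	free_data.handle = alloc.handle;
 *	ioctl(ionfd, ION_IOC_FREE, &free_data);
 *	close(share.fd);
 *	close(ionfd);
 */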
  1362. static int ion_release(struct inode *inode, struct file *file)
  1363. {
  1364. struct ion_client *client = file->private_data;
  1365. pr_debug("%s: %d\n", __func__, __LINE__);
  1366. ion_client_destroy(client);
  1367. return 0;
  1368. }
  1369. static int ion_open(struct inode *inode, struct file *file)
  1370. {
  1371. struct miscdevice *miscdev = file->private_data;
  1372. struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
  1373. struct ion_client *client;
  1374. char debug_name[64];
  1375. pr_debug("%s: %d\n", __func__, __LINE__);
  1376. snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
  1377. client = ion_client_create(dev, debug_name);
  1378. if (IS_ERR_OR_NULL(client))
  1379. return PTR_ERR(client);
  1380. file->private_data = client;
  1381. return 0;
  1382. }
  1383. static const struct file_operations ion_fops = {
  1384. .owner = THIS_MODULE,
  1385. .open = ion_open,
  1386. .release = ion_release,
  1387. .unlocked_ioctl = ion_ioctl,
  1388. };
  1389. static size_t ion_debug_heap_total(struct ion_client *client,
  1390. unsigned int id)
  1391. {
  1392. size_t size = 0;
  1393. struct rb_node *n;
  1394. mutex_lock(&client->lock);
  1395. for (n = rb_first(&client->handles); n; n = rb_next(n)) {
  1396. struct ion_handle *handle = rb_entry(n,
  1397. struct ion_handle,
  1398. node);
  1399. if (handle->buffer->heap->id == id)
  1400. size += handle->buffer->size;
  1401. }
  1402. mutex_unlock(&client->lock);
  1403. return size;
  1404. }

/**
 * Create a mem_map of the heap.
 * @param s seq_file to log error message to.
 * @param heap The heap to create mem_map for.
 * @param mem_map The mem map to be created.
 */
void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
			      struct list_head *mem_map)
{
	struct ion_device *dev = heap->dev;
	struct rb_node *cnode;
	size_t size;
	struct ion_client *client;

	if (!heap->ops->phys)
		return;

	down_read(&dev->lock);
	for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) {
		struct rb_node *hnode;

		client = rb_entry(cnode, struct ion_client, node);

		mutex_lock(&client->lock);
		for (hnode = rb_first(&client->handles);
		     hnode;
		     hnode = rb_next(hnode)) {
			struct ion_handle *handle = rb_entry(
				hnode, struct ion_handle, node);
			if (handle->buffer->heap == heap) {
				struct mem_map_data *data =
					kzalloc(sizeof(*data), GFP_KERNEL);
				if (!data)
					goto inner_error;
				heap->ops->phys(heap, handle->buffer,
						&(data->addr), &size);
				data->size = (unsigned long) size;
				data->addr_end = data->addr + data->size - 1;
				data->client_name = kstrdup(client->name,
							    GFP_KERNEL);
				if (!data->client_name) {
					kfree(data);
					goto inner_error;
				}
				list_add(&data->node, mem_map);
			}
		}
		mutex_unlock(&client->lock);
	}
	up_read(&dev->lock);
	return;

inner_error:
	seq_puts(s,
		 "ERROR: out of memory. Part of memory map will not be logged\n");
	mutex_unlock(&client->lock);
	up_read(&dev->lock);
}

/**
 * Free the memory allocated by ion_debug_mem_map_create
 * @param mem_map The mem map to free.
 */
static void ion_debug_mem_map_destroy(struct list_head *mem_map)
{
	if (mem_map) {
		struct mem_map_data *data, *tmp;

		list_for_each_entry_safe(data, tmp, mem_map, node) {
			list_del(&data->node);
			kfree(data->client_name);
			kfree(data);
		}
	}
}
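
/**
 * list_sort() comparator for mem_map_data entries: order by start address,
 * falling back to size when two entries share the same address.
 */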
static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct mem_map_data *d1, *d2;

	d1 = list_entry(a, struct mem_map_data, node);
	d2 = list_entry(b, struct mem_map_data, node);
	if (d1->addr == d2->addr)
		return d1->size - d2->size;
	return d1->addr - d2->addr;
}

/**
 * Print heap debug information.
 * @param s seq_file to log message to.
 * @param heap pointer to heap that we will print debug information for.
 */
static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
{
	if (heap->ops->print_debug) {
		struct list_head mem_map = LIST_HEAD_INIT(mem_map);

		ion_debug_mem_map_create(s, heap, &mem_map);
		list_sort(NULL, &mem_map, mem_map_cmp);
		heap->ops->print_debug(heap, s, &mem_map);
		ion_debug_mem_map_destroy(&mem_map);
	}
}
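
/**
 * seq_file show handler backing the per-heap debugfs file created in
 * ion_device_add_heap() (ion/heaps/<heap name>). Prints per-client usage of
 * this heap, then orphaned buffers (buffers with no remaining handles),
 * followed by totals and the heap's own debug output when available.
 */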
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	down_read(&dev->lock);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	up_read(&dev->lock);

	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);

	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned", total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	ion_heap_print_debug(s, heap);
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;
	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif
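
/**
 * Register @heap with @dev: check that the mandatory heap ops are present,
 * set up deferred freeing when requested, insert the heap into the device's
 * priority list (higher ids are tried first), and create its debugfs entry.
 */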
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);
	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}
#endif
	up_write(&dev->lock);
}
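
/**
 * Secure the buffer backing @handle via the heap's secure_buffer op. The
 * client lock is held across the call so the buffer cannot be freed
 * underneath us; only heaps for which ion_heap_allow_handle_secure()
 * returns true are accepted.
 */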
int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
		      int version, void *data, int flags)
{
	int ret = -EINVAL;
	struct ion_heap *heap;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to secure.\n", __func__);
		goto out_unlock;
	}

	buffer = handle->buffer;
	heap = buffer->heap;
	if (!ion_heap_allow_handle_secure(heap->type)) {
		pr_err("%s: cannot secure buffer from non secure heap\n",
		       __func__);
		goto out_unlock;
	}

	BUG_ON(!buffer->heap->ops->secure_buffer);
	/*
	 * Protect the handle via the client lock to ensure we aren't
	 * racing with free
	 */
	ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);

out_unlock:
	mutex_unlock(&client->lock);
	return ret;
}
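
/**
 * Counterpart to ion_secure_handle(): unsecure the buffer backing @handle
 * via the heap's unsecure_buffer op, with the same validation and locking.
 */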
int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
{
	int ret = -EINVAL;
	struct ion_heap *heap;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
		goto out_unlock;
	}

	buffer = handle->buffer;
	heap = buffer->heap;
	if (!ion_heap_allow_handle_secure(heap->type)) {
		pr_err("%s: cannot unsecure buffer from non secure heap\n",
		       __func__);
		goto out_unlock;
	}

	BUG_ON(!buffer->heap->ops->unsecure_buffer);
	/*
	 * Protect the handle via the client lock to ensure we aren't
	 * racing with free
	 */
	ret = buffer->heap->ops->unsecure_buffer(buffer, 0);

out_unlock:
	mutex_unlock(&client->lock);
	return ret;
}
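
/**
 * Secure an entire heap. Walks the device's heap list looking for the heap
 * whose ION_HEAP(id) matches @heap_id and invokes its secure_heap op;
 * returns -EINVAL if the matching heap does not implement it.
 */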
int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
		    void *data)
{
	int ret_val = 0;
	struct ion_heap *heap;

	/*
	 * traverse the list of heaps available in this system
	 * and find the heap that is specified.
	 */
	down_write(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		if (!ion_heap_allow_heap_secure(heap->type))
			continue;
		if (ION_HEAP(heap->id) != heap_id)
			continue;
		if (heap->ops->secure_heap)
			ret_val = heap->ops->secure_heap(heap, version, data);
		else
			ret_val = -EINVAL;
		break;
	}
	up_write(&dev->lock);
	return ret_val;
}
EXPORT_SYMBOL(ion_secure_heap);
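
/**
 * Find the heap matching @heap_id and invoke the caller-supplied callback
 * @f on it while the device lock is held for writing. Returns the callback's
 * return value, or -EINVAL if no heap matched.
 *
 * Illustrative sketch only (the callback and id names are hypothetical,
 * not part of this file):
 *
 *	static int my_heap_cb(struct ion_heap *heap, void *data)
 *	{
 *		return 0;
 *	}
 *
 *	err = ion_walk_heaps(client, ION_HEAP(my_heap_id), NULL, my_heap_cb);
 */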
int ion_walk_heaps(struct ion_client *client, int heap_id, void *data,
		   int (*f)(struct ion_heap *heap, void *data))
{
	int ret_val = -EINVAL;
	struct ion_heap *heap;
	struct ion_device *dev = client->dev;

	/*
	 * traverse the list of heaps available in this system
	 * and find the heap that is specified.
	 */
	down_write(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		if (ION_HEAP(heap->id) != heap_id)
			continue;
		ret_val = f(heap, data);
		break;
	}
	up_write(&dev->lock);
	return ret_val;
}
EXPORT_SYMBOL(ion_walk_heaps);
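
/**
 * Counterpart to ion_secure_heap(): invoke the unsecure_heap op of the heap
 * identified by @heap_id, or return -EINVAL if the heap does not provide it.
 */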
int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
		      void *data)
{
	int ret_val = 0;
	struct ion_heap *heap;

	/*
	 * traverse the list of heaps available in this system
	 * and find the heap that is specified.
	 */
	down_write(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		if (!ion_heap_allow_heap_secure(heap->type))
			continue;
		if (ION_HEAP(heap->id) != heap_id)
			continue;
		if (heap->ops->unsecure_heap)
			ret_val = heap->ops->unsecure_heap(heap, version, data);
		else
			ret_val = -EINVAL;
		break;
	}
	up_write(&dev->lock);
	return ret_val;
}
EXPORT_SYMBOL(ion_unsecure_heap);
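
/**
 * Allocate and register an ion_device: registers the "ion" misc device,
 * creates the debugfs hierarchy (ion/, ion/heaps, ion/clients), and
 * initialises the buffer tree, heap list and client tree. The optional
 * @custom_ioctl callback is invoked for ION_IOC_CUSTOM and the cache
 * maintenance ioctls.
 *
 * Illustrative sketch only (the names below are hypothetical, not part of
 * this file): a platform driver would typically pair this with
 * ion_device_add_heap(), e.g.
 *
 *	idev = ion_device_create(my_platform_custom_ioctl);
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	ion_device_add_heap(idev, my_heap);
 */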
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:
	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}
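
/**
 * Tear down an ion_device created by ion_device_create(): deregister the
 * misc device and remove its debugfs tree. Heaps and clients are not freed
 * here (see the XXX note below).
 */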
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
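
/**
 * Early (init-time) reservation of carved-out heap memory. For each heap in
 * @data with a non-zero size, either allocate a region from memblock when no
 * base address was provided, or reserve the fixed base/size that was given.
 */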
void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%pa failed\n",
				       data->heaps[i].size,
				       &data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %pa size %zu\n", __func__,
			data->heaps[i].name,
			&data->heaps[i].base,
			data->heaps[i].size);
	}
}