/* drm_gem.c */
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include "drmP.h"
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit.  On 64-bit the fake offsets start
 * above the 4 GiB mark; on 32-bit they are packed below it.
 */
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
  80. /**
  81. * Initialize the GEM device fields
  82. */
  83. int
  84. drm_gem_init(struct drm_device *dev)
  85. {
  86. struct drm_gem_mm *mm;
  87. spin_lock_init(&dev->object_name_lock);
  88. idr_init(&dev->object_name_idr);
  89. mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
  90. if (!mm) {
  91. DRM_ERROR("out of memory\n");
  92. return -ENOMEM;
  93. }
  94. dev->mm_private = mm;
  95. if (drm_ht_create(&mm->offset_hash, 12)) {
  96. kfree(mm);
  97. return -ENOMEM;
  98. }
  99. if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
  100. DRM_FILE_PAGE_OFFSET_SIZE)) {
  101. drm_ht_remove(&mm->offset_hash);
  102. kfree(mm);
  103. return -ENOMEM;
  104. }
  105. return 0;
  106. }
/* Tear down the GEM state created by drm_gem_init(), in reverse order. */
void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}
/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.  Returns 0, or the PTR_ERR from shmem_file_setup().
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	/* Object sizes must be a whole number of pages. */
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return PTR_ERR(obj->filp);

	/* Fresh object: one kref for the caller, no userspace handles yet. */
	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store.  Instead the caller is responsible for
 * backing the object and handling it.
 */
int drm_gem_private_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	/* Object sizes must be a whole number of pages. */
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;	/* no shmem file: storage is driver-managed */

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);
  151. /**
  152. * Allocate a GEM object of the specified size with shmfs backing store
  153. */
  154. struct drm_gem_object *
  155. drm_gem_object_alloc(struct drm_device *dev, size_t size)
  156. {
  157. struct drm_gem_object *obj;
  158. obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  159. if (!obj)
  160. goto free;
  161. if (drm_gem_object_init(dev, obj, size) != 0)
  162. goto free;
  163. if (dev->driver->gem_init_object != NULL &&
  164. dev->driver->gem_init_object(obj) != 0) {
  165. goto fput;
  166. }
  167. return obj;
  168. fput:
  169. /* Object_init mangles the global counters - readjust them. */
  170. fput(obj->filp);
  171. free:
  172. kfree(obj);
  173. return NULL;
  174. }
  175. EXPORT_SYMBOL(drm_gem_object_alloc);
/**
 * Removes the mapping from handle to filp for this object.
 * Returns 0, or -EINVAL if @handle is not in @filp's table.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	/* prime/driver teardown and the final unref happen outside the
	 * table lock; the callbacks may sleep. */
	if (obj->import_attach)
		drm_prime_remove_imported_buf_handle(&filp->prime,
				obj->import_attach->dmabuf);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;	/* preallocated node raced away; refill, retry */

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			/* Roll back the handle and its reference. */
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	/* Undo drm_gem_create_mmap_offset(): hash entry, mm node, map. */
	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 * Returns 0, -ENOMEM, or -ENOSPC if the offset space is exhausted.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	/* NOTE(review): allocates sizeof(struct drm_map_list) for a
	 * struct drm_local_map pointer — presumably a benign
	 * over-allocation; verify against the struct definitions. */
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
			obj->size / PAGE_SIZE, 0, 0);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
			obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	/* The node's start page becomes the fake offset userspace mmaps. */
	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
  326. /** Returns a reference to the object named by the handle. */
  327. struct drm_gem_object *
  328. drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
  329. u32 handle)
  330. {
  331. struct drm_gem_object *obj;
  332. spin_lock(&filp->table_lock);
  333. /* Check if we currently have a reference on the object */
  334. obj = idr_find(&filp->object_idr, handle);
  335. if (obj == NULL) {
  336. spin_unlock(&filp->table_lock);
  337. return NULL;
  338. }
  339. drm_gem_object_reference(obj);
  340. spin_unlock(&filp->table_lock);
  341. return obj;
  342. }
  343. EXPORT_SYMBOL(drm_gem_object_lookup);
  344. /**
  345. * Releases the handle to an mm object.
  346. */
  347. int
  348. drm_gem_close_ioctl(struct drm_device *dev, void *data,
  349. struct drm_file *file_priv)
  350. {
  351. struct drm_gem_close *args = data;
  352. int ret;
  353. if (!(dev->driver->driver_features & DRIVER_GEM))
  354. return -ENODEV;
  355. ret = drm_gem_handle_delete(file_priv, args->handle);
  356. return ret;
  357. }
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	/* Preallocate idr memory outside the spinlock; the allocation
	 * below can still lose the preallocated node and return -EAGAIN,
	 * in which case we refill and retry. */
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	} else {
		/* Already named: just report the existing global name. */
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Look up the name and take a reference under the name lock so
	 * the object can't be freed before we create a handle for it. */
	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	/* The new handle holds its own reference; drop the lookup one. */
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	/* Per-file handle -> object table, and the lock protecting it. */
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}
/**
 * Called at device close to release the file's
 * handle references on objects.
 *
 * idr_for_each() callback; @id is the handle, @ptr the object,
 * @data the owning drm_file.  Mirrors drm_gem_handle_delete()'s
 * teardown for each remaining handle.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->import_attach)
		drm_prime_remove_imported_buf_handle(&file_priv->prime,
				obj->import_attach->dmabuf);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	/* Drop every handle still in the table, then free the idr. */
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
}
/* Release the shmem backing file, if the object has one. */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
  481. /**
  482. * Called after the last reference to the object has been lost.
  483. * Must be called holding struct_ mutex
  484. *
  485. * Frees the object
  486. */
  487. void
  488. drm_gem_object_free(struct kref *kref)
  489. {
  490. struct drm_gem_object *obj = (struct drm_gem_object *) kref;
  491. struct drm_device *dev = obj->dev;
  492. BUG_ON(!mutex_is_locked(&dev->struct_mutex));
  493. if (dev->driver->gem_free_object != NULL)
  494. dev->driver->gem_free_object(obj);
  495. }
  496. EXPORT_SYMBOL(drm_gem_object_free);
/* kref release callback that must never actually run; used by
 * drm_gem_object_handle_free() to assert the name-table reference
 * is never the last one. */
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}
/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
/* VMA open callback: each vma holds its own reference on the object
 * (taken here for vmas created by mremap/fork-style splits; the
 * original mmap takes its reference in drm_gem_mmap()). */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
/* VMA close callback: drop the reference taken by drm_gem_mmap() or
 * drm_gem_vm_open() for this vma. */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	/* Not a GEM fake offset: fall through to the legacy map mmap path. */
	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Faults are serviced by the driver's gem_vm_ops; mark the vma
	 * as an un-expandable, un-core-dumped PFN mapping. */
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);