vgem_drv.c 8.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367
  1. /*
  2. * Copyright 2011 Red Hat, Inc.
  3. * Copyright © 2014 The Chromium OS Authors
  4. *
  5. * Permission is hereby granted, free of charge, to any person obtaining a
  6. * copy of this software and associated documentation files (the "Software")
  7. * to deal in the Software without restriction, including without limitation
  8. * on the rights to use, copy, modify, merge, publish, distribute, sub
  9. * license, and/or sell copies of the Software, and to permit persons to whom
  10. * the Software is furnished to do so, subject to the following conditions:
  11. *
  12. * The above copyright notice and this permission notice (including the next
  13. * paragraph) shall be included in all copies or substantial portions of the
  14. * Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
  20. * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
  21. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Adam Jackson <ajax@redhat.com>
  25. * Ben Widawsky <ben@bwidawsk.net>
  26. */
  27. /**
  28. * This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
  29. * software renderer and the X server for efficient buffer sharing.
  30. */
  31. #include <linux/module.h>
  32. #include <linux/ramfs.h>
  33. #include <linux/shmem_fs.h>
  34. #include <linux/dma-buf.h>
  35. #include "vgem_drv.h"
  36. #define DRIVER_NAME "vgem"
  37. #define DRIVER_DESC "Virtual GEM provider"
  38. #define DRIVER_DATE "20120112"
  39. #define DRIVER_MAJOR 1
  40. #define DRIVER_MINOR 0
/* Free a vgem GEM object: release the GEM core's resources (including the
 * shmem backing store reference) and then the wrapper allocation itself.
 */
static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

	drm_gem_object_release(obj);
	kfree(vgem_obj);
}
/*
 * Page-fault handler for mmap()ed vgem objects.
 *
 * Looks up the backing page for the faulting address in the object's shmem
 * mapping and hands it to the fault core, translating shmem errors into
 * VM_FAULT_* codes.  Returns 0 on success with vmf->page populated.
 */
static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_vgem_gem_object *obj = vma->vm_private_data;
	/* We don't use vmf->pgoff since that has the fake offset */
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct page *page;

	/* Offset within the object is derived from the vma, not vmf->pgoff
	 * (which carries the fake GEM mmap offset). */
	page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
				       (vaddr - vma->vm_start) >> PAGE_SHIFT);
	if (!IS_ERR(page)) {
		vmf->page = page;
		return 0;
	} else switch (PTR_ERR(page)) {
		case -ENOSPC:
		case -ENOMEM:
			return VM_FAULT_OOM;
		case -EBUSY:
			/* NOTE(review): VM_FAULT_RETRY is returned here
			 * without checking FAULT_FLAG_ALLOW_RETRY — confirm
			 * the fault core tolerates this for -EBUSY. */
			return VM_FAULT_RETRY;
		case -EFAULT:
		case -EINVAL:
			return VM_FAULT_SIGBUS;
		default:
			/* Unexpected shmem error: warn once, fail the fault. */
			WARN_ON_ONCE(PTR_ERR(page));
			return VM_FAULT_SIGBUS;
	}
}
/* VM operations for vgem mappings: faults are served from shmem above;
 * open/close just maintain the GEM object reference count. */
static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
  77. static int vgem_open(struct drm_device *dev, struct drm_file *file)
  78. {
  79. struct vgem_file *vfile;
  80. int ret;
  81. vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
  82. if (!vfile)
  83. return -ENOMEM;
  84. file->driver_priv = vfile;
  85. ret = vgem_fence_open(vfile);
  86. if (ret) {
  87. kfree(vfile);
  88. return ret;
  89. }
  90. return 0;
  91. }
/* Per-client teardown: tear down the client's fence state and free the
 * vgem_file allocated in vgem_open(). */
static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile = file->driver_priv;

	vgem_fence_close(vfile);
	kfree(vfile);
}
  98. /* ioctls */
/*
 * Allocate a shmem-backed GEM object of at least @size bytes (rounded up
 * to whole pages) and create a userspace handle for it in @file's handle
 * table, returned through @handle.
 *
 * On success the only reference left is the one owned by the new handle,
 * so the returned object pointer is valid only while that handle lives.
 * Returns an ERR_PTR() on failure.
 */
static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      unsigned int *handle,
					      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
	if (ret)
		goto err_free;

	ret = drm_gem_handle_create(file, &obj->base, handle);
	/* Drop the allocation reference: on success the handle now owns the
	 * object; on failure this unreference frees it (so no kfree here). */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		goto err;

	return &obj->base;

	/* Only reached before drm_gem_object_init() succeeded, so a bare
	 * kfree is correct. */
err_free:
	kfree(obj);
err:
	return ERR_PTR(ret);
}
  122. static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
  123. struct drm_mode_create_dumb *args)
  124. {
  125. struct drm_gem_object *gem_object;
  126. u64 pitch, size;
  127. pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
  128. size = args->height * pitch;
  129. if (size == 0)
  130. return -EINVAL;
  131. gem_object = vgem_gem_create(dev, file, &args->handle, size);
  132. if (IS_ERR(gem_object))
  133. return PTR_ERR(gem_object);
  134. args->size = gem_object->size;
  135. args->pitch = pitch;
  136. DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
  137. return 0;
  138. }
  139. static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
  140. uint32_t handle, uint64_t *offset)
  141. {
  142. struct drm_gem_object *obj;
  143. int ret;
  144. obj = drm_gem_object_lookup(file, handle);
  145. if (!obj)
  146. return -ENOENT;
  147. if (!obj->filp) {
  148. ret = -EINVAL;
  149. goto unref;
  150. }
  151. ret = drm_gem_create_mmap_offset(obj);
  152. if (ret)
  153. goto unref;
  154. *offset = drm_vma_node_offset_addr(&obj->vma_node);
  155. unref:
  156. drm_gem_object_unreference_unlocked(obj);
  157. return ret;
  158. }
/* Driver-private ioctls: attach/signal software fences on vgem buffers. */
static struct drm_ioctl_desc vgem_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
/*
 * File-operations mmap hook.  Lets the GEM core set up the mapping, then
 * restores the caller's original vm_flags (plus VM_DONTEXPAND/VM_DONTDUMP):
 * drm_gem_mmap() marks the vma for special PFN mappings, but vgem pages
 * are ordinary shmem pages served by the fault handler.
 */
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* Snapshot flags before drm_gem_mmap() modifies them. */
	unsigned long flags = vma->vm_flags;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/* Keep the WC mmaping set by drm_gem_mmap() but our pages
	 * are ordinary and not special.
	 */
	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
/* File operations for /dev/dri nodes backed by vgem; everything except
 * mmap is the stock DRM implementation. */
static const struct file_operations vgem_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = vgem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
	.release = drm_release,
};
/*
 * PRIME pin hook, called when the object is exported as a dma-buf.
 * vgem has no device memory to pin; this only flushes CPU caches.
 */
static int vgem_prime_pin(struct drm_gem_object *obj)
{
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	/* Flush the object from the CPU cache so that importers can rely
	 * on coherent indirect access via the exported dma-address.
	 */
	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	drm_clflush_pages(pages, n_pages);
	/* dirty=true so shmem keeps the (flushed) page contents. */
	drm_gem_put_pages(obj, pages, true, false);

	return 0;
}
  199. static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
  200. {
  201. struct sg_table *st;
  202. struct page **pages;
  203. pages = drm_gem_get_pages(obj);
  204. if (IS_ERR(pages))
  205. return ERR_CAST(pages);
  206. st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
  207. drm_gem_put_pages(obj, pages, false, false);
  208. return st;
  209. }
  210. static void *vgem_prime_vmap(struct drm_gem_object *obj)
  211. {
  212. long n_pages = obj->size >> PAGE_SHIFT;
  213. struct page **pages;
  214. void *addr;
  215. pages = drm_gem_get_pages(obj);
  216. if (IS_ERR(pages))
  217. return NULL;
  218. addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
  219. drm_gem_put_pages(obj, pages, false, false);
  220. return addr;
  221. }
/* PRIME vunmap hook: undo vgem_prime_vmap(). */
static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	vunmap(vaddr);
}
/*
 * PRIME mmap hook: map the object's shmem file directly into userspace.
 *
 * Delegates to the shmem file's own ->mmap() and then swaps the vma's
 * file reference over to the backing file, so the mapping pins the shmem
 * store rather than the dma-buf/DRM file it was opened through.  The
 * mapping is forced write-combining to match vgem's other mmap paths.
 */
static int vgem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	int ret;

	/* Refuse mappings larger than the object. */
	if (obj->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Only shmem-backed objects can be mapped this way. */
	if (!obj->filp)
		return -ENODEV;

	ret = obj->filp->f_op->mmap(obj->filp, vma);
	if (ret)
		return ret;

	/* Drop the reference on the file mmap() installed and hold the
	 * shmem backing file instead. */
	fput(vma->vm_file);
	vma->vm_file = get_file(obj->filp);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	return 0;
}
/* vgem driver description: a GEM-only driver (no KMS) with full PRIME
 * export support and driver-private fence ioctls. */
static struct drm_driver vgem_driver = {
	.driver_features = DRIVER_GEM | DRIVER_PRIME,
	.open = vgem_open,
	.preclose = vgem_preclose,
	.gem_free_object_unlocked = vgem_gem_free_object,
	.gem_vm_ops = &vgem_gem_vm_ops,
	.ioctls = vgem_ioctls,
	.num_ioctls = ARRAY_SIZE(vgem_ioctls),
	.fops = &vgem_driver_fops,
	.dumb_create = vgem_gem_dumb_create,
	.dumb_map_offset = vgem_gem_dumb_map,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.gem_prime_pin = vgem_prime_pin,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
	.gem_prime_vmap = vgem_prime_vmap,
	.gem_prime_vunmap = vgem_prime_vunmap,
	.gem_prime_mmap = vgem_prime_mmap,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};
/* The single virtual device instance, created at module init. */
static struct drm_device *vgem_device;
  268. static int __init vgem_init(void)
  269. {
  270. int ret;
  271. vgem_device = drm_dev_alloc(&vgem_driver, NULL);
  272. if (IS_ERR(vgem_device)) {
  273. ret = PTR_ERR(vgem_device);
  274. goto out;
  275. }
  276. ret = drm_dev_register(vgem_device, 0);
  277. if (ret)
  278. goto out_unref;
  279. return 0;
  280. out_unref:
  281. drm_dev_unref(vgem_device);
  282. out:
  283. return ret;
  284. }
/* Module exit: unregister the device and drop the init-time reference. */
static void __exit vgem_exit(void)
{
	drm_dev_unregister(vgem_device);
	drm_dev_unref(vgem_device);
}
/* Standard module plumbing and metadata. */
module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");