armada_gem.c

/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

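/*
 * Fault handler for user mappings of GEM objects.  The faulting address
 * is converted to an offset from vm_start and resolved to a single PFN
 * relative to the object's base physical address, so this only works
 * for objects with contiguous backing (phys_addr set).
 */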
static int armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
	int ret;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

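/*
 * Release a GEM object: drop the mmap offset, free whichever backing
 * store the object has (system pages, or a node in the linear region
 * plus its ioremap), and tear down any dma-buf import attachment.
 */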
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

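/*
 * Attach backing store to a GEM object.  Small objects (cursors) are
 * backed by system pages obtained with alloc_pages(); anything larger
 * is carved out of the driver's linear (physically contiguous) region
 * and cleared through a temporary write-combining mapping.
 */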
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

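/*
 * Return a kernel virtual address for the object, creating a
 * write-combining ioremap for linear-backed objects on first use.
 * Page-backed objects already have obj->addr set at allocation time.
 */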
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

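/*
 * Allocate a GEM object without shmem backing.  These are used here for
 * dumb buffers (backed later from the linear region) and for dma-buf
 * imports, where the backing store belongs to the exporter.
 */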
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

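/*
 * Allocate a shmem-backed GEM object.  The backing pages may come from
 * highmem, and the mapping's GFP mask marks them as reclaimable.
 */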
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
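/*
 * Create a dumb buffer: compute the pitch and size from the requested
 * geometry, allocate a private object and back it from the linear
 * region so that the scanout hardware can address it directly.
 */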
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

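/*
 * Look up a dumb buffer handle and report the fake mmap offset that
 * userspace should pass to mmap() on the DRM device node.  Imported
 * objects are refused since their pages belong to the exporter.
 */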
int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
	uint32_t handle, uint64_t *offset)
{
	struct armada_gem_object *obj;
	int ret = 0;

	obj = armada_gem_object_lookup(file, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object\n");
		return -EINVAL;
	}

	/* Don't allow imported objects to be mapped */
	if (obj->obj.import_attach) {
		ret = -EINVAL;
		goto err_unref;
	}

	ret = drm_gem_create_mmap_offset(&obj->obj);
	if (ret == 0) {
		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
	}

 err_unref:
	drm_gem_object_unreference_unlocked(&obj->obj);

	return ret;
}

int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
	uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/* Private driver gem ioctls */
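/*
 * The three driver-private ioctls below implement a simple
 * create/mmap/pwrite flow for shmem-backed objects.  A rough userspace
 * sketch (assuming the DRM_IOCTL_ARMADA_GEM_* wrappers and structures
 * from the uapi header <drm/armada_drm.h>) looks like:
 *
 *	struct drm_armada_gem_create create = { .size = len };
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &create);
 *
 *	struct drm_armada_gem_mmap map = {
 *		.handle = create.handle, .offset = 0, .size = len,
 *	};
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_MMAP, &map);
 *
 * or, to copy data in without mapping:
 *
 *	struct drm_armada_gem_pwrite pw = {
 *		.handle = create.handle, .offset = 0, .size = len,
 *		.ptr = (uint64_t)(uintptr_t)data,
 *	};
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_PWRITE, &pw);
 */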
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_unreference_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_unreference_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;
	return 0;
}

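/*
 * Copy data from userspace into a kernel-mapped object.  The user
 * buffer is validated and faulted in up front, the requested range is
 * bounds-checked against the object, and any registered update hook
 * (used by the cursor code) is invoked after a successful copy.
 */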
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
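/*
 * Build a scatterlist for an exported object.  shmem-backed objects get
 * one entry per page (pinned via shmem_read_mapping_page()), page-backed
 * objects get a single entry, and linear-backed objects have no struct
 * pages at all, so the DMA address and length are filled in directly
 * without calling dma_map_sg().
 */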
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

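/*
 * CPU access to exported buffers is not supported: the kmap and mmap
 * dma-buf callbacks are stubbed out, so importers must go through the
 * scatterlist interface above.
 */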
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map_atomic	= armada_gem_dmabuf_no_kmap,
	.unmap_atomic	= armada_gem_dmabuf_no_kunmap,
	.map		= armada_gem_dmabuf_no_kmap,
	.unmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

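/*
 * Export a GEM object as a dma-buf using the driver's own dma_buf_ops,
 * so the scatterlist construction above is used for any importer.
 */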
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}

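/*
 * Import a dma-buf.  Buffers we exported ourselves are short-circuited
 * by taking a reference on the underlying GEM object; foreign buffers
 * are wrapped in a private object with the attachment recorded, and the
 * actual mapping is deferred to armada_gem_map_import().
 */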
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

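/*
 * Map an imported dma-buf for DMA.  The hardware needs one contiguous
 * region, so anything that maps to more than a single scatterlist
 * entry, or to a region smaller than the object, is rejected.
 */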
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}