mtk_drm_gem.c

/*
 * Copyright (c) 2015 MediaTek Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <linux/dma-buf.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
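
/*
 * Allocate the mtk_drm_gem_obj wrapper and initialize its base GEM object.
 * The size is rounded up to page granularity; the backing memory itself is
 * allocated by the callers below.
 */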
static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct mtk_drm_gem_obj *mtk_gem_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
	if (!mtk_gem_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(mtk_gem_obj);
		return ERR_PTR(ret);
	}

	return mtk_gem_obj;
}
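
/*
 * Create a GEM object backed by a write-combined buffer from
 * dma_alloc_attrs(). When alloc_kmap is false, DMA_ATTR_NO_KERNEL_MAPPING
 * is added so no kernel virtual mapping is set up; otherwise the cookie
 * returned by dma_alloc_attrs() doubles as the kernel virtual address
 * (kvaddr).
 */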
struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
					   size_t size, bool alloc_kmap)
{
	struct mtk_drm_private *priv = dev->dev_private;
	struct mtk_drm_gem_obj *mtk_gem;
	struct drm_gem_object *obj;
	int ret;

	mtk_gem = mtk_drm_gem_init(dev, size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	obj = &mtk_gem->base;

	mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
					  &mtk_gem->dma_addr, GFP_KERNEL,
					  mtk_gem->dma_attrs);
	if (!mtk_gem->cookie) {
		DRM_ERROR("failed to allocate %zx byte dma buffer\n", obj->size);
		ret = -ENOMEM;
		goto err_gem_free;
	}

	if (alloc_kmap)
		mtk_gem->kvaddr = mtk_gem->cookie;

	DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
			 mtk_gem->cookie, &mtk_gem->dma_addr,
			 size);

	return mtk_gem;

err_gem_free:
	drm_gem_object_release(obj);
	kfree(mtk_gem);
	return ERR_PTR(ret);
}
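
/*
 * Free a GEM object: imported objects hand their sg_table back to PRIME,
 * locally allocated objects return their buffer to the DMA API, and the
 * base GEM object is released in both cases.
 */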
void mtk_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	if (mtk_gem->sg)
		drm_prime_gem_destroy(obj, mtk_gem->sg);
	else
		dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
			       mtk_gem->dma_addr, mtk_gem->dma_attrs);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(mtk_gem);
}
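
/*
 * Dumb-buffer creation for KMS: derive pitch and size from the requested
 * geometry, allocate the buffer without a kernel mapping, and return a
 * handle to userspace.
 */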
int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct mtk_drm_gem_obj *mtk_gem;
	int ret;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	mtk_gem = mtk_drm_gem_create(dev, args->size, false);
	if (IS_ERR(mtk_gem))
		return PTR_ERR(mtk_gem);

	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the returned handle is the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(&mtk_gem->base);

	return 0;

err_handle_create:
	mtk_drm_gem_free_object(&mtk_gem->base);
	return ret;
}
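
/*
 * Look up a GEM object by handle and report the fake mmap offset that
 * userspace passes to mmap() on the DRM device node.
 */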
int mtk_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				struct drm_device *dev, uint32_t handle,
				uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);

out:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
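
/*
 * Common mmap helper: map the object's DMA buffer into the given VMA via
 * dma_mmap_attrs().
 */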
static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	int ret;
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	/*
	 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
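
/*
 * Map a GEM object given directly (no file-offset lookup): set up the VMA
 * for the object, then map its buffer.
 */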
int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return mtk_drm_gem_object_mmap(obj, vma);
}
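
/*
 * mmap() file operation: drm_gem_mmap() resolves the object from the fake
 * offset and sets up the VMA, then the buffer is mapped into it.
 */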
int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	obj = vma->vm_private_data;

	return mtk_drm_gem_object_mmap(obj, vma);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
				    mtk_gem->dma_addr, obj->size,
				    mtk_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}
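
/*
 * Import a PRIME buffer: the imported sg_table must be contiguous in DMA
 * address space, since the object only stores a single dma_addr.
 */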
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct mtk_drm_gem_obj *mtk_gem;
	int ret;
	struct scatterlist *s;
	unsigned int i;
	dma_addr_t expected;

	mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	expected = sg_dma_address(sg->sgl);
	for_each_sg(sg->sgl, s, sg->nents, i) {
		if (sg_dma_address(s) != expected) {
			DRM_ERROR("sg_table is not contiguous\n");
			ret = -EINVAL;
			goto err_gem_free;
		}
		expected = sg_dma_address(s) + sg_dma_len(s);
	}

	mtk_gem->dma_addr = sg_dma_address(sg->sgl);
	mtk_gem->sg = sg;

	return &mtk_gem->base;

err_gem_free:
	/* release the base GEM object initialized by mtk_drm_gem_init() */
	drm_gem_object_release(&mtk_gem->base);
	kfree(mtk_gem);
	return ERR_PTR(ret);
}