drm_gem_cma_helper.c

/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 */
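
/*
 * Usage sketch (illustrative only, not part of this helper): the kernel-doc
 * below names the driver callbacks that each exported function is meant to
 * back. A minimal, hypothetical driver using these helpers might wire them
 * up roughly like this (all "foo" names are made up):
 *
 *        static const struct file_operations foo_fops = {
 *                .owner          = THIS_MODULE,
 *                .open           = drm_open,
 *                .release        = drm_release,
 *                .unlocked_ioctl = drm_ioctl,
 *                .mmap           = drm_gem_cma_mmap,
 *        };
 *
 *        static struct drm_driver foo_driver = {
 *                .driver_features  = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
 *                .gem_free_object  = drm_gem_cma_free_object,
 *                .gem_vm_ops       = &drm_gem_cma_vm_ops,
 *                .dumb_create      = drm_gem_cma_dumb_create,
 *                .dumb_map_offset  = drm_gem_cma_dumb_map_offset,
 *                .dumb_destroy     = drm_gem_dumb_destroy,
 *                .fops             = &foo_fops,
 *        };
 */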

/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        if (drm->driver->gem_create_object)
                gem_obj = drm->driver->gem_create_object(drm, size);
        else
                gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
        if (!gem_obj)
                return ERR_PTR(-ENOMEM);
        cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);

        ret = drm_gem_object_init(drm, gem_obj, size);
        if (ret)
                goto error;

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret) {
                drm_gem_object_release(gem_obj);
                goto error;
        }

        return cma_obj;

error:
        kfree(cma_obj);
        return ERR_PTR(ret);
}

/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates a contiguous chunk of
 * memory as backing store. The backing memory has the writecombine attribute
 * set.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
                                              size_t size)
{
        struct drm_gem_cma_object *cma_obj;
        int ret;

        size = round_up(size, PAGE_SIZE);

        cma_obj = __drm_gem_cma_create(drm, size);
        if (IS_ERR(cma_obj))
                return cma_obj;

        cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
                                      GFP_KERNEL | __GFP_NOWARN);
        if (!cma_obj->vaddr) {
                dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
                        size);
                ret = -ENOMEM;
                goto error;
        }

        return cma_obj;

error:
        drm_gem_object_unreference_unlocked(&cma_obj->base);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
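
/*
 * Illustrative caller (hypothetical, not part of this file): kernel code that
 * needs a contiguous scanout buffer can allocate one directly and must check
 * the ERR_PTR() return before using it. buf->vaddr is the kernel virtual
 * address of the backing store and buf->paddr its DMA address:
 *
 *        struct drm_gem_cma_object *buf;
 *
 *        buf = drm_gem_cma_create(drm, pitch * height);
 *        if (IS_ERR(buf))
 *                return PTR_ERR(buf);
 */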

/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a physically contiguous
 * chunk of memory as backing store. The GEM object is then added to the list
 * of objects associated with the given file and a handle to it is returned.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
                               struct drm_device *drm, size_t size,
                               uint32_t *handle)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        cma_obj = drm_gem_cma_create(drm, size);
        if (IS_ERR(cma_obj))
                return cma_obj;

        gem_obj = &cma_obj->base;

        /*
         * Allocate an ID in the IDR table where the object is registered;
         * the handle carries that ID, which is what userspace sees.
         */
        ret = drm_gem_handle_create(file_priv, gem_obj, handle);
        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(gem_obj);
        if (ret)
                return ERR_PTR(ret);

        return cma_obj;
}

/**
 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
 * @gem_obj: GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * Drivers using the CMA helpers should set this as their DRM driver's
 * ->gem_free_object() callback.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
        struct drm_gem_cma_object *cma_obj;

        cma_obj = to_drm_gem_cma_obj(gem_obj);

        if (cma_obj->vaddr) {
                dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
                            cma_obj->vaddr, cma_obj->paddr);
        } else if (gem_obj->import_attach) {
                drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
        }

        drm_gem_object_release(gem_obj);

        kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);

/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as the ->dumb_create() callback in a DRM driver.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
                                     struct drm_device *drm,
                                     struct drm_mode_create_dumb *args)
{
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct drm_gem_cma_object *cma_obj;

        if (args->pitch < min_pitch)
                args->pitch = min_pitch;

        if (args->size < args->pitch * args->height)
                args->size = args->pitch * args->height;

        cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
                                                 &args->handle);
        return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
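
/*
 * Illustrative wrapper (hypothetical, not part of this file): a driver whose
 * scanout engine needs, say, a 64-byte pitch alignment can adjust the fields
 * set up by userspace before handing the IOCTL data to the helper above:
 *
 *        static int foo_dumb_create(struct drm_file *file_priv,
 *                                   struct drm_device *drm,
 *                                   struct drm_mode_create_dumb *args)
 *        {
 *                args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *                args->size = args->pitch * args->height;
 *
 *                return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
 *        }
 */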

/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their ->dumb_create() callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
                            struct drm_device *drm,
                            struct drm_mode_create_dumb *args)
{
        struct drm_gem_cma_object *cma_obj;

        args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        args->size = args->pitch * args->height;

        cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
                                                 &args->handle);
        return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
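
/*
 * Worked example of the computation above (assuming a 4 KiB PAGE_SIZE): for
 * an 800x600 dumb buffer at bpp = 24, pitch = DIV_ROUND_UP(800 * 24, 8) =
 * 2400 bytes and size = 2400 * 600 = 1440000 bytes; drm_gem_cma_create()
 * then rounds the allocation up to 1441792 bytes (352 pages).
 */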

/**
 * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
 *     object
 * @file_priv: DRM file-private structure containing the GEM object
 * @drm: DRM device
 * @handle: GEM object handle
 * @offset: return location for the fake mmap offset
 *
 * This function looks up an object by its handle and returns the fake mmap
 * offset associated with it. Drivers using the CMA helpers should set this
 * as their DRM driver's ->dumb_map_offset() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
                                struct drm_device *drm, u32 handle,
                                u64 *offset)
{
        struct drm_gem_object *gem_obj;

        gem_obj = drm_gem_object_lookup(file_priv, handle);
        if (!gem_obj) {
                dev_err(drm->dev, "failed to lookup GEM object\n");
                return -EINVAL;
        }

        *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

        drm_gem_object_unreference_unlocked(gem_obj);

        return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);

const struct vm_operations_struct drm_gem_cma_vm_ops = {
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
                                struct vm_area_struct *vma)
{
        int ret;

        /*
         * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
         * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
         * the whole buffer.
         */
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_pgoff = 0;

        ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
                          cma_obj->paddr, vma->vm_end - vma->vm_start);
        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}

/**
 * drm_gem_cma_mmap - memory-map a CMA GEM object
 * @filp: file object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for CMA objects: In addition to the usual GEM VMA setup it
 * immediately faults in the entire object instead of using on-demand
 * faulting. Drivers which employ the CMA helpers should use this function
 * as their ->mmap() handler in the DRM device file's file_operations
 * structure.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        gem_obj = vma->vm_private_data;
        cma_obj = to_drm_gem_cma_obj(gem_obj);

        return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);

#ifdef CONFIG_DEBUG_FS
/**
 * drm_gem_cma_describe - describe a CMA GEM object for debugfs
 * @cma_obj: CMA GEM object
 * @m: debugfs file handle
 *
 * This function can be used to dump a human-readable representation of the
 * CMA GEM object into a synthetic file.
 */
void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
                          struct seq_file *m)
{
        struct drm_gem_object *obj = &cma_obj->base;
        uint64_t off;

        off = drm_vma_node_start(&obj->vma_node);

        seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
                   obj->name, obj->refcount.refcount.counter,
                   off, &cma_obj->paddr, cma_obj->vaddr, obj->size);

        seq_printf(m, "\n");
}
EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
#endif

/**
 * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a CMA GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers using the CMA helpers should
 * set this as their DRM driver's ->gem_prime_get_sg_table() callback.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
        struct sg_table *sgt;
        int ret;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
                              cma_obj->paddr, obj->size);
        if (ret < 0)
                goto out;

        return sgt;

out:
        kfree(sgt);
        return NULL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);

/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the CMA helpers should set this as their DRM driver's
 * ->gem_prime_import_sg_table() callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
                                  struct dma_buf_attachment *attach,
                                  struct sg_table *sgt)
{
        struct drm_gem_cma_object *cma_obj;

        if (sgt->nents != 1)
                return ERR_PTR(-EINVAL);

        /* Create a CMA GEM buffer. */
        cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
        if (IS_ERR(cma_obj))
                return ERR_CAST(cma_obj);

        cma_obj->paddr = sg_dma_address(sgt->sgl);
        cma_obj->sgt = sgt;

        DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);

        return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);

/**
 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer imported via DRM PRIME into a userspace
 * process's address space. Drivers that use the CMA helpers should set this
 * as their DRM driver's ->gem_prime_mmap() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
                           struct vm_area_struct *vma)
{
        struct drm_gem_cma_object *cma_obj;
        int ret;

        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        if (ret < 0)
                return ret;

        cma_obj = to_drm_gem_cma_obj(obj);
        return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);

/**
 * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
 *     address space
 * @obj: GEM object
 *
 * This function maps a buffer exported via DRM PRIME into the kernel's
 * virtual address space. Since the CMA buffers are already mapped into the
 * kernel virtual address space this simply returns the cached virtual
 * address. Drivers using the CMA helpers should set this as their DRM
 * driver's ->gem_prime_vmap() callback.
 *
 * Returns:
 * The kernel virtual address of the CMA GEM object's backing store.
 */
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
{
        struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

        return cma_obj->vaddr;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);

/**
 * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
 *     address space
 * @obj: GEM object
 * @vaddr: kernel virtual address where the CMA GEM object was mapped
 *
 * This function removes a buffer exported via DRM PRIME from the kernel's
 * virtual address space. This is a no-op because CMA buffers cannot be
 * unmapped from kernel space. Drivers using the CMA helpers should set this
 * as their DRM driver's ->gem_prime_vunmap() callback.
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        /* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
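
/*
 * Illustrative PRIME wiring (hypothetical, not part of this file): the
 * kernel-doc above names the ->gem_prime_*() callbacks each helper is meant
 * to back. In a driver's struct drm_driver that might look roughly like:
 *
 *        .prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
 *        .prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
 *        .gem_prime_export          = drm_gem_prime_export,
 *        .gem_prime_import          = drm_gem_prime_import,
 *        .gem_prime_get_sg_table    = drm_gem_cma_prime_get_sg_table,
 *        .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
 *        .gem_prime_mmap            = drm_gem_cma_prime_mmap,
 *        .gem_prime_vmap            = drm_gem_cma_prime_vmap,
 *        .gem_prime_vunmap          = drm_gem_cma_prime_vunmap,
 */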