etnaviv_gem.c

/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}
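
/*
 * Pin the object's backing pages and, on first use, build the
 * scatter/gather table and map it via etnaviv_gem_scatter_map().
 * Must be called with etnaviv_obj->lock held; returns the page
 * array or an ERR_PTR().
 */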
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}
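
/*
 * Set up the userspace mapping attributes for an object: write-combined
 * and uncached objects get the matching page protection, while cached
 * objects are redirected to their shmem file so they get their own
 * address_space.
 */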
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}
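
/*
 * Page fault handler: pins the backing pages if necessary and inserts
 * the faulted page into the userspace VMA with vm_insert_page().
 */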
int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}
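
/*
 * Look up an existing GPU VM mapping of this object for the given MMU
 * context. Returns NULL if the object has no mapping in that context.
 */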
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_reference(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
}
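
/*
 * Get a GPU VM mapping of the object: reuse an existing mapping for this
 * GPU's MMU if possible (including a reaped one), otherwise allocate and
 * map a new one. Takes a reference on both the mapping and the GEM object
 * on success.
 */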
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_reference(obj);
	return mapping;
}
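
/*
 * Return a kernel virtual mapping of the object, creating it on first
 * use. The vaddr check is repeated under the object lock to handle a
 * race with another thread creating the mapping concurrently.
 */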
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}
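
/*
 * Prepare the object for CPU access: wait for (or, with ETNA_PREP_NOSYNC,
 * only test) the reservation object's fences, then sync the pages for the
 * CPU if the buffer is cacheable.
 */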
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		if (!etnaviv_obj->sgt) {
			void *ret;

			mutex_lock(&etnaviv_obj->lock);
			ret = etnaviv_gem_get_pages(etnaviv_obj);
			mutex_unlock(&etnaviv_obj->lock);
			if (IS_ERR(ret))
				return PTR_ERR(ret);
		}

		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, " ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};
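
/*
 * Final object teardown: remove the object from the global list, tear
 * down any remaining GPU VM mappings and release the backing storage.
 */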
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};
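
/*
 * Pin all pages of a userptr object with get_user_pages_remote(), looping
 * until the whole range is covered. Runs from the worker queued by
 * etnaviv_gem_userptr_get_pages() when the fast path could not pin
 * everything.
 */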
static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;
	unsigned int flags = 0;

	pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	if (!etnaviv_obj->userptr.ro)
		flags |= FOLL_WRITE;

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
					    flags, pvec + pinned, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned, 0);
		drm_free_large(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}
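
/*
 * Try to pin the userptr pages directly with __get_user_pages_fast() when
 * the caller shares the mm the pages belong to; if that cannot pin the
 * whole range, defer to a worker and return -EAGAIN so the caller retries
 * once the worker has finished.
 */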
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret)
		goto unreference;

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

unreference:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	return ret;
}