/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include <linux/export.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
#endif
	return tmp;
}

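/*
 * Illustrative note (not from the original source): these two helpers only
 * compute page protections; the mmap paths below apply them.  For example,
 * a _DRM_REGISTERS map on x86 comes back from drm_io_prot() with _PAGE_PCD
 * set and _PAGE_PWT cleared, i.e. an uncached mapping suitable for MMIO,
 * while drm_dma_prot() leaves DMA pages cacheable except on
 * non-cache-coherent PowerPC, where _PAGE_NO_CACHE is added.
 */
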
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_fault_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else			/* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif			/* __OS_HAS_AGP */

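/*
 * Worked example for the AGP fault path above (illustrative, hypothetical
 * numbers, assuming PAGE_SHIFT == 12): with map->offset = 0xd0000000 and a
 * fault 0x3000 bytes into the VMA, baddr = 0xd0003000.  If an agpmem entry
 * is bound at 0xd0000000 and covers at least four pages, the faulting page
 * is found by
 *
 *	offset = (baddr - agpmem->bound) >> PAGE_SHIFT;	// = 3
 *	page   = agpmem->memory->pages[3];
 */
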
/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

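/*
 * Illustrative example for the SHM fault path (hypothetical numbers):
 * map->handle is a vmalloc()ed kernel buffer shared with userspace, so a
 * fault 0x2000 bytes into the VMA resolves to
 *
 *	page = vmalloc_to_page((void *)((unsigned long)map->handle + 0x2000));
 *
 * i.e. the third page of the shared area.
 */
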
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			case _DRM_GEM:
				DRM_ERROR("tried to rmmap GEM object\n");
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;	/* page_nr could just be vmf->pgoff */
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

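/*
 * Illustrative example for the DMA fault path (hypothetical numbers,
 * assuming PAGE_SHIFT == 12): a fault at vma->vm_start + 0x5000 gives
 * page_nr = 5, and the page is recovered from the kernel virtual address
 * stored in dma->pagelist[5] via virt_to_page().
 */
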
/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

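/*
 * Illustrative example for the scatter-gather fault path (hypothetical
 * numbers, assuming PAGE_SHIFT == 12): if map->offset lies 0x10000 bytes
 * past dev->sg->virtual and the fault is 0x2000 bytes into the VMA, then
 * page_offset = 2 + 16 = 18 and the page is entry->pagelist[18].
 */
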
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

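/*
 * Note (editorial, hedged): the vmalist entries recorded above are pure
 * bookkeeping; they pair with the removal in drm_vm_close_locked() below
 * and are presumably consumed elsewhere in the DRM core, e.g. by the vma
 * debug listing.
 */
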
static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}

void drm_vm_close_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and
 * calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	drm_vm_open_locked(vma);
	return 0;
}

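/*
 * Illustrative example for the length check above (hypothetical numbers):
 * with dma->page_count == 32, only an mmap() request of exactly
 * 32 << PAGE_SHIFT bytes succeeds; any other length returns -EINVAL.
 */
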
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally calls
 * drm_vm_open_locked().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
#if !defined(__arm__)
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
#else
		if (remap_pfn_range(vma, vma->vm_start,
				    (map->offset + offset) >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    vma->vm_page_prot))
			return -EAGAIN;
#endif
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	drm_vm_open_locked(vma);
	return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);
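
/*
 * Usage sketch (illustrative, not part of this file; drm_fd, map_size and
 * map_offset are placeholder names): userspace reaches drm_mmap() through
 * mmap(2) on the DRM device node, passing the map's offset token as the
 * file offset:
 *
 *	void *ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, map_offset);
 *
 * The kernel turns map_offset into vma->vm_pgoff, which is the key used
 * for the dev->map_hash lookup in drm_mmap_locked() above.
 */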