/*
 * Lockless get_user_pages_fast for x86
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/memremap.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>

static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X86_PAE
        return READ_ONCE(*ptep);
#else
        /*
         * With get_user_pages_fast, we walk down the pagetables without taking
         * any locks. For this we would like to load the pointers atomically,
         * but that is not possible (without expensive cmpxchg8b) on PAE. What
         * we do have is the guarantee that a pte will only either go from not
         * present to present, or present to not present or both -- it will not
         * switch to a completely different present page without a TLB flush in
         * between; something that we are blocking by holding interrupts off.
         *
         * Setting ptes from not present to present goes:
         * ptep->pte_high = h;
         * smp_wmb();
         * ptep->pte_low = l;
         *
         * And present to not present goes:
         * ptep->pte_low = 0;
         * smp_wmb();
         * ptep->pte_high = 0;
         *
         * We must ensure here that the load of pte_low sees l iff pte_high
         * sees h. We load pte_high *after* loading pte_low, which ensures we
         * don't see an older value of pte_high. *Then* we recheck pte_low,
         * which ensures that we haven't picked up a changed pte high. We might
         * have got rubbish values from pte_low and pte_high, but we are
         * guaranteed that pte_low will not have the present bit set *unless*
         * it is 'l'. And get_user_pages_fast only operates on present ptes, so
         * we're safe.
         *
         * gup_get_pte should not be used or copied outside gup.c without being
         * very careful -- it does not atomically load the pte or anything that
         * is likely to be useful for you.
         */
        pte_t pte;

retry:
        pte.pte_low = ptep->pte_low;
        smp_rmb();
        pte.pte_high = ptep->pte_high;
        smp_rmb();
        if (unlikely(pte.pte_low != ptep->pte_low))
                goto retry;

        return pte;
#endif
}
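
/*
 * Roll back the references taken on device pages between nr_start and *nr:
 * clear the referenced mark and drop the page reference for each page that
 * was recorded before a dev_pagemap lookup failed partway through a range.
 */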
static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
{
        while ((*nr) - nr_start) {
                struct page *page = pages[--(*nr)];

                ClearPageReferenced(page);
                put_page(page);
        }
}

/*
 * 'pteval' can come from a pte, pmd or pud. We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline int pte_allows_gup(unsigned long pteval, int write)
{
        unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

        if (write)
                need_pte_bits |= _PAGE_RW;

        if ((pteval & need_pte_bits) != need_pte_bits)
                return 0;

        /* Check memory protection keys permissions. */
        if (!__pkru_allows_pkey(pte_flags_pkey(pteval), write))
                return 0;

        return 1;
}

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
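
/*
 * Walk the pte level under a single pmd entry with interrupts off.
 * Returns 1 if every pte in [addr, end) was pinned; returns 0 on anything
 * that cannot be handled locklessly (NUMA-hinting ptes, permission/pkey
 * denial, special mappings, failed dev_pagemap lookups) so the caller
 * falls back to the slow path.
 */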
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        struct dev_pagemap *pgmap = NULL;
        int nr_start = *nr;
        pte_t *ptep;

        ptep = pte_offset_map(&pmd, addr);
        do {
                pte_t pte = gup_get_pte(ptep);
                struct page *page;

                /* Similar to the PMD case, NUMA hinting must take slow path */
                if (pte_protnone(pte)) {
                        pte_unmap(ptep);
                        return 0;
                }

                if (!pte_allows_gup(pte_val(pte), write)) {
                        pte_unmap(ptep);
                        return 0;
                }

                if (pte_devmap(pte)) {
                        pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
                        if (unlikely(!pgmap)) {
                                undo_dev_pagemap(nr, nr_start, pages);
                                pte_unmap(ptep);
                                return 0;
                        }
                } else if (pte_special(pte)) {
                        pte_unmap(ptep);
                        return 0;
                }
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
                get_page(page);
                put_dev_pagemap(pgmap);
                SetPageReferenced(page);
                pages[*nr] = page;
                (*nr)++;

        } while (ptep++, addr += PAGE_SIZE, addr != end);
        pte_unmap(ptep - 1);

        return 1;
}
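
/*
 * Take @nr references on a compound head page in a single page_ref_add()
 * and mark it referenced; used by the huge pmd/pud paths below.
 */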
static inline void get_head_page_multiple(struct page *page, int nr)
{
        VM_BUG_ON_PAGE(page != compound_head(page), page);
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, nr);
        SetPageReferenced(page);
}
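
/*
 * A huge pmd backed by device memory cannot be pinned with one compound
 * reference: each small page is checked against its dev_pagemap and pinned
 * individually, and everything is rolled back if a lookup fails.
 */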
static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
                unsigned long end, struct page **pages, int *nr)
{
        int nr_start = *nr;
        unsigned long pfn = pmd_pfn(pmd);
        struct dev_pagemap *pgmap = NULL;

        pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
        do {
                struct page *page = pfn_to_page(pfn);

                pgmap = get_dev_pagemap(pfn, pgmap);
                if (unlikely(!pgmap)) {
                        undo_dev_pagemap(nr, nr_start, pages);
                        return 0;
                }
                SetPageReferenced(page);
                pages[*nr] = page;
                get_page(page);
                put_dev_pagemap(pgmap);
                (*nr)++;
                pfn++;
        } while (addr += PAGE_SIZE, addr != end);
        return 1;
}
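
/*
 * Pin all pages covered by a huge pmd. Device mappings are handed off to
 * __gup_device_huge_pmd(); for ordinary huge pages the constituent small
 * pages are recorded first and the references are taken on the head page
 * in one batch at the end.
 */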
static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        struct page *head, *page;
        int refs;

        if (!pte_allows_gup(pmd_val(pmd), write))
                return 0;

        VM_BUG_ON(!pfn_valid(pmd_pfn(pmd)));
        if (pmd_devmap(pmd))
                return __gup_device_huge_pmd(pmd, addr, end, pages, nr);

        /* hugepages are never "special" */
        VM_BUG_ON(pmd_flags(pmd) & _PAGE_SPECIAL);

        refs = 0;
        head = pmd_page(pmd);
        page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        do {
                VM_BUG_ON_PAGE(compound_head(page) != head, page);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
        get_head_page_multiple(head, refs);

        return 1;
}
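
/*
 * Walk the pmd level below one pud entry: huge or non-present pmds go to
 * gup_huge_pmd() (NUMA-hinting entries force the slow path), everything
 * else is walked pte by pte in gup_pte_range().
 */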
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pmd_t *pmdp;

        pmdp = pmd_offset(&pud, addr);
        do {
                pmd_t pmd = *pmdp;

                next = pmd_addr_end(addr, end);
                if (pmd_none(pmd))
                        return 0;
                if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
                        /*
                         * NUMA hinting faults need to be handled in the GUP
                         * slowpath for accounting purposes and so that they
                         * can be serialised against THP migration.
                         */
                        if (pmd_protnone(pmd))
                                return 0;
                        if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
                                return 0;
                } else {
                        if (!gup_pte_range(pmd, addr, next, write, pages, nr))
                                return 0;
                }
        } while (pmdp++, addr = next, addr != end);

        return 1;
}
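
/*
 * Pin all pages covered by a huge pud (e.g. a 1GB hugetlb page): record the
 * constituent small pages, then take the references on the head page in one
 * batch.
 */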
static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        struct page *head, *page;
        int refs;

        if (!pte_allows_gup(pud_val(pud), write))
                return 0;
        /* hugepages are never "special" */
        VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL);
        VM_BUG_ON(!pfn_valid(pud_pfn(pud)));

        refs = 0;
        head = pud_page(pud);
        page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
        do {
                VM_BUG_ON_PAGE(compound_head(page) != head, page);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
        get_head_page_multiple(head, refs);

        return 1;
}
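
/*
 * Walk the pud level below one pgd entry, dispatching huge puds to
 * gup_huge_pud() and everything else down to gup_pmd_range().
 */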
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
                        int write, struct page **pages, int *nr)
{
        unsigned long next;
        pud_t *pudp;

        pudp = pud_offset(&pgd, addr);
        do {
                pud_t pud = *pudp;

                next = pud_addr_end(addr, end);
                if (pud_none(pud))
                        return 0;
                if (unlikely(pud_large(pud))) {
                        if (!gup_huge_pud(pud, addr, next, write, pages, nr))
                                return 0;
                } else {
                        if (!gup_pmd_range(pud, addr, next, write, pages, nr))
                                return 0;
                }
        } while (pudp++, addr = next, addr != end);

        return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
                          struct page **pages)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
        unsigned long next;
        unsigned long flags;
        pgd_t *pgdp;
        int nr = 0;

        start &= PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;
        if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
                                        (void __user *)start, len)))
                return 0;

        /*
         * XXX: batch / limit 'nr', to avoid large irq off latency
         * needs some instrumenting to determine the common sizes used by
         * important workloads (eg. DB2), and whether limiting the batch size
         * will decrease performance.
         *
         * It seems like we're in the clear for the moment. Direct-IO is
         * the main guy that batches up lots of get_user_pages, and even
         * they are limited to 64-at-a-time which is not so many.
         */
        /*
         * This doesn't prevent pagetable teardown, but does prevent
         * the pagetables and pages from being freed on x86.
         *
         * So long as we atomically load page table pointers versus teardown
         * (which we do on x86, with the above PAE exception), we can follow the
         * address down to the page and take a ref on it.
         */
        local_irq_save(flags);
        pgdp = pgd_offset(mm, addr);
        do {
                pgd_t pgd = *pgdp;

                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
                        break;
                if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
                        break;
        } while (pgdp++, addr = next, addr != end);
        local_irq_restore(flags);

        return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
        unsigned long next;
        pgd_t *pgdp;
        int nr = 0;

        start &= PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;

        end = start + len;
        if (end < start)
                goto slow_irqon;

#ifdef CONFIG_X86_64
        if (end >> __VIRTUAL_MASK_SHIFT)
                goto slow_irqon;
#endif

        /*
         * XXX: batch / limit 'nr', to avoid large irq off latency
         * needs some instrumenting to determine the common sizes used by
         * important workloads (eg. DB2), and whether limiting the batch size
         * will decrease performance.
         *
         * It seems like we're in the clear for the moment. Direct-IO is
         * the main guy that batches up lots of get_user_pages, and even
         * they are limited to 64-at-a-time which is not so many.
         */
        /*
         * This doesn't prevent pagetable teardown, but does prevent
         * the pagetables and pages from being freed on x86.
         *
         * So long as we atomically load page table pointers versus teardown
         * (which we do on x86, with the above PAE exception), we can follow the
         * address down to the page and take a ref on it.
         */
        local_irq_disable();
        pgdp = pgd_offset(mm, addr);
        do {
                pgd_t pgd = *pgdp;

                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
                        goto slow;
                if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
                        goto slow;
        } while (pgdp++, addr = next, addr != end);
        local_irq_enable();

        VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
        return nr;

        {
                int ret;

slow:
                local_irq_enable();
slow_irqon:
                /* Try to get the remaining pages with get_user_pages */
                start += nr << PAGE_SHIFT;
                pages += nr;

                ret = get_user_pages_unlocked(start,
                                              (end - start) >> PAGE_SHIFT,
                                              pages, write ? FOLL_WRITE : 0);

                /* Have to be a bit careful with return values */
                if (nr > 0) {
                        if (ret < 0)
                                ret = nr;
                        else
                                ret += nr;
                }

                return ret;
        }
}
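
/*
 * Illustrative sketch of a typical caller (for example a direct-I/O path):
 * pin a user buffer, operate on the pinned pages, then drop the references
 * GUP took. The buffer name and the per-page helper below (user_buf,
 * do_io_to_page) are hypothetical, not part of this file:
 *
 *	struct page *pages[64];
 *	int i, nr;
 *
 *	nr = get_user_pages_fast((unsigned long)user_buf, 64, 1, pages);
 *	if (nr <= 0)
 *		return nr;		(nothing pinned: 0 or -errno)
 *	for (i = 0; i < nr; i++) {
 *		do_io_to_page(pages[i]);
 *		put_page(pages[i]);	(drop the reference GUP took)
 *	}
 */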