/*
 * Lockless get_user_pages_fast for MIPS
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 * Copyright (C) 2011 Ralf Baechle
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>

#include <asm/cpu-features.h>
#include <asm/pgtable.h>
/*
 * Read a pte without holding any lock.  On 32-bit CPUs with 64-bit
 * physical addresses the pte spans two 32-bit words, so it is re-read
 * until a consistent low/high pair is observed.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	pte_t pte;

retry:
	pte.pte_low = ptep->pte_low;
	smp_rmb();
	pte.pte_high = ptep->pte_high;
	smp_rmb();
	if (unlikely(pte.pte_low != ptep->pte_low))
		goto retry;

	return pte;
#else
	return READ_ONCE(*ptep);
#endif
}
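
/*
 * What the retry above guards against, concretely: a concurrent pte
 * update stores pte_low and pte_high as two separate 32-bit writes, so
 * a lockless reader could otherwise pair the old low word with the new
 * high word and follow a bogus pfn.  The smp_rmb() calls order the
 * loads; if pte_low changed while pte_high was being read, the pair is
 * discarded and read again.
 */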
/* Walk the ptes within one pmd, taking a reference on each backing page. */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			int write, struct page **pages, int *nr)
{
	pte_t *ptep = pte_offset_map(&pmd, addr);

	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *page;

		if (!pte_present(pte) ||
		    pte_special(pte) || (write && !pte_write(pte))) {
			pte_unmap(ptep);
			return 0;
		}
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		get_page(page);
		SetPageReferenced(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);
	pte_unmap(ptep - 1);

	return 1;
}
/* Take @nr references on a compound page's head in a single operation. */
static inline void get_head_page_multiple(struct page *page, int nr)
{
	VM_BUG_ON(page != compound_head(page));
	VM_BUG_ON(page_count(page) == 0);
	page_ref_add(page, nr);
	SetPageReferenced(page);
}
static int gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end,
			int write, struct page **pages, int *nr)
{
	pte_t pte = *(pte_t *)&pmd;
	struct page *head, *page;
	int refs;

	if (write && !pte_write(pte))
		return 0;
	/* hugepages are never "special" */
	VM_BUG_ON(pte_special(pte));
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	get_head_page_multiple(head, refs);
	return 1;
}
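
/*
 * Worked example of the tail-page arithmetic above: (addr & ~PMD_MASK)
 * is addr's byte offset into the huge mapping, so shifting right by
 * PAGE_SHIFT converts it to a base-page index.  If addr starts three
 * base pages into the huge page, the walk records head + 3, head + 4,
 * and so on, then takes all the references on the head page at once.
 */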
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
			int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_huge(pmd))) {
			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
				return 0;
		} else {
			if (!gup_pte_range(pmd, addr, next, write, pages, nr))
				return 0;
		}
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end,
			int write, struct page **pages, int *nr)
{
	pte_t pte = *(pte_t *)&pud;
	struct page *head, *page;
	int refs;

	if (write && !pte_write(pte))
		return 0;
	/* hugepages are never "special" */
	VM_BUG_ON(pte_special(pte));
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	get_head_page_multiple(head, refs);
	return 1;
}
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, addr, next, write, pages, nr))
				return 0;
		} else {
			if (!gup_pmd_range(pud, addr, next, write, pages, nr))
				return 0;
		}
	} while (pudp++, addr = next, addr != end);

	return 1;
}
/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	unsigned long flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
				(void __user *)start, len)))
		return 0;

	/*
	 * XXX: batch / limit 'nr', to avoid large irq off latency
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (eg. DB2), and whether limiting the batch
	 * size will decrease performance.
	 *
	 * It seems like we're in the clear for the moment. Direct-IO is
	 * the main guy that batches up lots of get_user_pages, and even
	 * they are limited to 64-at-a-time which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables and pages from being freed.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
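
/*
 * Caller sketch for the IRQ-safe variant (hypothetical; uaddr, n and
 * pages are assumed to be supplied by the caller): since this variant
 * never sleeps and never falls back, a partial result must be handled
 * explicitly, e.g.
 *
 *	int pinned = __get_user_pages_fast(uaddr, n, 1, pages);
 *	if (pinned < n) {
 *		while (pinned-- > 0)
 *			put_page(pages[pinned]);
 *		... retry later from sleepable context ...
 *	}
 */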
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int ret, nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if (end < start || cpu_has_dc_aliases)
		goto slow_irqon;

	/* XXX: batch / limit 'nr' */
	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;
slow:
	local_irq_enable();

slow_irqon:
	/* Try to get the remaining pages with get_user_pages */
	start += nr << PAGE_SHIFT;
	pages += nr;

	ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
				      pages, write ? FOLL_WRITE : 0);

	/* Have to be a bit careful with return values */
	if (nr > 0) {
		if (ret < 0)
			ret = nr;
		else
			ret += nr;
	}
	return ret;
}
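
/*
 * Usage sketch (illustrative; uaddr and process_page() are assumed to
 * be provided by the caller): the typical pattern is pin, use, then
 * drop each reference with put_page().
 *
 *	struct page *pages[64];
 *	int i, got;
 *
 *	got = get_user_pages_fast(uaddr, 64, 1, pages);
 *	if (got < 0)
 *		return got;
 *	for (i = 0; i < got; i++) {
 *		process_page(pages[i]);
 *		put_page(pages[i]);
 *	}
 */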