gup.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless get_user_pages_fast for sparc, cribbed from powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	if (tlb_type == hypervisor) {
		result = _PAGE_PRESENT_4V|_PAGE_P_4V;
		if (write)
			result |= _PAGE_WRITE_4V;
	} else {
		result = _PAGE_PRESENT_4U|_PAGE_P_4U;
		if (write)
			result |= _PAGE_WRITE_4U;
	}
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		struct page *page, *head;
		pte_t pte = *ptep;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

		/* The hugepage case is simplified on sparc64 because
		 * we encode the sub-page pfn offsets into the
		 * hugepage PTEs. We could optimize this in the future to
		 * use page_cache_add_speculative() for the hugepage case.
		 */
		page = pte_page(pte);
		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}

		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
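
/*
 * A huge PMD maps its whole range with a single entry, so the
 * sub-pages are recorded first and then 'refs' references are taken
 * on the compound head in one atomic step; the PMD is re-read
 * afterwards to detect a concurrent modification.
 */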
static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
			unsigned long end, int write, struct page **pages,
			int *nr)
{
	struct page *head, *page;
	int refs;

	if (!(pmd_val(pmd) & _PAGE_VALID))
		return 0;

	if (write && !pmd_write(pmd))
		return 0;

	refs = 0;
	page = pmd_page(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	head = compound_head(page);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}
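
/*
 * Identical to gup_huge_pmd() above, but for a huge PUD entry.
 */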
static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
			unsigned long end, int write, struct page **pages,
			int *nr)
{
	struct page *head, *page;
	int refs;

	if (!(pud_val(pud) & _PAGE_VALID))
		return 0;

	if (write && !pud_write(pud))
		return 0;

	refs = 0;
	page = pud_page(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	head = compound_head(page);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(pud) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}
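
/*
 * Walk the PMDs covering [addr, end): a large PMD pins the whole
 * chunk at once, otherwise we descend to the PTE level.
 */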
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_large(pmd))) {
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write,
					  pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_large(pud))) {
			if (!gup_huge_pud(pudp, pud, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}
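
/*
 * Like get_user_pages_fast() below, but this variant never falls back
 * to the slow path; it simply returns however many pages the lockless
 * walk managed to pin, which may be fewer than nr_pages.
 */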
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
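
/*
 * Fast path first: walk the page tables with IRQs disabled, and fall
 * back to the slow get_user_pages path for whatever part of the range
 * could not be pinned locklessly.
 */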
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	/*
	 * XXX: batch / limit 'nr', to avoid large irq off latency
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (eg. DB2), and whether limiting the batch size
	 * will decrease performance.
	 *
	 * It seems like we're in the clear for the moment. Direct-IO is
	 * the main guy that batches up lots of get_user_pages, and even
	 * they are limited to 64-at-a-time which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables from being freed on sparc.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;

slow:
		local_irq_enable();

		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start,
			(end - start) >> PAGE_SHIFT, pages,
			write ? FOLL_WRITE : 0);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}
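
/*
 * Illustrative sketch of how a caller might use the interface above;
 * the function below is hypothetical and not part of the original
 * file (hence the #if 0).  A typical user, e.g. a direct-I/O path,
 * pins the buffer, does its work, then drops every page reference it
 * was handed.
 */
#if 0	/* example only, not compiled */
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	int pinned, i;

	/* Pin for write access; falls back to the slow path as needed. */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (pinned < 0)
		return pinned;

	/* ... operate on pages[0..pinned) here ... */

	/* Release the references taken by get_user_pages_fast(). */
	for (i = 0; i < pinned; i++)
		put_page(pages[i]);

	return pinned == nr_pages ? 0 : -EFAULT;
}
#endif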