gup.c

/*
 *  Lockless get_user_pages_fast for s390
 *
 *  Copyright IBM Corp. 2010
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>
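
/*
 * Overview: the functions below walk the page tables without taking
 * mmap_sem.  __get_user_pages_fast() disables interrupts around the
 * walk, which on s390 keeps the page tables from being freed (see the
 * comment there).  Each level re-reads its entry once (with barrier()),
 * takes speculative page references and then rechecks the entry; any
 * mismatch or unsuitable entry makes the walk return 0 so the caller
 * can fall back to the slow path.
 */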

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
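
/*
 * gup_pte_range() scans the pte table of one segment for [addr, end).
 * A pte is only accepted if it is present, not special, not marked for
 * NUMA hinting, and writable when a writable mapping was requested.
 */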
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	unsigned long mask;
	pte_t *ptep, pte;

	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
	do {
		pte = *ptep;
		barrier();
		/* Similar to the PMD case, NUMA hinting must take slow path */
		if (pte_protnone(pte))
			return 0;
		if ((pte_val(pte) & mask) != 0)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
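
/*
 * gup_huge_pmd() handles a large pmd, i.e. a single segment entry that
 * maps a compound huge page.  The subpage references are counted first
 * and taken on the compound head in one page_cache_add_speculative()
 * call; if the pmd changed underneath us the references are dropped
 * again and the fast path gives up.
 */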
static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	unsigned long mask;
	int refs;

	mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
	if ((pmd_val(pmd) & mask) != 0)
		return 0;
	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}
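
/*
 * gup_pmd_range() locates the segment (pmd) table for [addr, end).  With
 * the dynamically folded page tables of s390 the pud slot may already be
 * a segment-table entry itself; only a region-third-table entry needs to
 * be dereferenced first.
 */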
static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	pmdp = (pmd_t *) pudp;
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);
	pmdp += pmd_index(addr);
	do {
		pmd = *pmdp;
		barrier();
		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_large(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmdp, pmd, addr, next,
					  write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
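
/*
 * gup_huge_pud() is the region-third-table counterpart of gup_huge_pmd():
 * a single pud entry maps a huge compound page, and the references are
 * again batched on the compound head and dropped if the entry changed.
 */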
static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	unsigned long mask;
	int refs;

	mask = (write ? _REGION_ENTRY_PROTECT : 0) | _REGION_ENTRY_INVALID;
	if ((pud_val(pud) & mask) != 0)
		return 0;
	VM_BUG_ON(!pfn_valid(pud_pfn(pud)));

	refs = 0;
	head = pud_page(pud);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(pud) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}
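
/*
 * gup_pud_range() mirrors gup_pmd_range() one level up: a region-second-
 * table entry is dereferenced to reach the pud level, while anything else
 * is treated as an already folded pud entry.
 */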
static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp, pud;

	pudp = (pud_t *) pgdp;
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pudp = (pud_t *) pgd_deref(pgd);
	pudp += pud_index(addr);
	do {
		pud = *pudp;
		barrier();
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_large(pud))) {
			if (!gup_huge_pud(pudp, pud, addr, next, write, pages,
					  nr))
				return 0;
		} else if (!gup_pmd_range(pudp, pud, addr, next, write, pages,
					  nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp, pgd;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if ((end <= start) || (end > TASK_SIZE))
		return 0;
	/*
	 * local_irq_save() doesn't prevent pagetable teardown, but does
	 * prevent the pagetables from being freed on s390.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd = *pgdp;
		barrier();
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
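
/*
 * Hypothetical usage sketch (not part of the original file): a caller
 * that must not sleep can use __get_user_pages_fast() directly and
 * treat a short result as "fall back to the slow path later".  The
 * helper name below is illustrative only.
 */
#if 0	/* illustration only */
static int my_peek_user_page(unsigned long uaddr, struct page **page)
{
	/* write=0: read-only access is sufficient for this example */
	if (__get_user_pages_fast(uaddr, 1, 0, page) != 1)
		return -EFAULT;	/* caller retries with the sleeping path */
	/* ... access the page ..., then drop the reference: */
	put_page(*page);
	return 0;
}
#endif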

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	int nr, ret;

	might_sleep();
	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	if (nr == nr_pages)
		return nr;

	/* Try to get the remaining pages with get_user_pages */
	start += nr << PAGE_SHIFT;
	pages += nr;
	ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
				      write ? FOLL_WRITE : 0);
	/* Have to be a bit careful with return values */
	if (nr > 0)
		ret = (ret < 0) ? nr : ret + nr;
	return ret;
}
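
/*
 * Hypothetical usage sketch (not part of the original file): a driver
 * pinning a user buffer from process context.  my_pin_user_buf() is an
 * illustrative name; the pattern is pin, use, then put_page() each page.
 */
#if 0	/* illustration only */
static int my_pin_user_buf(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	int i, pinned;

	/* write=1: the pages will be written to */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (pinned < 0)
		return pinned;	/* nothing pinned: -errno */

	/* ... use pages[0..pinned-1] ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	return pinned;
}
#endif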