hugetlbpage.c

/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
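
/*
 * pmd_huge()/pud_huge(): a non-empty entry with the table bit clear is a
 * block (section) mapping, i.e. a huge page at that level.
 */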
int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}
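
/*
 * find_num_contig(): for a mapping with the contiguous hint, work out how
 * many page-table entries back the huge page and how much each entry
 * covers. Returns 1 for a non-contiguous pte, CONT_PMDS when ptep points
 * at a pmd (pgsize = PMD_SIZE), otherwise CONT_PTES (pgsize = PAGE_SIZE).
 */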
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pte, size_t *pgsize)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	*pgsize = PAGE_SIZE;
	if (!pte_cont(pte))
		return 1;
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	if ((pte_t *)pmd == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}
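
/*
 * set_huge_pte_at(): write out every entry backing the huge page. For a
 * contiguous range this means programming ncontig consecutive entries:
 * the pfn steps by one entry's worth of pages each iteration, while the
 * attribute bits (recovered by xor-ing the pfn back out of the pte) stay
 * the same.
 */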
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	size_t pgsize;
	int i;
	int ncontig = find_num_contig(mm, addr, ptep, pte, &pgsize);
	unsigned long pfn;
	pgprot_t hugeprot;

	if (ncontig == 1) {
		set_pte_at(mm, addr, ptep, pte);
		return;
	}

	pfn = pte_pfn(pte);
	hugeprot = __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
	for (i = 0; i < ncontig; i++) {
		pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
			 pte_val(pfn_pte(pfn, hugeprot)));
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
		ptep++;
		pfn += pgsize >> PAGE_SHIFT;
		addr += pgsize;
	}
}
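
/*
 * huge_pte_alloc(): allocate page-table levels down to the entry that
 * will map a huge page of size sz at addr, and return a pointer to that
 * entry (NULL on allocation failure or an unsupported size).
 */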
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz);
	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	if (sz == PUD_SIZE) {
		pte = (pte_t *)pud;
	} else if (sz == (PAGE_SIZE * CONT_PTES)) {
		pmd_t *pmd = pmd_alloc(mm, pud, addr);

		WARN_ON(addr & (sz - 1));
		/*
		 * Note that if this code were ever ported to the
		 * 32-bit arm platform then it will cause trouble in
		 * the case where CONFIG_HIGHPTE is set, since there
		 * will be no pte_unmap() to correspond with this
		 * pte_alloc_map().
		 */
		pte = pte_alloc_map(mm, pmd, addr);
	} else if (sz == PMD_SIZE) {
		if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
		    pud_none(*pud))
			pte = huge_pmd_share(mm, addr, pud);
		else
			pte = (pte_t *)pmd_alloc(mm, pud, addr);
	} else if (sz == (PMD_SIZE * CONT_PMDS)) {
		pmd_t *pmd;

		pmd = pmd_alloc(mm, pud, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmd;
	}

	/* pte is still NULL for an unsupported sz; don't dereference it. */
	if (pte)
		pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n",
			 __func__, addr, sz, pte, pte_val(*pte));
	return pte;
}
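
/*
 * huge_pte_offset(): walk the page tables and return the entry mapping
 * addr at whichever level the huge page lives, or NULL if nothing is
 * mapped. For contiguous ranges, the entry for the first page of the
 * range is returned.
 */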
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
	if (!pgd_present(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;

	if (pud_huge(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return NULL;

	if (pte_cont(pmd_pte(*pmd))) {
		pmd = pmd_offset(pud, (addr & CONT_PMD_MASK));
		return (pte_t *)pmd;
	}
	if (pmd_huge(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, addr);
	if (pte_present(*pte) && pte_cont(*pte)) {
		pte = pte_offset_kernel(pmd, (addr & CONT_PTE_MASK));
		return pte;
	}
	return NULL;
}
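
/*
 * arch_make_huge_pte(): tag the pte with the contiguous hint when the
 * VMA's huge page size is one of the CONT_PTE/CONT_PMD sizes; plain
 * PMD/PUD block mappings need no extra bits.
 */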
pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writable)
{
	size_t pagesize = huge_page_size(hstate_vma(vma));

	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}
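
/*
 * huge_ptep_get_and_clear(): clear every entry backing the huge page and
 * return the first one, folding in the dirty bit if any entry in a
 * contiguous range was dirtied.
 */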
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	if (pte_cont(*ptep)) {
		int ncontig, i;
		size_t pgsize;
		pte_t *cpte;
		bool is_dirty = false;

		cpte = huge_pte_offset(mm, addr);
		ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
		/* save the 1st pte to return */
		pte = ptep_get_and_clear(mm, addr, cpte);
		for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
			/*
			 * If HW_AFDBM is enabled, then the HW could
			 * turn on the dirty bit for any of the pages
			 * in the set, so check them all.
			 */
			++cpte;
			if (pte_dirty(ptep_get_and_clear(mm, addr, cpte)))
				is_dirty = true;
		}
		if (is_dirty)
			return pte_mkdirty(pte);
		else
			return pte;
	} else {
		return ptep_get_and_clear(mm, addr, ptep);
	}
}
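
/*
 * huge_ptep_set_access_flags(): apply the new access/dirty flags to every
 * entry of a contiguous range, keeping the attribute bits from the new
 * pte but stepping the pfn entry by entry. Returns non-zero if any entry
 * changed.
 */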
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	pte_t *cpte;

	if (pte_cont(pte)) {
		int ncontig, i, changed = 0;
		size_t pgsize = 0;
		unsigned long pfn = pte_pfn(pte);
		/* Select all bits except the pfn */
		pgprot_t hugeprot =
			__pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^
				 pte_val(pte));

		cpte = huge_pte_offset(vma->vm_mm, addr);
		pfn = pte_pfn(*cpte);
		ncontig = find_num_contig(vma->vm_mm, addr, cpte,
					  *cpte, &pgsize);
		for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
			changed |= ptep_set_access_flags(vma, addr, cpte,
							 pfn_pte(pfn, hugeprot),
							 dirty);
			pfn += pgsize >> PAGE_SHIFT;
		}
		return changed;
	} else {
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	}
}
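
/*
 * huge_ptep_set_wrprotect(): write-protect every entry backing the huge
 * page so that a subsequent write faults (e.g. for copy-on-write after
 * fork).
 */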
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	if (pte_cont(*ptep)) {
		int ncontig, i;
		pte_t *cpte;
		size_t pgsize = 0;

		cpte = huge_pte_offset(mm, addr);
		ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
		for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
			ptep_set_wrprotect(mm, addr, cpte);
	} else {
		ptep_set_wrprotect(mm, addr, ptep);
	}
}
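
/*
 * huge_ptep_clear_flush(): clear every entry backing the huge page and
 * flush the corresponding TLB entries.
 */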
void huge_ptep_clear_flush(struct vm_area_struct *vma,
			   unsigned long addr, pte_t *ptep)
{
	if (pte_cont(*ptep)) {
		int ncontig, i;
		pte_t *cpte;
		size_t pgsize = 0;

		cpte = huge_pte_offset(vma->vm_mm, addr);
		ncontig = find_num_contig(vma->vm_mm, addr, cpte,
					  *cpte, &pgsize);
		for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
			ptep_clear_flush(vma, addr, cpte);
	} else {
		ptep_clear_flush(vma, addr, ptep);
	}
}
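
/*
 * setup_hugepagesz(): parse the hugepagesz= boot parameter and register
 * the matching hstate; the order passed to hugetlb_add_hstate() is
 * log2 of the number of base pages per huge page.
 */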
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else if (ps == (PAGE_SIZE * CONT_PTES)) {
		hugetlb_add_hstate(CONT_PTE_SHIFT);
	} else if (ps == (PMD_SIZE * CONT_PMDS)) {
		hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
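
/*
 * On 64K page kernels, make the contiguous-PTE size available as a
 * default hstate even when no hugepagesz= parameter selected it (with
 * 64K pages, CONT_PMD_SHIFT happens to equal CONT_PTE_SHIFT, so the
 * order below matches CONT_PTES * PAGE_SIZE).
 */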
#ifdef CONFIG_ARM64_64K_PAGES
static __init int add_default_hugepagesz(void)
{
	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
		hugetlb_add_hstate(CONT_PMD_SHIFT);
	return 0;
}
arch_initcall(add_default_hugepagesz);
#endif