hugetlbpage.c

/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
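
/* Top-down variant, used for tasks whose mmap region grows down from
 * below the stack (only ever 32-bit tasks here).  Falls back to a
 * bottom-up search if the top-down search fails.
 */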
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
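
/* Arch hook used by the hugetlbfs code to pick an unmapped,
 * HPAGE_SIZE-aligned region.  Honors MAP_FIXED and an aligned hint
 * address, then dispatches to the bottom-up or top-down helper to
 * match the mm's mmap layout.
 */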
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
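
/* Allocate the page-table path down to the huge PTE for @addr.
 * Huge pages are mapped at the PMD level here, so the PMD entry
 * itself serves as the huge PTE and no PTE page is allocated
 * below it.
 */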
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *)pmd_alloc(mm, pud, addr);

	return pte;
}
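
/* Walk the page tables for @addr without allocating; returns the
 * PMD-level huge PTE, or NULL if an intermediate table is missing.
 */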
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud))
			pte = (pte_t *)pmd_offset(pud, addr);
	}

	return pte;
}
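
/* Install a huge PTE, keeping the per-mm count of huge mappings in
 * step.  A huge page spans two REAL_HPAGE_SIZE hardware TLB entries,
 * so the TLB batch is poked once for each half.
 */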
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pte_t orig;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count++;

	addr &= HPAGE_MASK;
	orig = *ptep;
	*ptep = entry;

	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
	maybe_tlb_batch_add(mm, addr, ptep, orig, 0);
	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0);
}
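
/* Clear a huge PTE and return its old value, dropping the per-mm
 * huge-mapping count and flushing both REAL_HPAGE_SIZE halves.
 */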
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.hugetlb_pte_count--;

	addr &= HPAGE_MASK;
	*ptep = __pte(0UL);

	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0);

	return entry;
}
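
/* A PMD maps a huge page when it is non-none and either not valid
 * (e.g. a swap entry) or has _PAGE_PMD_HUGE set.  Huge pages are
 * never mapped at the PUD level on this architecture.
 */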
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}

int pud_huge(pud_t pud)
{
	return 0;
}
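
/* Free the PTE page hanging off @pmd, keeping the mm's PTE-page
 * accounting in sync.
 */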
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	atomic_long_dec(&tlb->mm->nr_ptes);
}
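
/* Tear down the PMD entries under @pud for [addr, end).  Huge PMDs
 * are simply cleared; normal PMDs have their PTE pages freed.  The
 * PMD table itself is freed only if the [floor, ceiling) bounds
 * show no neighbor still needs it.
 */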
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}
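
/* Same walk one level up: free each PMD table under @pgd, then free
 * the PUD table itself when [floor, ceiling) permits.
 */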
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
				       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}
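
/* Entry point for the hugetlb unmap path: walk the PGD entries for
 * [addr, end) and free the page tables below them, bounded by
 * [floor, ceiling).
 */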
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}