hugetlbpage.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
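
/*
 * Walk (and, if necessary, allocate) the page-table levels for "addr"
 * down to the PMD.  Huge pages are mapped at the PMD level here, so the
 * PMD entry is returned cast to a pte_t pointer.
 */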
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        /* We do not yet support multiple huge page sizes. */
        BUG_ON(sz != PMD_SIZE);

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud)
                pte = (pte_t *)pmd_alloc(mm, pud, addr);
        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

        return pte;
}
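
/*
 * Look up the huge PTE for "addr" without allocating anything.  Returns
 * NULL if any intermediate page-table level is not yet present.
 */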
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (pud_present(*pud))
                        pmd = pmd_offset(pud, addr);
        }
        return (pte_t *)pmd;
}
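
/*
 * The functions below come in two flavors: test-only variants that are
 * built when HUGETLB_TEST is defined, and the normal implementations in
 * the #else branch that the kernel actually uses.
 */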
#ifdef HUGETLB_TEST
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        /*
         * Page frame index of "address"; the modulo below selects the
         * small page within the huge page.
         */
        unsigned long vpfn = address / PAGE_SIZE;
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}

#else
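
/*
 * Returning -EINVAL here makes the generic code fall back to the normal
 * page-table walk, where pmd_huge()/pud_huge() and the follow_huge_pmd()/
 * follow_huge_pud() helpers below handle huge mappings.
 */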
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}
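
/*
 * Turn a huge PMD/PUD entry into the struct page backing "address": take
 * the first page of the huge mapping and offset into it by the small-page
 * index of the address within the huge region.
 */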
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
        return page;
}

struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                             pud_t *pud, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pud);
        if (page)
                page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
        return page;
}
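
/* Huge PMDs are not shared between processes here, so there is nothing to unshare. */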
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

#endif

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
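
/*
 * Bottom-up search for a free, huge-page-aligned region: start at the
 * cached free-area hint (or TASK_UNMAPPED_BASE) and walk the VMAs upward
 * until a gap of at least "len" bytes is found.
 */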
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > mm->cached_hole_size) {
                start_addr = mm->free_area_cache;
        } else {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        addr = ALIGN(start_addr, huge_page_size(h));

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = ALIGN(vma->vm_end, huge_page_size(h));
        }
}
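
/*
 * Top-down search: start just below mm->mmap_base and walk downward,
 * looking for a huge-page-aligned hole of at least "len" bytes.  Falls
 * back to the bottom-up search if nothing fits below the base.
 */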
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        unsigned long largest_hole = mm->cached_hole_size;
        int first_time = 1;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        if (len <= largest_hole) {
                largest_hole = 0;
                mm->free_area_cache = base;
        }
try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested address hole */
        addr = (mm->free_area_cache - len) & huge_page_mask(h);
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                vma = find_vma_prev(mm, addr, &prev_vma);
                if (!vma)
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                    (!prev_vma || (addr >= prev_vma->vm_end))) {
                        /* remember the address as a hint for next time */
                        mm->cached_hole_size = largest_hole;
                        mm->free_area_cache = addr;
                        return addr;
                } else {
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end) {
                                mm->free_area_cache = vma->vm_start;
                                mm->cached_hole_size = largest_hole;
                        }
                }

                /* remember the largest hole we saw so far */
                if (addr + largest_hole < vma->vm_start)
                        largest_hole = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & huge_page_mask(h);
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                largest_hole = 0;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here.  This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;
        mm->cached_hole_size = ~0UL;

        return addr;
}
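
/*
 * Arch entry point for choosing the address of a hugetlb mmap(): validate
 * length and alignment, honor MAP_FIXED and any address hint, then
 * dispatch to the bottom-up or top-down search to match the process's
 * normal mmap layout.
 */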
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
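
/*
 * Parse the "hugepagesz=" boot parameter.  Only PMD_SIZE and PUD_SIZE
 * huge pages are accepted; any other size is rejected with an error.
 */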
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);

        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                pr_err("hugepagesz: Unsupported page size %lu M\n",
                       ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */