huge_mm.h

#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H
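/*
 * Transparent hugepage (THP) support: declarations for the huge-pmd
 * operations implemented in mm/huge_memory.c and called from the generic
 * memory-management code whenever a pmd may map a huge page.
 */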
extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm);
extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr, unsigned long end,
			    unsigned char *vec);
extern int move_huge_pmd(struct vm_area_struct *vma,
			 struct vm_area_struct *new_vma,
			 unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			   unsigned long addr, pgprot_t newprot);
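/*
 * A sketch of how the fault path of this kernel generation dispatches to
 * the anonymous huge page handler (illustrative, condensed from the logic
 * in mm/memory.c; not part of this header):
 *
 *	pmd = pmd_alloc(mm, pud, address);
 *	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
 *		return do_huge_pmd_anonymous_page(mm, vma, address,
 *						  pmd, flags);
 *
 * The flags below index bits of transparent_hugepage_flags; they are
 * toggled at runtime through /sys/kernel/mm/transparent_hugepage/.
 */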
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};
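/*
 * Modes for page_check_address_pmd(): on a reading of mm/huge_memory.c,
 * the _NOTSPLITTING variant refuses a pmd whose splitting bit is already
 * set, the _SPLITTING variant expects to find one under splitting, and
 * the plain flag applies no such constraint.
 */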
enum page_check_address_pmd_flag {
	PAGE_CHECK_ADDRESS_PMD_FLAG,
	PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
	PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
};
extern pmd_t *page_check_address_pmd(struct page *page,
				     struct mm_struct *mm,
				     unsigned long address,
				     enum page_check_address_pmd_flag flag);
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
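/*
 * Worked example: on x86-64 with 2MB huge pages, HPAGE_PMD_SHIFT is 21 and
 * PAGE_SHIFT is 12, so HPAGE_PMD_ORDER is 9 and HPAGE_PMD_NR is 512 base
 * pages per huge page. These macros expand lazily, so it is fine that
 * HPAGE_PMD_SHIFT is only defined below (or as a BUILD_BUG() in !THP
 * builds).
 */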
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT HPAGE_SHIFT
#define HPAGE_PMD_MASK HPAGE_MASK
#define HPAGE_PMD_SIZE HPAGE_SIZE

#define transparent_hugepage_enabled(__vma)			\
	((transparent_hugepage_flags &				\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||			\
	  (transparent_hugepage_flags &				\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&		\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&		\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&		\
	 !is_vma_temporary_stack(__vma))
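/*
 * In words: a vma is eligible when THP is enabled system-wide, or enabled
 * for madvise-marked regions and the vma carries VM_HUGEPAGE; a vma with
 * VM_NOHUGEPAGE, or one backing a temporary stack, is never eligible.
 */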
#define transparent_hugepage_defrag(__vma)			\
	((transparent_hugepage_flags &				\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||		\
	 (transparent_hugepage_flags &				\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&	\
	  (__vma)->vm_flags & VM_HUGEPAGE))
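/*
 * The defrag knob follows the same system-wide/madvise pattern; huge page
 * allocation sites consult it to decide whether the allocation may wait
 * for reclaim and compaction (an assumption from the flag names and the
 * sysfs interface, not spelled out in this header).
 */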
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()			\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			  pmd_t *dst_pmd, pmd_t *src_pmd,
			  struct vm_area_struct *vma,
			  unsigned long addr, unsigned long end);
extern int handle_pte_fault(struct mm_struct *mm,
			    struct vm_area_struct *vma, unsigned long address,
			    pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page(struct page *page);
extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
#define split_huge_page_pmd(__mm, __pmd)			\
	do {							\
		pmd_t *____pmd = (__pmd);			\
		if (unlikely(pmd_trans_huge(*____pmd)))		\
			__split_huge_page_pmd(__mm, ____pmd);	\
	} while (0)
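/*
 * Usage sketch (hypothetical caller): page-table walkers that can only
 * handle regular ptes split the huge pmd first and then walk pte by pte:
 *
 *	split_huge_page_pmd(mm, pmd);
 *	if (pmd_none_or_clear_bad(pmd))
 *		return;
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	... operate on individual ptes ...
 */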
#define wait_split_huge_page(__anon_vma, __pmd)			\
	do {							\
		pmd_t *____pmd = (__pmd);			\
		anon_vma_lock(__anon_vma);			\
		anon_vma_unlock(__anon_vma);			\
		BUG_ON(pmd_trans_splitting(*____pmd) ||		\
		       pmd_trans_huge(*____pmd));		\
	} while (0)
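/*
 * The lock/unlock pair above is the whole wait: a splitter holds the
 * anon_vma lock for the duration of the split, so merely acquiring and
 * releasing it guarantees that any split in flight has finished, which
 * the BUG_ON then asserts.
 */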
#if HPAGE_PMD_ORDER > MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern int __pmd_trans_huge_lock(pmd_t *pmd,
				 struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline int pmd_trans_huge_lock(pmd_t *pmd,
				      struct vm_area_struct *vma)
{
	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
	if (pmd_trans_huge(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return 0;
}
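/*
 * Usage sketch (illustrative; the return-value contract lives in
 * mm/huge_memory.c): a return of 1 means the pmd is a stable huge pmd
 * and the page table lock is held, so the caller operates on it and
 * drops the lock itself:
 *
 *	if (pmd_trans_huge_lock(pmd, vma) == 1) {
 *		... handle the huge pmd ...
 *		spin_unlock(&vma->vm_mm->page_table_lock);
 *	}
 */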
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
	if (!vma->anon_vma || vma->vm_ops)
		return;
	__vma_adjust_trans_huge(vma, start, end, adjust_next);
}
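/*
 * Number of base pages backing @page: HPAGE_PMD_NR for a transparent huge
 * page, one otherwise.
 */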
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

static inline struct page *compound_trans_head(struct page *page)
{
	if (PageTail(page)) {
		struct page *head;
		head = page->first_page;
		smp_rmb();
		/*
		 * head may be a dangling pointer.
		 * __split_huge_page_refcount clears PageTail before
		 * overwriting first_page, so if PageTail is still
		 * there it means the head pointer isn't dangling.
		 */
		if (PageTail(page))
			return head;
	}
	return page;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
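/*
 * !CONFIG_TRANSPARENT_HUGEPAGE: stub everything out so callers need no
 * #ifdefs of their own. The BUILD_BUG() definitions turn any use of the
 * HPAGE_PMD_* constants in code that should have been optimized away
 * into a compile-time failure.
 */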
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0
#define transparent_hugepage_flags 0UL
static inline int split_huge_page(struct page *page)
{
	return 0;
}
#define split_huge_page_pmd(__mm, __pmd)	\
	do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd)	\
	do { } while (0)
#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int pmd_trans_huge_lock(pmd_t *pmd,
				      struct vm_area_struct *vma)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */