hugetlb.h

#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>

extern struct kmem_cache *hugepte_cache;
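
/*
 * A hugepd_t packs the address of a huge-page PTE table and the shift of
 * the pages it maps into one word: the low bits (HUGEPD_SHIFT_MASK) hold
 * the shift, the rest the pointer.  hugepd_page() masks the shift bits
 * off and ORs PD_HUGE back in to reconstruct the kernel pointer (the
 * encoding stores the address with that bit clear).
 */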
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte.  Just use the first one since they're
	 * all identical.  So for that case, idx=0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(*hpdp);
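	/*
	 * Everywhere else, the directory covers 1 << pdshift bytes and
	 * each huge PTE maps 1 << hugepd_shift() bytes, so the index is
	 * the offset within the directory divided by the huge page size.
	 */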
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif

	return dir + idx;
}
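
/*
 * Walk the page tables for @addr and return the huge PTE, storing the
 * page-size shift of the mapping in @shift.
 */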
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);
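
/*
 * With MM slices (or subpage protection) parts of the address space are
 * reserved for a single page size, so the range check below does real
 * work; on other configurations any range is usable and the stub says so.
 */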
#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif
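
/*
 * Out-of-line helpers: book3e_hugetlb_preload() writes a TLB entry for a
 * newly installed huge PTE, flush_hugetlb_page() invalidates the entry
 * for one address, and hugetlb_free_pgd_range() tears down the page
 * tables backing an unmapped hugepage region.
 */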
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}
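
/*
 * Huge PTEs are stored in the normal page-table format on powerpc, so
 * installing one is just set_pte_at().
 */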
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}
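
/*
 * Atomically clear the huge PTE and return its old value.  pte_update()
 * takes different arguments on 32-bit and 64-bit; on ppc64 the trailing
 * argument flags the PTE as huge.
 */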
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
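
/* Clear the huge PTE and flush any stale TLB entry for @addr. */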
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;
	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}
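
/* The generic pte predicates and transforms apply unchanged to huge PTEs. */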
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write
	 * a TLB entry.  Without this, platforms that don't do a write of the
	 * TLB entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}
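
/*
 * No arch-specific setup or teardown is needed when a huge page is
 * allocated or released.
 */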
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */