pgtable.h 7.2 KB

#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

#ifndef __ASSEMBLY__

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
}
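
/*
 * Note on pte_write(): nohash platforms express write permission either as
 * a writable bit (_PAGE_RW) or as a read-only bit (_PAGE_RO), with whichever
 * one a platform does not use defined as 0, so the single test above covers
 * both conventions.
 */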
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h. On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) &
		(_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */
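
/*
 * Put differently: for NUMA balancing, a PROT_NONE page is one whose PTE is
 * still valid to the hardware (_PAGE_PRESENT set) but from which user access
 * has been removed (_PAGE_USER clear), so the next user access faults and the
 * NUMA hinting code can act on it. Kernel mappings never carry _PAGE_USER,
 * which is why the check always returns true for them, as noted above.
 */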

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
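
/*
 * Illustrative only (not something defined in this header): the two helpers
 * above are inverses over the PFN field, e.g. for some struct page *page:
 *
 *	pte_t pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 *	unsigned long pfn = pte_pfn(pte);	// == page_to_pfn(page)
 */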

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_basic_t ptev;

	ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE);
	ptev |= _PAGE_RO;
	return __pte(ptev);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_basic_t ptev;

	ptev = pte_val(pte) & ~_PAGE_RO;
	ptev |= _PAGE_RW;
	return __pte(ptev);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
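
/*
 * Typical pte_modify() use (illustrative, from generic mm code rather than
 * this header): when reprotecting an existing mapping, the PFN and the bits
 * in _PAGE_CHG_MASK are preserved and only the protection bits change:
 *
 *	pte = pte_modify(pte, vma->vm_page_prot);
 */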

/*
 * Insert a PTE. The top-level function is out of line; it uses an inline
 * low-level helper in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/*
 * This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (i.e., same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update preserving
	 * the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode, we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
	 * so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;

#ifdef CONFIG_PPC_BOOK3E_64
	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote. Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (is_kernel_addr(addr))
		mb();
#endif
#endif
}
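
/*
 * Note: callers in generic mm code use set_pte_at() declared above, which
 * ends up calling __set_pte_at() with percpu == 0. The percpu == 1 path is
 * reserved for strictly per-CPU mappings such as kmap_atomic() slots, where
 * no other CPU can update the same PTE concurrently.
 */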

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to mark a page protection value as "uncacheable" or otherwise
 * adjust its cache attributes.
 */
#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
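
/*
 * Illustrative driver-side use (not defined in this header): a device mmap()
 * handler typically makes its VMA uncacheable before remapping, with pfn
 * standing in for the device's page frame number:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */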

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
	return (hpd.pd > 0);
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int pgd_huge(pgd_t pgd)
{
	return 0;
}
#define pgd_huge		pgd_huge

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#endif /* CONFIG_HUGETLB_PAGE */
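
/*
 * On these platforms huge pages are reached through hugepd directory entries
 * rather than huge leaf PMD/PUD/PGD entries, which is why pmd_huge(),
 * pud_huge() and pgd_huge() above unconditionally return 0 and is_hugepd()
 * reduces to the hugepd_ok() check.
 */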

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */