pgtable.h

/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
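/*
 * Illustrative sketch (not part of the original header): because vmemmap is
 * pre-biased by memstart_addr, the struct page for a physical RAM address
 * falls out of plain pfn indexing (this is what pfn_to_page() expands to
 * under a vmemmap-based memory model). The variable names are hypothetical:
 *
 *	phys_addr_t pa = ...;				// any valid RAM address
 *	struct page *pg = vmemmap + (pa >> PAGE_SHIFT);	// == pfn_to_page(pa >> PAGE_SHIFT)
 */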
#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/fixmap.h>
#include <linux/mmdebug.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	pfn_to_page(PHYS_PFN(__pa(empty_zero_page)))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))

#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
#define pte_hw_dirty(pte)	(0)
#endif
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check) other than user execute-only mappings, which do
 * not have the PTE_USER bit set. PROT_NONE mappings do not have the
 * PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))
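/*
 * Illustrative use (a sketch, not from the original header): a lockless
 * page-table walker would gate each level on these predicates and fall
 * back to its slow path when access is not permitted:
 *
 *	if (!pte_access_permitted(pte, write))
 *		return 0;	// retry via the slower, locked walk
 */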
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}
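/*
 * Illustrative note: every pte_mk*() modifier below is built from the two
 * helpers above, so they compose freely, e.g. (hypothetical):
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte));	// sets PTE_AF and PTE_DIRTY
 */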
static inline pte_t pte_wrprotect(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_clear_rdonly(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_RDONLY));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}
struct mm_struct;
struct vm_area_struct;

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0        |      1          0          0
 *   0      1        |      1          1          0
 *   1      0        |      1          0          1
 *   1      1        |      0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte)) {
		if (pte_sw_dirty(pte) && pte_write(pte))
			pte_val(pte) &= ~PTE_RDONLY;
		else
			pte_val(pte) |= PTE_RDONLY;
		if (pte_user_exec(pte) && !pte_special(pte))
			__sync_icache_dcache(pte, addr);
	}

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
	    pte_valid(*ptep) && pte_valid(pte)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
}
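/*
 * Illustrative sketch (not from the original header): the PTE_RDONLY fixup
 * above means a clean but writable pte is installed read-only, e.g.:
 *
 *	pte_t pte = mk_pte(page, PAGE_SHARED);	// writable, not yet dirty
 *	set_pte_at(mm, addr, ptep, pte);	// stored with PTE_RDONLY set
 *
 * With hardware DBM the first write clears PTE_RDONLY silently; without it,
 * the write faults and the handler sets the software PTE_DIRTY bit.
 */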
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	pteval_t lhs, rhs;

	lhs = pte_val(pte_a);
	rhs = pte_val(pte_b);

	if (pte_present(pte_a))
		lhs &= ~PTE_RDONLY;
	if (pte_present(pte_b))
		rhs &= ~PTE_RDONLY;

	return (lhs == rhs);
}

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
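/*
 * Illustrative driver-style use (a sketch assuming an fops->mmap handler,
 * not from the original header):
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */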
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else
#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0; })

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
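/*
 * Illustrative sketch (not from the original header): a read-only software
 * walk of the kernel page tables using the accessors above. "addr" is
 * assumed to be a mapped kernel virtual address, and the p?d_none()/
 * p?d_bad() checks and level folding are omitted for brevity:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */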
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
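/*
 * Illustrative use (sketch): mprotect()-style code changes the protection
 * bits while pte_modify() preserves the accessed/dirty state, e.g.:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 */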
#ifdef CONFIG_ARM64_HW_AFDBM
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pteval_t pteval;
	unsigned int tmp, res;

	asm volatile("// __ptep_test_and_clear_young\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
	"	and	%0, %0, %4		// clear PTE_AF\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
	: "L" (~PTE_AF), "I" (ilog2(PTE_AF)));

	return res;
}
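/*
 * Roughly equivalent C for the LL/SC loop above (illustrative only; unlike
 * the asm, this version is not atomic):
 *
 *	pteval_t old = pte_val(*ptep);
 *	int young = !!(old & PTE_AF);
 *	pte_val(*ptep) = old & ~PTE_AF;
 *	return young;
 */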
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pteval_t old_pteval;
	unsigned int tmp;

	asm volatile("// ptep_get_and_clear\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	stxr	%w1, xzr, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));

	return __pte(old_pteval);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("// ptep_set_wrprotect\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	tst	%0, %4			// check for hw dirty (!PTE_RDONLY)\n"
	"	csel	%1, %3, xzr, eq		// set PTE_DIRTY|PTE_RDONLY if dirty\n"
	"	orr	%0, %0, %1		// if !dirty, PTE_RDONLY is already set\n"
	"	and	%0, %0, %5		// clear PTE_WRITE/PTE_DBM\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
	: "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
	: "cc");
}
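/*
 * Roughly equivalent C for the sequence above (illustrative only; the asm
 * performs it atomically under the exclusive monitor):
 *
 *	pteval_t v = pte_val(*ptep);
 *	if (!(v & PTE_RDONLY))			// hardware-dirty pte
 *		v |= PTE_DIRTY | PTE_RDONLY;	// latch into the sw dirty bit
 *	v &= ~PTE_WRITE;			// clear PTE_WRITE/PTE_DBM
 *	pte_val(*ptep) = v;
 */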
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif
#endif /* CONFIG_ARM64_HW_AFDBM */

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
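/*
 * Illustrative round-trip through the encoding above (hypothetical values):
 *
 *	swp_entry_t ent = __swp_entry(3, 0x1234);	// type 3, offset 0x1234
 *	// __swp_type(ent) == 3, __swp_offset(ent) == 0x1234
 *	pte_t pte = __swp_entry_to_pte(ent);		// bits 0-1 stay zero, so !pte_present()
 */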
extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd)	do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */