pgtable.h

#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
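
/* The kernel's initial page directory, used by the swapper/idle task */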
extern pgd_t swapper_pg_dir[];
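
/*
 * Restrict a memory zone to pfns below a platform limit, and map a
 * device's DMA pfn limit back to the most permissive zone that
 * satisfies it.
 */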
void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn);
int dma_pfn_limit_to_zone(u64 pfn_limit);
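
/* Boot-time initialisation of the memory zones and kernel paging */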
extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
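
/* Fast get_user_pages() helper for a hugepage pte covering [addr, end) */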
extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write,
		       struct page **pages, int *nr);
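
/* With transparent hugepages disabled, a pmd entry never maps a huge page */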
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif
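
/*
 * Walk the Linux page tables for @ea and return a pointer to the pte of
 * a normal, hugetlb or transparent-huge page; *is_thp and *shift report
 * what was found. The caller must have interrupts disabled so the page
 * tables cannot be freed underneath it, which the inline wrapper below
 * asserts with VM_WARN().
 */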
pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				   bool *is_thp, unsigned *shift);
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
					       bool *is_thp, unsigned *shift)
{
	VM_WARN(!arch_irqs_disabled(),
		"%s called with irq enabled\n", __func__);
	return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
}
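
/* Return the physical address backing a vmalloc address */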
unsigned long vmalloc_to_phys(void *vmalloc_addr);

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_H */