idmap.c

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/system_info.h>

/*
 * Note: accesses outside of the kernel image and the identity map area
 * are not supported on any CPU using the idmap tables as its current
 * page tables.
 */
pgd_t *idmap_pgd;
/*
 * Non-zero on platforms where the identity map must target a different
 * physical alias than the one the kernel is running from; applied by
 * virt_to_idmap().
 */
long long arch_phys_to_idmap_offset;
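
/*
 * Fill in section-sized identity mappings for [addr, end) at the pmd
 * level.  The LPAE variant below may need to allocate a pmd table of
 * its own, taking care not to lose the pmd entries covering the kernel
 * image; the classic 2-level variant further down writes the two 1MiB
 * section descriptors directly.
 */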
#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warn("Failed to allocate identity pmd.\n");
			return;
		}
		/*
		 * Copy the original PMD to ensure that the PMD entries for
		 * the kernel image are preserved.
		 */
		if (!pud_none(*pud))
			memcpy(pmd, pmd_offset(pud, 0),
			       PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
#else	/* !CONFIG_ARM_LPAE */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/* A 2MiB pmd entry is backed by two 1MiB hardware sections. */
	addr = (addr & PMD_MASK) | prot;
	pmd[0] = __pmd(addr);
	addr += SECTION_SIZE;
	pmd[1] = __pmd(addr);
	flush_pmd_entry(pmd);
}
#endif	/* CONFIG_ARM_LPAE */
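
/*
 * Walk the pud level for [addr, end), delegating each naturally
 * aligned chunk to idmap_add_pmd().
 */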
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		idmap_add_pmd(pud, addr, next, prot);
	} while (pud++, addr = next, addr != end);
}
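
/*
 * Install a 1:1 (idmap address == mapped address) section mapping for
 * the region [text_start, text_end) into the page tables rooted at
 * pgd.
 */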
static void identity_mapping_add(pgd_t *pgd, const char *text_start,
				 const char *text_end, unsigned long prot)
{
	unsigned long addr, end;
	unsigned long next;

	addr = virt_to_idmap(text_start);
	end = virt_to_idmap(text_end);
	pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);

	prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;

	/* Bit 4 of a section descriptor must be set on pre-ARMv6 cores. */
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale_family())
		prot |= PMD_BIT4;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		idmap_add_pud(pgd, addr, next, prot);
	} while (pgd++, addr = next, addr != end);
}

extern char __idmap_text_start[], __idmap_text_end[];
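
/*
 * Set up the static identity map for the .idmap.text section at early
 * boot.  Code placed in that section (e.g. the MMU enable/disable
 * trampolines) can then run with a 1:1 mapping while the MMU state is
 * being changed.
 */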
static int __init init_static_idmap(void)
{
	idmap_pgd = pgd_alloc(&init_mm);
	if (!idmap_pgd)
		return -ENOMEM;

	identity_mapping_add(idmap_pgd, __idmap_text_start,
			     __idmap_text_end, 0);

	/* Flush L1 for the hardware to see this page table content */
	flush_cache_louis();

	return 0;
}
early_initcall(init_static_idmap);

/*
 * In order to soft-boot, we need to switch to a 1:1 mapping for the
 * cpu_reset functions.  This will then ensure that we have predictable
 * results when turning off the MMU.
 */
void setup_mm_for_reboot(void)
{
	/* Switch to the identity mapping. */
	cpu_switch_mm(idmap_pgd, &init_mm);
	local_flush_bp_all();

#ifdef CONFIG_CPU_HAS_ASID
	/*
	 * We don't have a clean ASID for the identity mapping, which
	 * may clash with virtual addresses of the previous page tables
	 * and therefore potentially still be live in the TLB.
	 */
	local_flush_tlb_all();
#endif
}
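
/*
 * Usage sketch (not part of this file): the ARM soft-restart path in
 * arch/arm/kernel/reboot.c switches to the idmap before turning the
 * MMU off, roughly:
 *
 *	setup_mm_for_reboot();
 *	...
 *	cpu_reset(reboot_entry);	/* jumps with the MMU disabled *​/
 *
 * so instruction fetches keep hitting the same physical addresses once
 * address translation is gone.
 */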