kaiser.c

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#undef pr_fmt
#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt

#include <asm/kaiser.h>
#include <asm/tlbflush.h>       /* to verify its kaiser declarations */
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/desc.h>
#include <asm/cmdline.h>
#include <asm/vsyscall.h>

int kaiser_enabled __read_mostly = 1;
EXPORT_SYMBOL(kaiser_enabled);  /* for inlined TLB flush functions */

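/*
 * Per-cpu scratch slot used by the entry assembly to stash a register
 * while switching CR3 at points where the kernel stack cannot yet be
 * used.  It lives in the user-mapped per-cpu area so that it is
 * reachable on either CR3.
 */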
__visible
DEFINE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup);

/*
 * These can have bit 63 set, so we can not just use a plain "or"
 * instruction to get their value or'd into CR3.  It would take
 * another register.  So, we use a memory reference to these instead.
 *
 * This is also handy because systems that do not support PCIDs
 * just end up or'ing a 0 into their CR3, which does no harm.
 */
DEFINE_PER_CPU(unsigned long, x86_cr3_pcid_user);

/*
 * At runtime, the only things we map are some things for CPU
 * hotplug, and stacks for new processes.  No two CPUs will ever
 * be populating the same addresses, so we only need to ensure
 * that we protect between two CPUs trying to allocate and
 * populate the same page table page.
 *
 * Only take this lock when doing a set_p[4um]d(); it is not
 * needed for doing a set_pte().  We assume that only the *owner*
 * of a given allocation will be doing this for _their_
 * allocation.
 *
 * This ensures that once a system has been running for a while
 * and there have been stacks all over and these page tables
 * are fully populated, there will be no further acquisitions of
 * this lock.
 */
static DEFINE_SPINLOCK(shadow_table_allocation_lock);

/*
 * Walks the kernel page tables for @vaddr and returns the physical
 * address it maps to.  Returns -1 on error.
 */
static inline unsigned long get_pa_from_mapping(unsigned long vaddr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset_k(vaddr);
        /*
         * We made all the kernel PGDs present in kaiser_init().
         * We expect them to stay that way.
         */
        BUG_ON(pgd_none(*pgd));
        /*
         * PGDs are either 512GB or 128TB on all x86_64
         * configurations.  We don't handle these.
         */
        BUG_ON(pgd_large(*pgd));

        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                WARN_ON_ONCE(1);
                return -1;
        }

        if (pud_large(*pud))
                return (pud_pfn(*pud) << PAGE_SHIFT) | (vaddr & ~PUD_PAGE_MASK);

        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                WARN_ON_ONCE(1);
                return -1;
        }

        if (pmd_large(*pmd))
                return (pmd_pfn(*pmd) << PAGE_SHIFT) | (vaddr & ~PMD_PAGE_MASK);

        pte = pte_offset_kernel(pmd, vaddr);
        if (pte_none(*pte)) {
                WARN_ON_ONCE(1);
                return -1;
        }

        return (pte_pfn(*pte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
}

/*
 * This is a relatively normal page table walk, except that it
 * also tries to allocate page table pages along the way.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *kaiser_pagetable_walk(unsigned long address, bool user)
{
        pmd_t *pmd;
        pud_t *pud;
        pgd_t *pgd = native_get_shadow_pgd(pgd_offset_k(address));
        gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        unsigned long prot = _KERNPG_TABLE;

        if (pgd_none(*pgd)) {
                WARN_ONCE(1, "All shadow pgds should have been populated");
                return NULL;
        }
        BUILD_BUG_ON(pgd_large(*pgd) != 0);

        if (user) {
                /*
                 * The vsyscall page is the only page that will have
                 * _PAGE_USER set.  Catch everything else.
                 */
                BUG_ON(address != VSYSCALL_ADDR);
                set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
                prot = _PAGE_TABLE;
        }

        pud = pud_offset(pgd, address);
        /* The shadow page tables do not use large mappings: */
        if (pud_large(*pud)) {
                WARN_ON(1);
                return NULL;
        }
        if (pud_none(*pud)) {
                unsigned long new_pmd_page = __get_free_page(gfp);
                if (!new_pmd_page)
                        return NULL;
                spin_lock(&shadow_table_allocation_lock);
                if (pud_none(*pud)) {
                        set_pud(pud, __pud(prot | __pa(new_pmd_page)));
                        __inc_zone_page_state(virt_to_page((void *)
                                new_pmd_page), NR_KAISERTABLE);
                } else
                        free_page(new_pmd_page);
                spin_unlock(&shadow_table_allocation_lock);
        }

        pmd = pmd_offset(pud, address);
        /* The shadow page tables do not use large mappings: */
        if (pmd_large(*pmd)) {
                WARN_ON(1);
                return NULL;
        }
        if (pmd_none(*pmd)) {
                unsigned long new_pte_page = __get_free_page(gfp);
                if (!new_pte_page)
                        return NULL;
                spin_lock(&shadow_table_allocation_lock);
                if (pmd_none(*pmd)) {
                        set_pmd(pmd, __pmd(prot | __pa(new_pte_page)));
                        __inc_zone_page_state(virt_to_page((void *)
                                new_pte_page), NR_KAISERTABLE);
                } else
                        free_page(new_pte_page);
                spin_unlock(&shadow_table_allocation_lock);
        }

        return pte_offset_kernel(pmd, address);
}

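/*
 * Map the kernel-virtual range [__start_addr, __start_addr + size) into the
 * shadow page tables, one page at a time, pointing each shadow PTE at the
 * physical page the kernel mapping already uses.  If a shadow PTE is
 * already present it must match what we would have set; a mismatch only
 * triggers a warning.
 */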
static int kaiser_add_user_map(const void *__start_addr, unsigned long size,
                               unsigned long flags)
{
        int ret = 0;
        pte_t *pte;
        unsigned long start_addr = (unsigned long)__start_addr;
        unsigned long address = start_addr & PAGE_MASK;
        unsigned long end_addr = PAGE_ALIGN(start_addr + size);
        unsigned long target_address;

        /*
         * It is convenient for callers to pass in __PAGE_KERNEL etc,
         * and there is no actual harm from setting _PAGE_GLOBAL, so
         * long as CR4.PGE is not set.  But it is nonetheless troubling
         * to see Kaiser itself setting _PAGE_GLOBAL (now that "nokaiser"
         * requires that not to be #defined to 0): so mask it off here.
         */
        flags &= ~_PAGE_GLOBAL;
        if (!(__supported_pte_mask & _PAGE_NX))
                flags &= ~_PAGE_NX;

        for (; address < end_addr; address += PAGE_SIZE) {
                target_address = get_pa_from_mapping(address);
                if (target_address == -1) {
                        ret = -EIO;
                        break;
                }
                pte = kaiser_pagetable_walk(address, flags & _PAGE_USER);
                if (!pte) {
                        ret = -ENOMEM;
                        break;
                }
                if (pte_none(*pte)) {
                        set_pte(pte, __pte(flags | target_address));
                } else {
                        pte_t tmp;
                        set_pte(&tmp, __pte(flags | target_address));
                        WARN_ON_ONCE(!pte_same(*pte, tmp));
                }
        }
        return ret;
}

static int kaiser_add_user_map_ptrs(const void *start, const void *end, unsigned long flags)
{
        unsigned long size = end - start;

        return kaiser_add_user_map(start, size, flags);
}

/*
 * Ensure that the top level of the (shadow) page tables is
 * entirely populated.  This ensures that all processes that get
 * forked have the same entries.  This way, we do not have to
 * ever go set up new entries in older processes.
 *
 * Note: we never free these, so there are no updates to them
 * after this.
 */
static void __init kaiser_init_all_pgds(void)
{
        pgd_t *pgd;
        int i = 0;

        pgd = native_get_shadow_pgd(pgd_offset_k((unsigned long)0));
        for (i = PTRS_PER_PGD / 2; i < PTRS_PER_PGD; i++) {
                pgd_t new_pgd;
                pud_t *pud = pud_alloc_one(&init_mm,
                                           PAGE_OFFSET + i * PGDIR_SIZE);
                if (!pud) {
                        WARN_ON(1);
                        break;
                }
                inc_zone_page_state(virt_to_page(pud), NR_KAISERTABLE);
                new_pgd = __pgd(_KERNPG_TABLE | __pa(pud));
                /*
                 * Make sure not to stomp on some other pgd entry.
                 */
                if (!pgd_none(pgd[i])) {
                        WARN_ON(1);
                        continue;
                }
                set_pgd(pgd + i, new_pgd);
        }
}

#define kaiser_add_user_map_early(start, size, flags) do {      \
        int __ret = kaiser_add_user_map(start, size, flags);    \
        WARN_ON(__ret);                                          \
} while (0)

#define kaiser_add_user_map_ptrs_early(start, end, flags) do {          \
        int __ret = kaiser_add_user_map_ptrs(start, end, flags);        \
        WARN_ON(__ret);                                                  \
} while (0)

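/*
 * Parse the early boot command line to decide whether page table isolation
 * is enabled: "pti=on", "pti=off", "pti=auto" and the legacy "nopti" are
 * honoured.  Xen PV guests disable it silently, and in the "auto" case it
 * is left disabled on AMD CPUs.
 */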
void __init kaiser_check_boottime_disable(void)
{
        bool enable = true;
        char arg[5];
        int ret;

        if (boot_cpu_has(X86_FEATURE_XENPV))
                goto silent_disable;

        ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
        if (ret > 0) {
                if (!strncmp(arg, "on", 2))
                        goto enable;

                if (!strncmp(arg, "off", 3))
                        goto disable;

                if (!strncmp(arg, "auto", 4))
                        goto skip;
        }

        if (cmdline_find_option_bool(boot_command_line, "nopti"))
                goto disable;

skip:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
                goto disable;

enable:
        if (enable)
                setup_force_cpu_cap(X86_FEATURE_KAISER);

        return;

disable:
        pr_info("disabled\n");

silent_disable:
        kaiser_enabled = 0;
        setup_clear_cpu_cap(X86_FEATURE_KAISER);
}

/*
 * If anything in here fails, we will likely die on one of the
 * first kernel->user transitions and init will die.  But, we
 * will have most of the kernel up by then and should be able to
 * get a clean warning out of it.  If we BUG_ON() here, we run
 * the risk of doing so before we have good console output.
 */
void __init kaiser_init(void)
{
        int cpu;

        if (!kaiser_enabled)
                return;

        kaiser_init_all_pgds();

        /*
         * Note that this sets _PAGE_USER and it needs to happen when the
         * pagetable hierarchy gets created, i.e., early.  Otherwise
         * kaiser_pagetable_walk() will encounter initialized PTEs in the
         * hierarchy and not set the proper permissions, leading to page
         * faults with page-protection violations when, for example, trying
         * to read the vsyscall page.
         */
        if (vsyscall_enabled())
                kaiser_add_user_map_early((void *)VSYSCALL_ADDR,
                                          PAGE_SIZE,
                                          vsyscall_pgprot);

        for_each_possible_cpu(cpu) {
                void *percpu_vaddr = __per_cpu_user_mapped_start +
                                     per_cpu_offset(cpu);
                unsigned long percpu_sz = __per_cpu_user_mapped_end -
                                          __per_cpu_user_mapped_start;
                kaiser_add_user_map_early(percpu_vaddr, percpu_sz,
                                          __PAGE_KERNEL);
        }

        /*
         * Map the entry/exit text section, which is needed at
         * switches from user to kernel and back.
         */
        kaiser_add_user_map_ptrs_early(__entry_text_start, __entry_text_end,
                                       __PAGE_KERNEL_RX);

#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
        kaiser_add_user_map_ptrs_early(__irqentry_text_start,
                                       __irqentry_text_end,
                                       __PAGE_KERNEL_RX);
#endif
        kaiser_add_user_map_early((void *)idt_descr.address,
                                  sizeof(gate_desc) * NR_VECTORS,
                                  __PAGE_KERNEL_RO);
#ifdef CONFIG_TRACING
        kaiser_add_user_map_early(&trace_idt_descr,
                                  sizeof(trace_idt_descr),
                                  __PAGE_KERNEL);
        kaiser_add_user_map_early(&trace_idt_table,
                                  sizeof(gate_desc) * NR_VECTORS,
                                  __PAGE_KERNEL);
#endif
        kaiser_add_user_map_early(&debug_idt_descr, sizeof(debug_idt_descr),
                                  __PAGE_KERNEL);
        kaiser_add_user_map_early(&debug_idt_table,
                                  sizeof(gate_desc) * NR_VECTORS,
                                  __PAGE_KERNEL);

        pr_info("enabled\n");
}

/* Add a mapping to the shadow mapping, and synchronize the mappings */
int kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags)
{
        if (!kaiser_enabled)
                return 0;
        return kaiser_add_user_map((const void *)addr, size, flags);
}

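/*
 * Remove a range from the shadow page tables.  This clears the shadow PTEs
 * covering the range but leaves the shadow page table pages themselves in
 * place (the "_nofree" variant of the unmap helper), so the range can be
 * re-added later without reallocating them.
 */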
void kaiser_remove_mapping(unsigned long start, unsigned long size)
{
        extern void unmap_pud_range_nofree(pgd_t *pgd,
                                unsigned long start, unsigned long end);
        unsigned long end = start + size;
        unsigned long addr, next;
        pgd_t *pgd;

        if (!kaiser_enabled)
                return;
        pgd = native_get_shadow_pgd(pgd_offset_k(start));
        for (addr = start; addr < end; pgd++, addr = next) {
                next = pgd_addr_end(addr, end);
                unmap_pud_range_nofree(pgd, addr, next);
        }
}

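/*
 * Usage sketch for the two calls above (illustrative only; the buffer and
 * cleanup label below are hypothetical, not callers in this file).  Code
 * allocating memory that must stay visible on the user CR3 across the
 * entry/exit path would do roughly:
 *
 *      buf = __get_free_pages(GFP_KERNEL, order);
 *      if (kaiser_add_mapping(buf, PAGE_SIZE << order, __PAGE_KERNEL))
 *              goto err_free;
 *      ...
 *      kaiser_remove_mapping(buf, PAGE_SIZE << order);
 *      free_pages(buf, order);
 */
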
/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 * This returns true for user pages that need to get copied into
 * both the user and kernel copies of the page tables, and false
 * for kernel pages that should only be in the kernel copy.
 */
static inline bool is_userspace_pgd(pgd_t *pgdp)
{
        return ((unsigned long)pgdp % PAGE_SIZE) < (PAGE_SIZE / 2);
}

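/*
 * Called on the set_pgd() path whenever a pgd entry is written.  Mirrors
 * userspace entries into the shadow pgd and returns the value that should
 * actually be written into the kernel pgd, possibly with _PAGE_NX added so
 * that userspace cannot run from it while on the kernel CR3.
 */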
pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd)
{
        if (!kaiser_enabled)
                return pgd;
        /*
         * Do we need to also populate the shadow pgd?  Check _PAGE_USER to
         * skip cases like kexec and EFI which make temporary low mappings.
         */
        if (pgd.pgd & _PAGE_USER) {
                if (is_userspace_pgd(pgdp)) {
                        native_get_shadow_pgd(pgdp)->pgd = pgd.pgd;
                        /*
                         * Even if the entry is *mapping* userspace, ensure
                         * that userspace can not use it.  This way, if we
                         * get out to userspace running on the kernel CR3,
                         * userspace will crash instead of running.
                         */
                        if (__supported_pte_mask & _PAGE_NX)
                                pgd.pgd |= _PAGE_NX;
                }
        } else if (!pgd.pgd) {
                /*
                 * pgd_clear() cannot check _PAGE_USER, and is even used to
                 * clear corrupted pgd entries: so just rely on cases like
                 * kexec and EFI never to be using pgd_clear().
                 */
                if (!WARN_ON_ONCE((unsigned long)pgdp & PAGE_SIZE) &&
                    is_userspace_pgd(pgdp))
                        native_get_shadow_pgd(pgdp)->pgd = pgd.pgd;
        }
        return pgd;
}

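/*
 * Precompute, per cpu, the value that the exit-to-user code will or into
 * CR3: the shadow pgd offset, plus the user PCID and the NOFLUSH bit when
 * the CPU supports PCIDs.
 */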
void kaiser_setup_pcid(void)
{
        unsigned long user_cr3 = KAISER_SHADOW_PGD_OFFSET;

        if (this_cpu_has(X86_FEATURE_PCID))
                user_cr3 |= X86_CR3_PCID_USER_NOFLUSH;
        /*
         * These variables are used by the entry/exit
         * code to change PCID and pgd and TLB flushing.
         */
        this_cpu_write(x86_cr3_pcid_user, user_cr3);
}

/*
 * Make a note that this cpu will need to flush USER tlb on return to user.
 * If cpu does not have PCID, then the NOFLUSH bit will never have been set.
 */
void kaiser_flush_tlb_on_return_to_user(void)
{
        if (this_cpu_has(X86_FEATURE_PCID))
                this_cpu_write(x86_cr3_pcid_user,
                        X86_CR3_PCID_USER_FLUSH | KAISER_SHADOW_PGD_OFFSET);
}
EXPORT_SYMBOL(kaiser_flush_tlb_on_return_to_user);