/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/uaccess.h>
#include <asm/cpufeature.h>
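
/*
 * Defining CREATE_TRACE_POINTS before including the trace header
 * instantiates the syscall tracepoints declared there.
 */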
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
        CT_WARN_ON(ct_state() != CONTEXT_USER);
        user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif
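
/*
 * Hand the first four syscall arguments to the audit subsystem, using
 * the argument registers of whichever syscall ABI is in use.
 */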
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
        if (arch == AUDIT_ARCH_X86_64) {
                audit_syscall_entry(regs->orig_ax, regs->di,
                                    regs->si, regs->dx, regs->r10);
        } else
#endif
        {
                audit_syscall_entry(regs->orig_ax, regs->bx,
                                    regs->cx, regs->dx, regs->si);
        }
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
        u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

        struct thread_info *ti = current_thread_info();
        unsigned long ret = 0;
        bool emulated = false;
        u32 work;

        if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
                BUG_ON(regs != task_pt_regs(current));
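
        /*
         * Snapshot the entry-work bits once so that the checks below
         * all see a consistent value even if the flags change under us.
         */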
        work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

        if (unlikely(work & _TIF_SYSCALL_EMU))
                emulated = true;

        if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
            tracehook_report_syscall_entry(regs))
                return -1L;
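
        /*
         * With TIF_SYSCALL_EMU set, the tracer emulates the syscall
         * itself; returning -1 skips the real one.
         */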
        if (emulated)
                return -1L;

#ifdef CONFIG_SECCOMP
        /*
         * Do seccomp after ptrace, to catch any tracer changes.
         */
        if (work & _TIF_SECCOMP) {
                struct seccomp_data sd;

                sd.arch = arch;
                sd.nr = regs->orig_ax;
                sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
                if (arch == AUDIT_ARCH_X86_64) {
                        sd.args[0] = regs->di;
                        sd.args[1] = regs->si;
                        sd.args[2] = regs->dx;
                        sd.args[3] = regs->r10;
                        sd.args[4] = regs->r8;
                        sd.args[5] = regs->r9;
                } else
#endif
                {
                        sd.args[0] = regs->bx;
                        sd.args[1] = regs->cx;
                        sd.args[2] = regs->dx;
                        sd.args[3] = regs->si;
                        sd.args[4] = regs->di;
                        sd.args[5] = regs->bp;
                }

                ret = __secure_computing(&sd);
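                /* -1 from __secure_computing() means skip this syscall. */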
                if (ret == -1)
                        return ret;
        }
#endif

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->orig_ax);

        do_audit_syscall_entry(regs, arch);

        return ret ?: regs->orig_ax;
}
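
/*
 * Any of these flags being set requires work before we can return to
 * user mode, handled in exit_to_usermode_loop() below.
 */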
#define EXIT_TO_USERMODE_LOOP_FLAGS                             \
        (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |  \
         _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
        /*
         * In order to return to user mode, we need to have IRQs off with
         * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
         * _TIF_UPROBE, or _TIF_NEED_RESCHED set.  Several of these flags
         * can be set at any time on preemptable kernels if we have IRQs on,
         * so we need to loop.  Disabling preemption wouldn't help: doing the
         * work to clear some of the flags can sleep.
         */
        while (true) {
                /* We have work to do. */
                local_irq_enable();

                if (cached_flags & _TIF_NEED_RESCHED)
                        schedule();

                if (cached_flags & _TIF_UPROBE)
                        uprobe_notify_resume(regs);

                /* deal with pending signal delivery */
                if (cached_flags & _TIF_SIGPENDING)
                        do_signal(regs);

                if (cached_flags & _TIF_NOTIFY_RESUME) {
                        clear_thread_flag(TIF_NOTIFY_RESUME);
                        tracehook_notify_resume(regs);
                }

                if (cached_flags & _TIF_USER_RETURN_NOTIFY)
                        fire_user_return_notifiers();

                /* Disable IRQs and retry */
                local_irq_disable();

                cached_flags = READ_ONCE(current_thread_info()->flags);

                if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
                        break;
        }
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
        struct thread_info *ti = current_thread_info();
        u32 cached_flags;

        if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
                local_irq_disable();

        lockdep_sys_exit();

        cached_flags = READ_ONCE(ti->flags);

        if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
                exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
        /*
         * Compat syscalls set TS_COMPAT.  Make sure we clear it before
         * returning to user mode.  We need to clear it *after* signal
         * handling, because syscall restart has a fixup for compat
         * syscalls.  The fixup is exercised by the ptrace_syscall_32
         * selftest.
         *
         * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
         * special case only applies after poking regs and before the
         * very next return to user mode.
         */
        ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

        user_enter_irqoff();
}
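
/* Flags that trigger the slow syscall-exit work below. */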
#define SYSCALL_EXIT_WORK_FLAGS                         \
        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |      \
         _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
        bool step;

        audit_syscall_exit(regs);

        if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
                trace_sys_exit(regs, regs->ax);

        /*
         * If TIF_SYSCALL_EMU is set, we only get here because of
         * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
         * We already reported this syscall instruction in
         * syscall_trace_enter().
         */
        step = unlikely(
                (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
                == _TIF_SINGLESTEP);
        if (step || cached_flags & _TIF_SYSCALL_TRACE)
                tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
        struct thread_info *ti = current_thread_info();
        u32 cached_flags = READ_ONCE(ti->flags);

        CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

        if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
            WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
                local_irq_enable();

        /*
         * First do one-time work.  If these work items are enabled, we
         * want to run them exactly once per syscall exit with IRQs on.
         */
        if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
                syscall_slow_exit_work(regs, cached_flags);

        local_irq_disable();
        prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
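/*
 * 64-bit (and x32) syscall dispatch.  Entered from the SYSCALL entry
 * asm with IRQs off; returns with IRQs off, ready for the switch back
 * to user mode.
 */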
__visible void do_syscall_64(struct pt_regs *regs)
{
        struct thread_info *ti = current_thread_info();
        unsigned long nr = regs->orig_ax;

        enter_from_user_mode();
        local_irq_enable();

        if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
                nr = syscall_trace_enter(regs);

        /*
         * NB: Native and x32 syscalls are dispatched from the same
         * table.  The only functional difference is the x32 bit in
         * regs->orig_ax, which changes the behavior of some syscalls.
         */
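        /*
         * array_index_nospec() clamps the table index even under
         * speculative execution (Spectre v1 hardening).
         */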
        if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
                nr = array_index_nospec(nr & __SYSCALL_MASK, NR_syscalls);
                regs->ax = sys_call_table[nr](
                        regs->di, regs->si, regs->dx,
                        regs->r10, regs->r8, regs->r9);
        }

        syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
        struct thread_info *ti = current_thread_info();
        unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
        ti->status |= TS_COMPAT;
#endif

        if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
                /*
                 * Subtlety here: if ptrace pokes something larger than
                 * 2^32-1 into orig_ax, this truncates it.  This may or
                 * may not be necessary, but it matches the old asm
                 * behavior.
                 */
                nr = syscall_trace_enter(regs);
        }
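
        /*
         * Bounds-check the syscall number; the nospec clamp keeps the
         * index in range under speculation too, as in the 64-bit path.
         */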
        if (likely(nr < IA32_NR_syscalls)) {
                nr = array_index_nospec(nr, IA32_NR_syscalls);
                /*
                 * It's possible that a 32-bit syscall implementation
                 * takes a 64-bit parameter but nonetheless assumes that
                 * the high bits are zero.  Make sure we zero-extend all
                 * of the args.
                 */
                regs->ax = ia32_sys_call_table[nr](
                        (unsigned int)regs->bx, (unsigned int)regs->cx,
                        (unsigned int)regs->dx, (unsigned int)regs->si,
                        (unsigned int)regs->di, (unsigned int)regs->bp);
        }

        syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
        enter_from_user_mode();
        local_irq_enable();
        do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
        /*
         * Called using the internal vDSO SYSENTER/SYSCALL32 calling
         * convention.  Adjust regs so it looks like we entered using int80.
         */
        unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
                vdso_image_32.sym_int80_landing_pad;

        /*
         * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
         * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
         * Fix it up.
         */
        regs->ip = landing_pad;

        enter_from_user_mode();

        local_irq_enable();

        /* Fetch EBP from where the vDSO stashed it. */
        if (
#ifdef CONFIG_X86_64
                /*
                 * Micro-optimization: the pointer we're following is
                 * explicitly 32 bits, so it can't be out of range.
                 */
                __get_user(*(u32 *)&regs->bp,
                           (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
                get_user(*(u32 *)&regs->bp,
                         (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
           ) {

                /* User code screwed up. */
                local_irq_disable();
                regs->ax = -EFAULT;
                prepare_exit_to_usermode(regs);
                return 0;       /* Keep it simple: use IRET. */
        }

        /* Now this is just like a normal syscall. */
        do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
        /*
         * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
         * SYSRETL is available on all 64-bit CPUs, so we don't need to
         * bother with SYSEXIT.
         *
         * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
         * because the ECX fixup above will ensure that this is essentially
         * never the case.
         */
        return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
                regs->ip == landing_pad &&
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
        /*
         * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
         *
         * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
         * because the ECX fixup above will ensure that this is essentially
         * never the case.
         *
         * We don't allow syscalls at all from VM86 mode, but we still
         * need to check VM, because we might be returning from sys_vm86.
         */
        return static_cpu_has(X86_FEATURE_SEP) &&
                regs->cs == __USER_CS && regs->ss == __USER_DS &&
                regs->ip == landing_pad &&
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif