traps.c

/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mce.h>
#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.
 */
gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
#endif

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
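
/*
 * Helpers for the exception entry paths below: the *_sti variants
 * re-enable interrupts only if the interrupted context had them
 * enabled (IF set in the saved flags), and the *_cli variants undo
 * that.  The preempt_* variants additionally bump the preempt count,
 * so handlers that may run on an IST stack (do_int3, do_debug) cannot
 * be preempted while that per-CPU stack is in use.
 */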
static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        inc_preempt_count();
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
        dec_preempt_count();
}

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
        long error_code, siginfo_t *info)
{
        struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK) {
                /*
                 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
                 * On nmi (interrupt 2), do_trap should not be called.
                 */
                if (trapnr < X86_TRAP_UD)
                        goto vm86_trap;
                goto trap_signal;
        }
#endif

        if (!user_mode(regs))
                goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
        /*
         * We want error_code and trap_nr set for userspace faults and
         * kernelspace faults which result in die(), but not
         * kernelspace faults which are fixed up.  die() gives the
         * process no chance to handle the signal and notice the
         * kernel fault information, so that won't result in polluting
         * the information about previously queued, but not yet
         * delivered, faults.  See also do_general_protection below.
         */
        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
            printk_ratelimit()) {
                printk(KERN_INFO
                       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
                       tsk->comm, tsk->pid, str,
                       regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                printk("\n");
        }
#endif

        if (info)
                force_sig_info(signr, info, tsk);
        else
                force_sig(signr, tsk);
        return;

kernel_trap:
        if (!fixup_exception(regs)) {
                tsk->thread.error_code = error_code;
                tsk->thread.trap_nr = trapnr;
                die(str, regs, error_code);
        }
        return;

#ifdef CONFIG_X86_32
vm86_trap:
        if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
                             error_code, trapnr))
                goto trap_signal;
        return;
#endif
}
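
/*
 * The DO_ERROR* macros below stamp out the simple exception handlers:
 * each generated do_<name>() notifies the die chain, conditionally
 * re-enables interrupts, and hands off to do_trap().  DO_ERROR_INFO
 * additionally fills in a siginfo_t with the signal code and faulting
 * address to queue for the task.
 */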
#define DO_ERROR(trapnr, signr, str, name)                              \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)    \
{                                                                       \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, NULL);            \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)        \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)    \
{                                                                       \
        siginfo_t info;                                                 \
        info.si_signo = signr;                                          \
        info.si_errno = 0;                                              \
        info.si_code = sicode;                                          \
        info.si_addr = (void __user *)siaddr;                           \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, &info);           \
}
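
/*
 * For example, DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error",
 * divide_error, FPE_INTDIV, regs->ip) expands to do_divide_error(),
 * which sends SIGFPE with si_code = FPE_INTDIV and si_addr pointing
 * at the faulting instruction.
 */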
DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
                regs->ip)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
                regs->ip)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
                coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
                BUS_ADRALN, 0)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

#ifdef CONFIG_X86_ESPFIX64
        extern unsigned char native_irq_return_iret[];

        /*
         * If IRET takes a non-IST fault on the espfix64 stack, then we
         * end up promoting it to a doublefault.  In that case, modify
         * the stack to make it look like we just entered the #GP
         * handler from user space, similar to bad_iret.
         */
        if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
            regs->cs == __KERNEL_CS &&
            regs->ip == (unsigned long)native_irq_return_iret)
        {
                struct pt_regs *normal_regs = task_pt_regs(current);

                /* Fake a #GP(0) from userspace. */
                memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
                normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */
                regs->ip = (unsigned long)general_protection;
                regs->sp = (unsigned long)&normal_regs->orig_ax;
                return;
        }
#endif

        /* Return not checked because a double fault cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_DF;

        /*
         * This is always a kernel trap and never fixable (and thus must
         * never return).
         */
        for (;;)
                die(str, regs, error_code);
}
#endif

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk;

        conditional_sti(regs);

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK)
                goto gp_in_vm86;
#endif

        tsk = current;
        if (!user_mode(regs))
                goto gp_in_kernel;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_GP;

        if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
            printk_ratelimit()) {
                printk(KERN_INFO
                       "%s[%d] general protection ip:%lx sp:%lx error:%lx",
                       tsk->comm, task_pid_nr(tsk),
                       regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                printk("\n");
        }

        force_sig(SIGSEGV, tsk);
        return;

#ifdef CONFIG_X86_32
gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;
#endif

gp_in_kernel:
        if (fixup_exception(regs))
                return;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_GP;
        if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
                       X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
                return;
        die("general protection fault", regs, error_code);
}

/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                         SIGTRAP) == NOTIFY_STOP)
                return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

        if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                       SIGTRAP) == NOTIFY_STOP)
                return;

        /*
         * Let others (NMI) know that the debug stack is in use
         * as we may switch to the interrupt stack.
         */
        debug_stack_usage_inc();
        preempt_conditional_sti(regs);
        do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
        debug_stack_usage_dec();
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage notrace __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = eregs;
        /* Did already sync */
        if (eregs == (struct pt_regs *)eregs->sp)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
                regs = task_pt_regs(current);
        /*
         * Exception from kernel and interrupts are enabled. Move to
         * kernel process stack.
         */
        else if (eregs->flags & X86_EFLAGS_IF)
                regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
        if (eregs != regs)
                *regs = *eregs;
        return regs;
}
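
/*
 * Layout of the stack that the entry code hands to fixup_bad_iret()
 * when the IRET instruction itself faults: error_entry's saved return
 * address, followed by the partially-built register frame.
 */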
struct bad_iret_stack {
        void *error_entry_ret;
        struct pt_regs regs;
};

asmlinkage notrace __kprobes
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
        /*
         * This is called from entry_64.S early in handling a fault
         * caused by a bad iret to user mode.  To handle the fault
         * correctly, we want to move our stack frame to task_pt_regs
         * and we want to pretend that the exception came from the
         * iret target.
         */
        struct bad_iret_stack *new_stack =
                container_of(task_pt_regs(current),
                             struct bad_iret_stack, regs);

        /* Copy the IRET target to the new stack. */
        memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

        /* Copy the remainder of the stack from the current stack. */
        memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

        BUG_ON(!user_mode_vm(&new_stack->regs));
        return new_stack;
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk = current;
        int user_icebp = 0;
        unsigned long dr6;
        int si_code;

        get_debugreg(dr6, 6);

        /* Filter out all the reserved bits which are preset to 1 */
        dr6 &= ~DR6_RESERVED;

        /*
         * If dr6 has no reason to give us about the origin of this trap,
         * then it's very likely the result of an icebp/int01 trap.
         * User wants a sigtrap for that.
         */
        if (!dr6 && user_mode_vm(regs))
                user_icebp = 1;

        /* Catch kmemcheck conditions first of all! */
        if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
                return;

        /* DR6 may or may not be cleared by the CPU */
        set_debugreg(0, 6);

        /*
         * The processor cleared BTF, so don't mark that we need it set.
         */
        clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

        /* Store the virtualized DR6 value */
        tsk->thread.debugreg6 = dr6;
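
        /*
         * The address of the on-stack dr6 copy is passed to the
         * DIE_DEBUG notifier chain via PTR_ERR(), so that handlers
         * such as the hw_breakpoint code can inspect and clear the
         * DR6 bits they have consumed before we act on them.
         */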
        if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
                       SIGTRAP) == NOTIFY_STOP)
                return;

        /*
         * Let others (NMI) know that the debug stack is in use
         * as we may switch to the interrupt stack.
         */
        debug_stack_usage_inc();

        /* It's safe to allow irq's after DR6 has been saved */
        preempt_conditional_sti(regs);

        if (regs->flags & X86_VM_MASK) {
                handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
                                 X86_TRAP_DB);
                preempt_conditional_cli(regs);
                debug_stack_usage_dec();
                return;
        }

        /*
         * Single-stepping through system calls: ignore any exceptions in
         * kernel space, but re-enable TF when returning to user mode.
         *
         * We already checked v86 mode above, so we can check for kernel mode
         * by just checking the CPL of CS.
         */
        if ((dr6 & DR_STEP) && !user_mode(regs)) {
                tsk->thread.debugreg6 &= ~DR_STEP;
                set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
                regs->flags &= ~X86_EFLAGS_TF;
        }

        si_code = get_si_code(tsk->thread.debugreg6);
        if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
                send_sigtrap(tsk, regs, error_code, si_code);
        preempt_conditional_cli(regs);
        debug_stack_usage_dec();

        return;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
        struct task_struct *task = current;
        siginfo_t info;
        unsigned short err;
        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
                                                "simd exception";

        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
                return;
        conditional_sti(regs);

        if (!user_mode_vm(regs))
        {
                if (!fixup_exception(regs)) {
                        task->thread.error_code = error_code;
                        task->thread.trap_nr = trapnr;
                        die(str, regs, error_code);
                }
                return;
        }

        /*
         * Save the info for the exception handler and clear the error.
         */
        save_init_fpu(task);
        task->thread.trap_nr = trapnr;
        task->thread.error_code = error_code;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void __user *)regs->ip;
        if (trapnr == X86_TRAP_MF) {
                unsigned short cwd, swd;
                /*
                 * (~cwd & swd) will mask out exceptions that are not set to unmasked
                 * status.  0x3f is the exception bits in these regs, 0x200 is the
                 * C1 reg you need in case of a stack fault, 0x040 is the stack
                 * fault bit.  We should only be taking one exception at a time,
                 * so if this combination doesn't produce any single exception,
                 * then we have a bad program that isn't synchronizing its FPU usage
                 * and it will suffer the consequences since we won't be able to
                 * fully reproduce the context of the exception
                 */
                cwd = get_fpu_cwd(task);
                swd = get_fpu_swd(task);

                err = swd & ~cwd;
        } else {
                /*
                 * The SIMD FPU exceptions are handled a little differently, as there
                 * is only a single status/control register.  Thus, to determine which
                 * unmasked exception was caught we must mask the exception mask bits
                 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
                 */
                unsigned short mxcsr = get_fpu_mxcsr(task);
                err = ~(mxcsr >> 7) & mxcsr;
        }
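
        /*
         * Worked example for the SIMD case: an unmasked SSE divide-by-zero
         * leaves ZE (bit 2) set in MXCSR while ZM (bit 9) is clear, so the
         * expression above leaves 0x004 set in err and FPE_FLTDIV is
         * reported below.
         */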
        if (err & 0x001) {      /* Invalid op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
        } else if (err & 0x004) { /* Divide by Zero */
                info.si_code = FPE_FLTDIV;
        } else if (err & 0x008) { /* Overflow */
                info.si_code = FPE_FLTOVF;
        } else if (err & 0x012) { /* Denormal, Underflow */
                info.si_code = FPE_FLTUND;
        } else if (err & 0x020) { /* Precision */
                info.si_code = FPE_FLTRES;
        } else {
                /*
                 * If we're using IRQ 13, or supposedly even some trap
                 * X86_TRAP_MF implementations, it's possible
                 * we get a spurious trap, which is not an error.
                 */
                return;
        }
        force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
        ignore_fpu_irq = 1;
#endif
        math_error(regs, error_code, X86_TRAP_MF);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
        math_error(regs, error_code, X86_TRAP_XF);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);
#if 0
        /* No need to warn about this any longer. */
        printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available).
 */
void math_state_restore(void)
{
        struct task_struct *tsk = current;

        if (!tsk_used_math(tsk)) {
                local_irq_enable();
                /*
                 * does a slab alloc which can sleep
                 */
                if (init_fpu(tsk)) {
                        /*
                         * ran out of memory!
                         */
                        do_group_exit(SIGKILL);
                        return;
                }
                local_irq_disable();
        }

        __thread_fpu_begin(tsk);

        /*
         * Paranoid restore. send a SIGSEGV if we fail to restore the state.
         */
        if (unlikely(restore_fpu_checking(tsk))) {
                __thread_fpu_end(tsk);
                force_sig(SIGSEGV, tsk);
                return;
        }
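
        /*
         * fpu_counter counts the task's consecutive uses of the FPU
         * across context switches; once it is high enough, the context
         * switch code preloads the FPU state eagerly instead of waiting
         * for the next #NM fault.
         */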
        tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
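
/*
 * #NM (device-not-available) is raised when an FPU/SSE instruction is
 * executed while CR0.TS is set.  math_state_restore() clears TS and
 * reloads the task's FPU context; with CONFIG_MATH_EMULATION on a CPU
 * without an FPU (CR0.EM set), the instruction is emulated in software
 * instead.
 */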
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_MATH_EMULATION
        if (read_cr0() & X86_CR0_EM) {
                struct math_emu_info info = { };

                conditional_sti(regs);

                info.regs = regs;
                math_emulate(&info);
                return;
        }
#endif
        math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
        conditional_sti(regs);
#endif
}

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
        siginfo_t info;

        local_irq_enable();

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_BADSTK;
        info.si_addr = NULL;
        if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
                       X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
                return;
        do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
                &info);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
        set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
        /* int3 can be called from all */
        set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
        set_intr_gate(X86_TRAP_PF, &page_fault);
        load_idt(&idt_descr);
}

void __init trap_init(void)
{
        int i;

#ifdef CONFIG_EISA
        void __iomem *p = early_ioremap(0x0FFFD9, 4);

        if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
                EISA_bus = 1;
        early_iounmap(p, 4);
#endif

        set_intr_gate(X86_TRAP_DE, &divide_error);
        set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
        /* int4 can be called from all */
        set_system_intr_gate(X86_TRAP_OF, &overflow);
        set_intr_gate(X86_TRAP_BR, &bounds);
        set_intr_gate(X86_TRAP_UD, &invalid_op);
        set_intr_gate(X86_TRAP_NM, &device_not_available);
#ifdef CONFIG_X86_32
        set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
        set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
        set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
        set_intr_gate(X86_TRAP_TS, &invalid_TSS);
        set_intr_gate(X86_TRAP_NP, &segment_not_present);
        set_intr_gate(X86_TRAP_SS, stack_segment);
        set_intr_gate(X86_TRAP_GP, &general_protection);
        set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
        set_intr_gate(X86_TRAP_MF, &coprocessor_error);
        set_intr_gate(X86_TRAP_AC, &alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
        set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);

        /* Reserve all the builtin and the syscall vector: */
        for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
                set_bit(i, used_vectors);
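
        /*
         * Vectors marked in used_vectors are skipped by the interrupt
         * vector allocator, so device interrupts can never be assigned
         * to a CPU exception or system call vector.
         */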

#ifdef CONFIG_IA32_EMULATION
        set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
        set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
        set_system_trap_gate(SYSCALL_VECTOR, &system_call);
        set_bit(SYSCALL_VECTOR, used_vectors);
#endif

        /*
         * Should be a barrier for any external CPU state:
         */
        cpu_init();

        x86_init.irqs.trap_init();
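
        /*
         * nmi_idt_table is an alternate IDT in which #DB and #BP do not
         * use an IST stack.  The NMI code can switch to it so that a
         * breakpoint or debug trap taken while the debug stack is
         * already in use does not clobber that stack.
         */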
#ifdef CONFIG_X86_64
        memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
        set_nmi_gate(X86_TRAP_DB, &debug);
        set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}