  1. /*
  2. * arch/s390/kernel/traps.c
  3. *
  4. * S390 version
  5. * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
  6. * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  7. * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  8. *
  9. * Derived from "arch/i386/kernel/traps.c"
  10. * Copyright (C) 1991, 1992 Linus Torvalds
  11. */
  12. /*
  13. * 'Traps.c' handles hardware traps and faults after we have saved some
  14. * state in 'asm.s'.
  15. */
  16. #include <linux/sched.h>
  17. #include <linux/kernel.h>
  18. #include <linux/string.h>
  19. #include <linux/errno.h>
  20. #include <linux/ptrace.h>
  21. #include <linux/timer.h>
  22. #include <linux/mm.h>
  23. #include <linux/smp.h>
  24. #include <linux/init.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/seq_file.h>
  27. #include <linux/delay.h>
  28. #include <linux/module.h>
  29. #include <linux/kdebug.h>
  30. #include <linux/kallsyms.h>
  31. #include <linux/reboot.h>
  32. #include <linux/kprobes.h>
  33. #include <linux/bug.h>
  34. #include <linux/utsname.h>
  35. #include <asm/uaccess.h>
  36. #include <asm/io.h>
  37. #include <linux/atomic.h>
  38. #include <asm/mathemu.h>
  39. #include <asm/cpcmd.h>
  40. #include <asm/lowcore.h>
  41. #include <asm/debug.h>
  42. #include <asm/ipl.h>
  43. #include "entry.h"
/* Jump table of program-check handlers, indexed by program interruption code. */
void (*pgm_check_table[128])(struct pt_regs *regs);

/* Non-zero: log unhandled user-space faults (ratelimited), see report_user_fault(). */
int show_unhandled_signals = 1;

/* Read the current stack pointer from general register 15. */
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

/* Word width of the stack dump format and dump depth depend on the mode. */
#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
/*
 * For show_trace we have three different stacks to consider:
 *  - the panic stack which is used if the kernel stack has overflown
 *  - the asynchronous interrupt stack (cpu related)
 *  - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 *
 * Walks backchained stack frames within [low, high) starting at sp and
 * prints the saved return address of every frame.  Returns the first sp
 * that falls outside the window, so the caller can continue the walk on
 * the next stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		/* Stop as soon as sp leaves this stack's address window. */
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		/* gprs[8] is the saved r14 (return address) of the frame. */
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			/* Backchain must move strictly upward and stay in window. */
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		/* Continue the walk on the interrupted context's stack. */
		sp = regs->gprs[15];
	}
}
/*
 * Print the decoded call trace starting at @stack.  If @stack is NULL,
 * start at @task's saved kernel stack pointer, or at our own r15 when
 * @task is NULL too.  Walks panic, async and process stacks in order.
 */
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}
/*
 * Dump up to kstack_depth_to_print raw stack words starting at @sp
 * (or at @task's / the current stack pointer when @sp is NULL), then
 * print the decoded call trace.
 */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		/* Stop at the upper end of the THREAD_SIZE aligned stack. */
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		/* Four words per output line. */
		if ((i * sizeof(long) % 32) == 0)
			printk("%s ", i == 0 ? "" : "\n");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}
  142. static void show_last_breaking_event(struct pt_regs *regs)
  143. {
  144. #ifdef CONFIG_64BIT
  145. printk("Last Breaking-Event-Address:\n");
  146. printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
  147. print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
  148. #endif
  149. }
  150. /*
  151. * The architecture-independent dump_stack generator
  152. */
  153. void dump_stack(void)
  154. {
  155. printk("CPU: %d %s %s %.*s\n",
  156. task_thread_info(current)->cpu, print_tainted(),
  157. init_utsname()->release,
  158. (int)strcspn(init_utsname()->version, " "),
  159. init_utsname()->version);
  160. printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
  161. current->comm, current->pid, current,
  162. (void *) current->thread.ksp);
  163. show_stack(NULL, NULL);
  164. }
  165. EXPORT_SYMBOL(dump_stack);
  166. static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
  167. {
  168. return (regs->psw.mask & bits) / ((~bits + 1) & bits);
  169. }
/*
 * Print the PSW, its decoded flag bits and all 16 general purpose
 * registers, followed by a disassembly around the PSW address.
 */
void show_registers(struct pt_regs *regs)
{
	char *mode;

	/* The problem-state bit distinguishes user from kernel context. */
	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	/* Decode the individual PSW mask fields. */
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}
  198. void show_regs(struct pt_regs *regs)
  199. {
  200. print_modules();
  201. printk("CPU: %d %s %s %.*s\n",
  202. task_thread_info(current)->cpu, print_tainted(),
  203. init_utsname()->release,
  204. (int)strcspn(init_utsname()->version, " "),
  205. init_utsname()->version);
  206. printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
  207. current->comm, current->pid, current,
  208. (void *) current->thread.ksp);
  209. show_registers(regs);
  210. /* Show stack backtrace if pt_regs is from kernel mode */
  211. if (!(regs->psw.mask & PSW_MASK_PSTATE))
  212. show_trace(NULL, (unsigned long *) regs->gprs[15]);
  213. show_last_breaking_event(regs);
  214. }
/* Serializes concurrent oopses so their output does not interleave. */
static DEFINE_SPINLOCK(die_lock);

/*
 * Oops handler: log the register state and backtrace, taint the kernel
 * and terminate the current task - or panic when the context cannot be
 * unwound (interrupt context / panic_on_oops).
 */
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	/* Push output through even if a console lock is held. */
	bust_spinlocks(1);
	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}
  248. static inline void report_user_fault(struct pt_regs *regs, int signr)
  249. {
  250. if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
  251. return;
  252. if (!unhandled_signal(current, signr))
  253. return;
  254. if (!printk_ratelimit())
  255. return;
  256. printk("User process fault: interruption code 0x%X ", regs->int_code);
  257. print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
  258. printk("\n");
  259. show_regs(regs);
  260. }
  261. int is_valid_bugaddr(unsigned long addr)
  262. {
  263. return 1;
  264. }
  265. static inline void __user *get_psw_address(struct pt_regs *regs)
  266. {
  267. return (void __user *)
  268. ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
  269. }
/*
 * Common trap delivery.  User mode: queue @si_signo with @si_code and
 * optionally log the fault.  Kernel mode: try an exception-table fixup
 * first, then report_bug() (WARN_ON continues), otherwise die().
 */
static void __kprobes do_trap(struct pt_regs *regs,
			      int si_signo, int si_code, char *str)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, 0,
		       regs->int_code, si_signo) == NOTIFY_STOP)
		return;
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Fault in user space: deliver the signal. */
		info.si_signo = si_signo;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = get_psw_address(regs);
		force_sig_info(si_signo, &info, current);
		report_user_fault(regs, si_signo);
	} else {
		const struct exception_table_entry *fixup;

		/* Fault in kernel space: is there a fixup handler? */
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				/* WARN_ON(): log and resume execution. */
				return;
			die(regs, str);
		}
	}
}
  298. void __kprobes do_per_trap(struct pt_regs *regs)
  299. {
  300. siginfo_t info;
  301. if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
  302. return;
  303. if (!current->ptrace)
  304. return;
  305. info.si_signo = SIGTRAP;
  306. info.si_errno = 0;
  307. info.si_code = TRAP_HWBKPT;
  308. info.si_addr =
  309. (void __force __user *) current->thread.per_event.address;
  310. force_sig_info(SIGTRAP, &info, current);
  311. }
  312. static void default_trap_handler(struct pt_regs *regs)
  313. {
  314. if (regs->psw.mask & PSW_MASK_PSTATE) {
  315. report_user_fault(regs, SIGSEGV);
  316. do_exit(SIGSEGV);
  317. } else
  318. die(regs, "Unknown program exception");
  319. }
/*
 * DO_ERROR_INFO - generate a trivial program-check handler that just
 * forwards to do_trap() with a fixed signal, si_code and message.
 */
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs) \
{ \
	do_trap(regs, signr, sicode, str); \
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")
  351. static inline void do_fp_trap(struct pt_regs *regs, int fpc)
  352. {
  353. int si_code = 0;
  354. /* FPC[2] is Data Exception Code */
  355. if ((fpc & 0x00000300) == 0) {
  356. /* bits 6 and 7 of DXC are 0 iff IEEE exception */
  357. if (fpc & 0x8000) /* invalid fp operation */
  358. si_code = FPE_FLTINV;
  359. else if (fpc & 0x4000) /* div by 0 */
  360. si_code = FPE_FLTDIV;
  361. else if (fpc & 0x2000) /* overflow */
  362. si_code = FPE_FLTOVF;
  363. else if (fpc & 0x1000) /* underflow */
  364. si_code = FPE_FLTUND;
  365. else if (fpc & 0x0800) /* inexact */
  366. si_code = FPE_FLTRES;
  367. }
  368. do_trap(regs, SIGFPE, si_code, "floating point exception");
  369. }
/*
 * Program check 0x01: operation exception (illegal opcode).
 * User mode: recognize the ptrace breakpoint opcode, and - with
 * CONFIG_MATHEMU - emulate IEEE fp opcodes that old hardware lacks;
 * anything else is SIGILL.  Kernel mode: offer the trap to kprobes.
 * Note that the #ifdef arms below extend the user-mode else-if chain.
 */
static void __kprobes illegal_op(struct pt_regs *regs)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs);
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Fetch the opcode; bail out if the page went away. */
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			/* ptrace software breakpoint. */
			if (current->ptrace) {
				info.si_signo = SIGTRAP;
				info.si_errno = 0;
				info.si_code = TRAP_BRKPT;
				info.si_addr = location;
				force_sig_info(SIGTRAP, &info, current);
			} else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, 0,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}
#ifdef CONFIG_MATHEMU
	/* Math emulation may report an fp exception or a user page fault. */
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal == SIGSEGV)
		do_trap(regs, signal, SEGV_MAPERR, "user address fault");
	else
#endif
	if (signal)
		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
#ifdef CONFIG_MATHEMU
/*
 * Program check 0x06: specification exception.  With math emulation
 * enabled, fp load/store opcodes raised by user space are emulated;
 * everything else (including kernel-mode faults) is SIGILL.
 */
void specification_exception(struct pt_regs *regs)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_psw_address(regs);
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "specification exception");
}
#else
/* Without math emulation a specification exception is always SIGILL. */
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
#endif
  480. static void data_exception(struct pt_regs *regs)
  481. {
  482. __u16 __user *location;
  483. int signal = 0;
  484. location = get_psw_address(regs);
  485. if (MACHINE_HAS_IEEE)
  486. asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
  487. #ifdef CONFIG_MATHEMU
  488. else if (regs->psw.mask & PSW_MASK_PSTATE) {
  489. __u8 opcode[6];
  490. get_user(*((__u16 *) opcode), location);
  491. switch (opcode[0]) {
  492. case 0x28: /* LDR Rx,Ry */
  493. signal = math_emu_ldr(opcode);
  494. break;
  495. case 0x38: /* LER Rx,Ry */
  496. signal = math_emu_ler(opcode);
  497. break;
  498. case 0x60: /* STD R,D(X,B) */
  499. get_user(*((__u16 *) (opcode+2)), location+1);
  500. signal = math_emu_std(opcode, regs);
  501. break;
  502. case 0x68: /* LD R,D(X,B) */
  503. get_user(*((__u16 *) (opcode+2)), location+1);
  504. signal = math_emu_ld(opcode, regs);
  505. break;
  506. case 0x70: /* STE R,D(X,B) */
  507. get_user(*((__u16 *) (opcode+2)), location+1);
  508. signal = math_emu_ste(opcode, regs);
  509. break;
  510. case 0x78: /* LE R,D(X,B) */
  511. get_user(*((__u16 *) (opcode+2)), location+1);
  512. signal = math_emu_le(opcode, regs);
  513. break;
  514. case 0xb3:
  515. get_user(*((__u16 *) (opcode+2)), location+1);
  516. signal = math_emu_b3(opcode, regs);
  517. break;
  518. case 0xed:
  519. get_user(*((__u32 *) (opcode+2)),
  520. (__u32 __user *)(location+1));
  521. signal = math_emu_ed(opcode, regs);
  522. break;
  523. case 0xb2:
  524. if (opcode[1] == 0x99) {
  525. get_user(*((__u16 *) (opcode+2)), location+1);
  526. signal = math_emu_srnm(opcode, regs);
  527. } else if (opcode[1] == 0x9c) {
  528. get_user(*((__u16 *) (opcode+2)), location+1);
  529. signal = math_emu_stfpc(opcode, regs);
  530. } else if (opcode[1] == 0x9d) {
  531. get_user(*((__u16 *) (opcode+2)), location+1);
  532. signal = math_emu_lfpc(opcode, regs);
  533. } else
  534. signal = SIGILL;
  535. break;
  536. default:
  537. signal = SIGILL;
  538. break;
  539. }
  540. }
  541. #endif
  542. if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
  543. signal = SIGFPE;
  544. else
  545. signal = SIGILL;
  546. if (signal == SIGFPE)
  547. do_fp_trap(regs, current->thread.fp_regs.fpc);
  548. else if (signal)
  549. do_trap(regs, signal, ILL_ILLOPN, "data exception");
  550. }
/*
 * Program check 0x1C: space-switch event.  Reset the address space
 * control of the user PSW and signal the event with SIGILL.
 */
static void space_switch_exception(struct pt_regs *regs)
{
	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}
/*
 * Entered when the kernel stack overflow check fires (presumably from
 * the entry code - see entry.h).  The stack is corrupt, so dump what
 * we can and panic; there is no way to recover.
 */
void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}
  567. /* init is done in lowcore.S and head.S */
  568. void __init trap_init(void)
  569. {
  570. int i;
  571. for (i = 0; i < 128; i++)
  572. pgm_check_table[i] = &default_trap_handler;
  573. pgm_check_table[1] = &illegal_op;
  574. pgm_check_table[2] = &privileged_op;
  575. pgm_check_table[3] = &execute_exception;
  576. pgm_check_table[4] = &do_protection_exception;
  577. pgm_check_table[5] = &addressing_exception;
  578. pgm_check_table[6] = &specification_exception;
  579. pgm_check_table[7] = &data_exception;
  580. pgm_check_table[8] = &overflow_exception;
  581. pgm_check_table[9] = &divide_exception;
  582. pgm_check_table[0x0A] = &overflow_exception;
  583. pgm_check_table[0x0B] = &divide_exception;
  584. pgm_check_table[0x0C] = &hfp_overflow_exception;
  585. pgm_check_table[0x0D] = &hfp_underflow_exception;
  586. pgm_check_table[0x0E] = &hfp_significance_exception;
  587. pgm_check_table[0x0F] = &hfp_divide_exception;
  588. pgm_check_table[0x10] = &do_dat_exception;
  589. pgm_check_table[0x11] = &do_dat_exception;
  590. pgm_check_table[0x12] = &translation_exception;
  591. pgm_check_table[0x13] = &special_op_exception;
  592. #ifdef CONFIG_64BIT
  593. pgm_check_table[0x38] = &do_asce_exception;
  594. pgm_check_table[0x39] = &do_dat_exception;
  595. pgm_check_table[0x3A] = &do_dat_exception;
  596. pgm_check_table[0x3B] = &do_dat_exception;
  597. #endif /* CONFIG_64BIT */
  598. pgm_check_table[0x15] = &operand_exception;
  599. pgm_check_table[0x1C] = &space_switch_exception;
  600. pgm_check_table[0x1D] = &hfp_sqrt_exception;
  601. /* Enable machine checks early. */
  602. local_mcck_enable();
  603. }