/*
 * linux/arch/m32r/kernel/traps.c
 *
 * Copyright (C) 2001, 2002  Hirokazu Takata, Hiroyuki Kondo,
 *                           Hitoshi Yamamoto
 */

/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/atomic.h>

#include <asm/page.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/smp.h>
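
/*
 * Low-level exception entry points.  These are implemented in assembly
 * (see entry.S) and are declared here only so the EIT vector table below
 * can branch to them.
 */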
asmlinkage void alignment_check(void);
asmlinkage void ei_handler(void);
asmlinkage void rie_handler(void);
asmlinkage void debug_trap(void);
asmlinkage void cache_flushing_handler(void);
asmlinkage void ill_trap(void);

#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(void);
extern void smp_invalidate_interrupt(void);
extern void smp_call_function_interrupt(void);
extern void smp_ipi_timer_interrupt(void);
extern void smp_flush_cache_all_interrupt(void);
extern void smp_call_function_single_interrupt(void);

/*
 * Vector entries used to boot the application processors (APs).
 */
asm (
        "       .section .eit_vector4,\"ax\"    \n"
        "       .global _AP_RE                  \n"
        "       .global startup_AP              \n"
        "_AP_RE:                                \n"
        "       .fill 32, 4, 0                  \n"
        "_AP_EI: bra    startup_AP              \n"
        "       .previous                       \n"
);
#endif  /* CONFIG_SMP */

extern unsigned long eit_vector[];
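
/*
 * BRA_INSN() assembles an M32R "bra" (branch) instruction word that jumps
 * from EIT vector slot 'entry' to 'func': the top byte is the bra opcode
 * (0xff) and the low 24 bits hold the PC-relative displacement in words.
 */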
#define BRA_INSN(func, entry) \
        ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
        + 0xff000000UL
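
/*
 * Populate the EIT (Exception, Interrupt and Trap) vector table.  Each
 * 32-bit slot holds a single instruction, normally a branch to the
 * corresponding handler: trap vectors #0-#15 occupy entries 16-31,
 * external interrupts are funnelled through ei_handler at entry 32, and
 * the MMU exception handlers (when configured) sit further up the table.
 */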
static void set_eit_vector_entries(void)
{
        extern void default_eit_handler(void);
        extern void system_call(void);
        extern void pie_handler(void);
        extern void ace_handler(void);
        extern void tme_handler(void);
        extern void _flush_cache_copyback_all(void);

        eit_vector[0] = 0xd0c00001; /* seth r0, 0x01 */
        eit_vector[1] = BRA_INSN(default_eit_handler, 1);
        eit_vector[4] = 0xd0c00010; /* seth r0, 0x10 */
        eit_vector[5] = BRA_INSN(default_eit_handler, 5);
        eit_vector[8] = BRA_INSN(rie_handler, 8);
        eit_vector[12] = BRA_INSN(alignment_check, 12);
        eit_vector[16] = BRA_INSN(ill_trap, 16);
        eit_vector[17] = BRA_INSN(debug_trap, 17);
        eit_vector[18] = BRA_INSN(system_call, 18);
        eit_vector[19] = BRA_INSN(ill_trap, 19);
        eit_vector[20] = BRA_INSN(ill_trap, 20);
        eit_vector[21] = BRA_INSN(ill_trap, 21);
        eit_vector[22] = BRA_INSN(ill_trap, 22);
        eit_vector[23] = BRA_INSN(ill_trap, 23);
        eit_vector[24] = BRA_INSN(ill_trap, 24);
        eit_vector[25] = BRA_INSN(ill_trap, 25);
        eit_vector[26] = BRA_INSN(ill_trap, 26);
        eit_vector[27] = BRA_INSN(ill_trap, 27);
        eit_vector[28] = BRA_INSN(cache_flushing_handler, 28);
        eit_vector[29] = BRA_INSN(ill_trap, 29);
        eit_vector[30] = BRA_INSN(ill_trap, 30);
        eit_vector[31] = BRA_INSN(ill_trap, 31);
        eit_vector[32] = BRA_INSN(ei_handler, 32);
        eit_vector[64] = BRA_INSN(pie_handler, 64);
#ifdef CONFIG_MMU
        eit_vector[68] = BRA_INSN(ace_handler, 68);
        eit_vector[72] = BRA_INSN(tme_handler, 72);
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
        eit_vector[184] = (unsigned long)smp_reschedule_interrupt;
        eit_vector[185] = (unsigned long)smp_invalidate_interrupt;
        eit_vector[186] = (unsigned long)smp_call_function_interrupt;
        eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
        eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
        eit_vector[189] = 0; /* CPU_BOOT_IPI */
        eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
        eit_vector[191] = 0;
#endif
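
        /*
         * The vector table was written through the data cache; flush it
         * back to memory so that instruction fetches from the EIT vectors
         * see the new entries.
         */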
        _flush_cache_copyback_all();
}

void __init trap_init(void)
{
        set_eit_vector_entries();

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}
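
/*
 * Stack dumping helpers.  There is no frame-pointer unwinder here:
 * show_trace() simply scans the kernel stack and prints every word that
 * looks like a kernel text address, so the resulting "Call Trace" may
 * include stale entries.
 */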
static int kstack_depth_to_print = 24;

static void show_trace(struct task_struct *task, unsigned long *stack)
{
        unsigned long addr;

        if (!stack)
                stack = (unsigned long *)&stack;

        printk("Call Trace: ");
        while (!kstack_end(stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr)) {
                        printk("[<%08lx>] ", addr);
                        print_symbol("%s\n", addr);
                }
        }
        printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        unsigned long *stack;
        int i;

        /*
         * debugging aid: "show_stack(NULL);" prints the
         * back trace for this cpu.
         */
        if (sp == NULL) {
                if (task)
                        sp = (unsigned long *)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 4) == 0))
                        printk("\n ");
                printk("%08lx ", *stack++);
        }
        printk("\n");
        show_trace(task, sp);
}

void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, &stack);
}
EXPORT_SYMBOL(dump_stack);
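
/*
 * Dump the register state for an oops: the CPU number, the register file,
 * the active stack pointer (SPU for user mode, SPI otherwise) and, for
 * kernel-mode faults, the stack contents and up to 20 bytes of code at
 * the faulting bpc.
 */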
static void show_registers(struct pt_regs *regs)
{
        int i = 0;
        int in_kernel = 1;
        unsigned long sp;

        printk("CPU: %d\n", smp_processor_id());
        show_regs(regs);

        sp = (unsigned long)(1 + regs);
        if (user_mode(regs)) {
                in_kernel = 0;
                sp = regs->spu;
                printk("SPU: %08lx\n", sp);
        } else {
                printk("SPI: %08lx\n", sp);
        }
        printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
                current->comm, task_pid_nr(current), 0xffff & i,
                4096 + (unsigned long)current);

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                printk("\nStack: ");
                show_stack(current, (unsigned long *)sp);

                printk("\nCode: ");
                if (regs->bpc < PAGE_OFFSET)
                        goto bad;

                for (i = 0; i < 20; i++) {
                        unsigned char c;

                        if (__get_user(c, &((unsigned char *)regs->bpc)[i])) {
bad:
                                printk(" Bad PC value.");
                                break;
                        }
                        printk("%02x ", c);
                }
        }
        printk("\n");
}
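
/*
 * die() serializes oops output across CPUs with die_lock, forces verbose
 * console logging, dumps the registers and then terminates the current
 * task with SIGSEGV.
 */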
static DEFINE_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs, long err)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04lx\n", str, err & 0xffff);
        show_registers(regs);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

static __inline__ void die_if_kernel(const char *str,
        struct pt_regs *regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}
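
/*
 * Common trap disposition: a fault taken in user mode delivers a signal
 * to the current task (with siginfo when the caller supplies it), while a
 * fault taken in kernel mode is first handed to the exception-table fixup
 * code and only die()s if no fixup exists.
 */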
static __inline__ void do_trap(int trapnr, int signr, const char *str,
        struct pt_regs *regs, long error_code, siginfo_t *info)
{
        if (user_mode(regs)) {
                /* trap_signal */
                struct task_struct *tsk = current;

                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        } else {
                /* kernel_trap */
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }
}
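
/*
 * DO_ERROR()/DO_ERROR_INFO() stamp out the individual do_<name>() trap
 * handlers; the _INFO variant also fills in a siginfo_t describing the
 * faulting address for the signal delivered to user space.
 */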
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        do_trap(trapnr, signr, NULL, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR( 1, SIGTRAP, "debug trap", debug_trap)
DO_ERROR_INFO(0x20, SIGILL, "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc)
DO_ERROR_INFO(0x100, SIGILL, "privileged instruction", pie_handler, ILL_PRVOPC, regs->bpc)
DO_ERROR_INFO(-1, SIGILL, "illegal trap", ill_trap, ILL_ILLTRP, regs->bpc)
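
/*
 * Unaligned-access fixup: fetch the faulting instruction at regs->bpc and
 * let handle_unaligned_access() emulate the access in software.  A user
 * task whose access cannot be fixed up is killed with SIGSEGV; a kernel
 * fault that cannot even fetch the instruction ends in die().
 */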
extern int handle_unaligned_access(unsigned long, struct pt_regs *);

/* This code taken from arch/sh/kernel/traps.c */
asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code)
{
        mm_segment_t oldfs;
        unsigned long insn;
        int tmp;

        oldfs = get_fs();

        if (user_mode(regs)) {
                local_irq_enable();
                current->thread.error_code = error_code;
                current->thread.trap_no = 0x17;

                set_fs(USER_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        goto uspace_segv;
                }
                tmp = handle_unaligned_access(insn, regs);
                set_fs(oldfs);

                if (!tmp)
                        return;

uspace_segv:
                printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
                        "access\n", current->comm);
                force_sig(SIGSEGV, current);
        } else {
                set_fs(KERNEL_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        die("insn faulting in do_address_error", regs, 0);
                }
                handle_unaligned_access(insn, regs);
                set_fs(oldfs);
        }
}