/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
/* Per-CPU dedicated IRQ stack pointers; pages are allocated in init_IRQ(). */
void *irq_stack[NR_CPUS];
  27. /*
  28. * 'what should we do if we get a hw irq event on an illegal vector'.
  29. * each architecture has to answer this themselves.
  30. */
  31. void ack_bad_irq(unsigned int irq)
  32. {
  33. printk("unexpected IRQ # %d\n", irq);
  34. }
  35. atomic_t irq_err_count;
  36. int arch_show_interrupts(struct seq_file *p, int prec)
  37. {
  38. seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
  39. return 0;
  40. }
/*
 * Low-level entry point (asmlinkage: called from assembly) for interrupts
 * that arrive with no recognizable source; just account them so they show
 * up on the "ERR" line of /proc/interrupts.
 */
asmlinkage void spurious_interrupt(void)
{
	atomic_inc(&irq_err_count);
}
  45. void __init init_IRQ(void)
  46. {
  47. int i;
  48. for (i = 0; i < NR_IRQS; i++)
  49. irq_set_noprobe(i);
  50. if (cpu_has_veic)
  51. clear_c0_status(ST0_IM);
  52. arch_init_irq();
  53. for_each_possible_cpu(i) {
  54. int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
  55. void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
  56. irq_stack[i] = s;
  57. pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
  58. irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
  59. }
  60. }
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/*
 * Warn if the kernel stack is close to exhaustion on IRQ entry.
 * Reads the current stack pointer via inline asm ($sp on MIPS) and
 * masks it down to an offset within the current THREAD_SIZE-aligned
 * stack; the usable region starts just above struct thread_info at
 * the base of the stack.
 */
static inline void check_stack_overflow(void)
{
	unsigned long sp;

	__asm__ __volatile__("move %0, $sp" : "=r" (sp));
	sp &= THREAD_MASK;	/* offset of $sp within the thread stack */

	/*
	 * Check for stack overflow: is there less than STACK_WARN free?
	 * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
	 */
	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
}
#else
/* Debug option off: compile the check away entirely. */
static inline void check_stack_overflow(void) {}
#endif
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * The irq_enter()/irq_exit() bracket maintains preempt/softirq
 * accounting; irq_exit() may also run pending softirqs, so the
 * ordering of these four calls must not change.
 */
void __irq_entry do_IRQ(unsigned int irq)
{
	irq_enter();
	check_stack_overflow();
	generic_handle_irq(irq);
	irq_exit();
}