irq.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle
 * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 */
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H

#include <linux/linkage.h>
#include <linux/smp.h>
#include <linux/irqdomain.h>

#include <asm/mipsmtregs.h>

#include <irq.h>
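
/*
 * On systems with a legacy i8259 PIC pair, IRQ 2 is the cascade input
 * of the master controller and never raises an interrupt of its own,
 * so requests for it are redirected to IRQ 9 on the slave, the same
 * remapping used on PC/AT-style hardware.
 */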
#ifdef CONFIG_I8259
static inline int irq_canonicalize(int irq)
{
        return ((irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq);
}
#else
#define irq_canonicalize(irq) (irq)     /* Sane hardware, sane code ... */
#endif
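
/*
 * On SMTC kernels irq_hwmask[] records, per IRQ, the Status.IM bit(s)
 * used by that interrupt source; smtc_im_ack_irq() turns those bits
 * back on when the interrupt is acknowledged, after the dispatch path
 * has masked them.
 */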
#ifdef CONFIG_MIPS_MT_SMTC

struct irqaction;

extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction *new,
                          unsigned long hwmask);

static inline void smtc_im_ack_irq(unsigned int irq)
{
        if (irq_hwmask[irq] & ST0_IM)
                set_c0_status(irq_hwmask[irq] & ST0_IM);
}

#else

static inline void smtc_im_ack_irq(unsigned int irq)
{
}

#endif /* CONFIG_MIPS_MT_SMTC */
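
/*
 * Optional SMTC IRQ affinity support: platforms provide
 * plat_set_irq_affinity(), typically wired up as an irq_chip's
 * .irq_set_affinity method, and smtc_forward_irq() pushes an interrupt
 * that arrived on a TC outside the affinity mask over to one that is
 * allowed to handle it.
 */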
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
#include <linux/cpumask.h>

extern int plat_set_irq_affinity(struct irq_data *d,
                                 const struct cpumask *affinity, bool force);
extern void smtc_forward_irq(struct irq_data *d);

/*
 * IRQ affinity hook invoked at the beginning of interrupt dispatch
 * if option is enabled.
 *
 * Up through Linux 2.6.22 (at least) cpumask operations are very
 * inefficient on MIPS.  Initial prototypes of SMTC IRQ affinity
 * used a "fast path" per-IRQ-descriptor cache of affinity information
 * to reduce latency.  As there is a project afoot to optimize the
 * cpumask implementations, this version is optimistically assuming
 * that cpumask.h macro overhead is reasonable during interrupt dispatch.
 */
static inline int handle_on_other_cpu(unsigned int irq)
{
        struct irq_data *d = irq_get_irq_data(irq);

        if (cpumask_test_cpu(smp_processor_id(), d->affinity))
                return 0;
        smtc_forward_irq(d);
        return 1;
}

#else /* Not doing SMTC affinity */

static inline int handle_on_other_cpu(unsigned int irq) { return 0; }

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP

static inline void smtc_im_backstop(unsigned int irq)
{
        if (irq_hwmask[irq] & 0x0000ff00)
                write_c0_tccontext(read_c0_tccontext() &
                                   ~(irq_hwmask[irq] & 0x0000ff00));
}

/*
 * Clear interrupt mask handling "backstop" if irq_hwmask
 * entry so indicates. This implies that the ack() or end()
 * functions will take over re-enabling the low-level mask.
 * Otherwise it will be done on return from exception.
 */
static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
        int ret = handle_on_other_cpu(irq);

        if (!ret)
                smtc_im_backstop(irq);
        return ret;
}

#else

static inline void smtc_im_backstop(unsigned int irq) { }
static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
        return handle_on_other_cpu(irq);
}

#endif
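
/*
 * A rough sketch (not part of this header) of how the interrupt entry
 * code is expected to use the hooks above: the dispatcher first offers
 * the interrupt to another CPU/TC and only handles it locally when
 * smtc_handle_on_other_cpu() returns zero, e.g.
 *
 *      void __irq_entry do_IRQ(unsigned int irq)
 *      {
 *              irq_enter();
 *              if (!smtc_handle_on_other_cpu(irq))
 *                      generic_handle_irq(irq);
 *              irq_exit();
 *      }
 */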
extern void do_IRQ(unsigned int irq);

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
extern void do_IRQ_no_affinity(unsigned int irq);
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

extern void arch_init_irq(void);
extern void spurious_interrupt(void);

extern int allocate_irqno(void);
extern void alloc_legacy_irqno(void);
extern void free_irqno(unsigned int irq);
/*
 * Before R2 the timer and performance counter interrupts were both fixed to
 * IE7. Since R2 their number has to be read from the c0_intctl register.
 */
#define CP0_LEGACY_COMPARE_IRQ 7

extern int cp0_compare_irq;
extern int cp0_compare_irq_shift;
extern int cp0_perfcount_irq;
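
/*
 * A minimal sketch (not part of this header) of how per-CPU init code
 * might fill these in, assuming the read_c0_intctl() accessor and the
 * INTCTLB_IPTI/INTCTLB_IPPCI field offsets from <asm/mipsregs.h>:
 *
 *      if (cpu_has_mips_r2) {
 *              cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
 *              cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
 *      } else {
 *              cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
 *              cp0_perfcount_irq = -1;
 *      }
 */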

#endif /* _ASM_IRQ_H */