irq.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/uaccess.h>
#include <hv/drv_pcie_rc_intf.h>
#include <arch/spr_def.h>
#include <asm/traps.h>

/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
#define IS_HW_CLEARED 1

/*
 * The set of interrupts we enable for arch_local_irq_enable().
 * This is initialized to have just a single interrupt that the kernel
 * doesn't actually use as a sentinel.  During kernel init,
 * interrupts are added as the kernel gets prepared to support them.
 * NOTE: we could probably initialize them all statically up front.
 */
DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
	INITIAL_INTERRUPTS_ENABLED;
EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);

/* Define per-tile device interrupt statistics state. */
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

/*
 * Define per-tile irq disable mask; the hardware/HV only has a single
 * mask that we use to implement both masking and disabling.
 */
static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
	____cacheline_internodealigned_in_smp;

/*
 * Per-tile IRQ nesting depth.  Used to make sure we enable newly
 * enabled IRQs before exiting the outermost interrupt.
 */
static DEFINE_PER_CPU(int, irq_depth);

/* State for allocating IRQs on Gx. */
#if CHIP_HAS_IPI()
static unsigned long available_irqs = ~(1UL << IRQ_RESCHEDULE);
static DEFINE_SPINLOCK(available_irqs_lock);
#endif

#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
#else
/* Use HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
#endif
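
/*
 * A quick sketch of how these macros compose (illustrative only):
 * the argument is a bitmap of irq numbers, so a single irq is masked
 * with mask_irqs(1UL << irq), and unmask_irqs(~0UL) opens delivery
 * for every irq that is not otherwise disabled.
 */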

/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
	int depth = __get_cpu_var(irq_depth)++;
	unsigned long original_irqs;
	unsigned long remaining_irqs;
	struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
	/*
	 * Pending interrupts are listed in an SPR.  We might be
	 * nested, so be sure to only handle irqs that weren't already
	 * masked by a previous interrupt.  Then, mask out the ones
	 * we're going to handle.
	 */
	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
	/*
	 * Hypervisor performs the equivalent of the Gx code above and
	 * then puts the pending interrupt mask into a system save reg
	 * for us to find.
	 */
	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
	remaining_irqs = original_irqs;

	/* Track time spent here in an interrupt context. */
	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("tile_dev_intr: stack overflow: %ld\n",
				 sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
	while (remaining_irqs) {
		unsigned long irq = __ffs(remaining_irqs);
		remaining_irqs &= ~(1UL << irq);

		/* Count device irqs; Linux IPIs are counted elsewhere. */
		if (irq != IRQ_RESCHEDULE)
			__get_cpu_var(irq_stat).irq_dev_intr_count++;

		generic_handle_irq(irq);
	}

	/*
	 * If we weren't nested, turn on all enabled interrupts,
	 * including any that were reenabled during interrupt
	 * handling.
	 */
	if (depth == 0)
		unmask_irqs(~__get_cpu_var(irq_disable_mask));

	__get_cpu_var(irq_depth)--;

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * Remove an irq from the disabled mask.  If we're in an interrupt
 * context, defer enabling the HW interrupt until we leave.
 */
static void tile_irq_chip_enable(struct irq_data *d)
{
	get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
	if (__get_cpu_var(irq_depth) == 0)
		unmask_irqs(1UL << d->irq);
	put_cpu_var(irq_disable_mask);
}

/*
 * Add an irq to the disabled mask.  We disable the HW interrupt
 * immediately so that there's no possibility of it firing.  If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
static void tile_irq_chip_disable(struct irq_data *d)
{
	get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
	mask_irqs(1UL << d->irq);
	put_cpu_var(irq_disable_mask);
}

/* Mask an interrupt. */
static void tile_irq_chip_mask(struct irq_data *d)
{
	mask_irqs(1UL << d->irq);
}

/* Unmask an interrupt. */
static void tile_irq_chip_unmask(struct irq_data *d)
{
	unmask_irqs(1UL << d->irq);
}

/*
 * Clear an interrupt before processing it so that any new assertions
 * will trigger another irq.
 */
static void tile_irq_chip_ack(struct irq_data *d)
{
	if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
		clear_irqs(1UL << d->irq);
}

/*
 * For per-cpu interrupts, we need to avoid unmasking any interrupts
 * that we disabled via disable_percpu_irq().
 */
static void tile_irq_chip_eoi(struct irq_data *d)
{
	if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
		unmask_irqs(1UL << d->irq);
}

static struct irq_chip tile_irq_chip = {
	.name = "tile_irq_chip",
	.irq_enable = tile_irq_chip_enable,
	.irq_disable = tile_irq_chip_disable,
	.irq_ack = tile_irq_chip_ack,
	.irq_eoi = tile_irq_chip_eoi,
	.irq_mask = tile_irq_chip_mask,
	.irq_unmask = tile_irq_chip_unmask,
};

void __init init_IRQ(void)
{
	ipi_init();
}

void __cpuinit setup_irq_regs(void)
{
	/* Enable interrupt delivery. */
	unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
	arch_local_irq_unmask(INT_IPI_K);
#endif
}

void tile_irq_activate(unsigned int irq, int tile_irq_type)
{
	/*
	 * We use handle_level_irq() by default because the pending
	 * interrupt vector (whether modeled by the HV on TILE64 and
	 * TILEPro or implemented in hardware on TILE-Gx) has
	 * level-style semantics for each bit.  An interrupt fires
	 * whenever a bit is high, not just at edges.
	 */
	irq_flow_handler_t handle = handle_level_irq;
	if (tile_irq_type == TILE_IRQ_PERCPU)
		handle = handle_percpu_irq;
	irq_set_chip_and_handler(irq, &tile_irq_chip, handle);

	/*
	 * Flag interrupts that are hardware-cleared so that ack()
	 * won't clear them.
	 */
	if (tile_irq_type == TILE_IRQ_HW_CLEAR)
		irq_set_chip_data(irq, (void *)IS_HW_CLEARED);
}
EXPORT_SYMBOL(tile_irq_activate);
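
/*
 * Illustrative usage (hypothetical driver code, not part of this
 * file): after activating an irq, a driver hooks a handler through
 * the generic request_irq() API.  The names my_irq, my_handler, and
 * "my_dev" below are assumed for the example.
 *
 *	tile_irq_activate(my_irq, TILE_IRQ_PERCPU);
 *	if (request_irq(my_irq, my_handler, 0, "my_dev", NULL))
 *		pr_err("my_dev: request_irq failed\n");
 *
 * The flow handler selected above (handle_percpu_irq or
 * handle_level_irq) then invokes the chip ops defined in this file
 * on each interrupt.
 */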

void ack_bad_irq(unsigned int irq)
{
	pr_err("unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * Generic, controller-independent functions:
 */

#if CHIP_HAS_IPI()
int create_irq(void)
{
	unsigned long flags;
	int result;

	spin_lock_irqsave(&available_irqs_lock, flags);
	if (available_irqs == 0)
		result = -ENOMEM;
	else {
		result = __ffs(available_irqs);
		available_irqs &= ~(1UL << result);
		dynamic_irq_init(result);
	}
	spin_unlock_irqrestore(&available_irqs_lock, flags);

	return result;
}
EXPORT_SYMBOL(create_irq);

void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&available_irqs_lock, flags);
	available_irqs |= (1UL << irq);
	dynamic_irq_cleanup(irq);
	spin_unlock_irqrestore(&available_irqs_lock, flags);
}
EXPORT_SYMBOL(destroy_irq);
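
/*
 * Illustrative usage (hypothetical, not code from this file): on Gx
 * a driver allocates a dynamic irq number, activates it, and frees
 * it again when done.  create_irq() returns -ENOMEM once every bit
 * in available_irqs is taken.
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *	tile_irq_activate(irq, TILE_IRQ_HW_CLEAR);
 *	...
 *	destroy_irq(irq);
 */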
#endif