/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/core-api/genericirq.rst
 *
 */

#include <linux/irq.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

#ifdef CONFIG_MTK_RT_THROTTLE_MON
#include "mtk_rt_mon.h"
#endif
/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);

	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(desc);
	ack_bad_irq(irq);
}
EXPORT_SYMBOL_GPL(handle_bad_irq);
/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(no_action);
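
/*
 * Illustrative sketch (not part of the original source): no_action() is
 * typically handed to request_irq() to claim a line that needs no real
 * handling, e.g. the cascade input of a chained interrupt controller.
 * CASCADE_IRQ and the name string below are hypothetical:
 *
 *	if (request_irq(CASCADE_IRQ, no_action, IRQF_NO_THREAD,
 *			"cascade", NULL))
 *		pr_err("failed to claim cascade irq\n");
 */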
static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
		return;

	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
	       "but no thread function available.\n", irq, action->name);
}
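
/*
 * Illustrative sketch (not part of the original source): returning
 * IRQ_WAKE_THREAD is only valid when a thread function was registered
 * together with the primary handler. All names below are hypothetical:
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		// quiesce the device, then defer the heavy lifting
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread(int irq, void *dev_id)
 *	{
 *		// slow work runs here in process context
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, foo_hardirq, foo_thread,
 *				   IRQF_ONESHOT, "foo", dev);
 */

/**
 * __irq_wake_thread - wake the irq thread of an action
 * @desc:	interrupt descriptor the action belongs to
 * @action:	irqaction whose thread should be woken
 *
 * Called from hard irq context after the primary handler returned
 * IRQ_WAKE_THREAD. Marks the thread as runnable, records it in
 * desc->threads_oneshot and bumps threads_active so synchronize_irq()
 * waits for the thread to finish.
 */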
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
	/*
	 * In case the thread crashed and was killed we just pretend that
	 * we handled the interrupt. The hardirq handler has disabled the
	 * device interrupt, so no irq storm is lurking.
	 */
	if (action->thread->flags & PF_EXITING)
		return;

	/*
	 * Wake up the handler thread for this action. If the
	 * RUNTHREAD bit is already set, nothing to do.
	 */
	if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		return;

	/*
	 * It's safe to OR the mask lockless here. We have only two
	 * places which write to threads_oneshot: This code and the
	 * irq thread.
	 *
	 * This code is the hard irq context and can never run on two
	 * cpus in parallel. If it ever does we have more serious
	 * problems than this bitmask.
	 *
	 * The irq threads of this irq which clear their "running" bit
	 * in threads_oneshot are serialized via desc->lock against
	 * each other and they are serialized against this code by
	 * IRQS_INPROGRESS.
	 *
	 * Hard irq handler:
	 *
	 *	spin_lock(desc->lock);
	 *	desc->state |= IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *	set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
	 *	desc->threads_oneshot |= mask;
	 *	spin_lock(desc->lock);
	 *	desc->state &= ~IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *
	 * irq thread:
	 *
	 * again:
	 *	spin_lock(desc->lock);
	 *	if (desc->state & IRQS_INPROGRESS) {
	 *		spin_unlock(desc->lock);
	 *		while (desc->state & IRQS_INPROGRESS)
	 *			cpu_relax();
	 *		goto again;
	 *	}
	 *	if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
	 *		desc->threads_oneshot &= ~mask;
	 *	spin_unlock(desc->lock);
	 *
	 * So either the thread waits for us to clear IRQS_INPROGRESS
	 * or we are waiting in the flow handler for desc->lock to be
	 * released before we reach this point. The thread also checks
	 * IRQTF_RUNTHREAD under desc->lock. If set it leaves
	 * threads_oneshot untouched and runs the thread another time.
	 */
	desc->threads_oneshot |= action->thread_mask;

	/*
	 * We increment the threads_active counter in case we wake up
	 * the irq thread. The irq thread decrements the counter when
	 * it returns from the handler or in the exit path and wakes
	 * up waiters which are stuck in synchronize_irq() when the
	 * active count becomes zero. synchronize_irq() is serialized
	 * against this code (hard irq handler) via IRQS_INPROGRESS
	 * like the finalize_oneshot() code. See comment above.
	 */
	atomic_inc(&desc->threads_active);

	wake_up_process(action->thread);
}
#ifdef CONFIG_MTK_RT_THROTTLE_MON
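/*
 * Charge the duration of a hard irq handler to the interrupted task's
 * scheduling entity when that task runs an RT policy (SCHED_FIFO or
 * SCHED_RR) and the MTK RT monitor is enabled on this CPU, so the RT
 * throttle monitor can tell ISR time apart from the task's own runtime.
 */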
static void save_isr_info(unsigned long long start, unsigned long long end)
{
	unsigned long long dur = end - start;

	if ((current->policy == SCHED_FIFO || current->policy == SCHED_RR)
	    && mt_rt_mon_enable(smp_processor_id()))
		current->se.mtk_isr_time += dur;
}
#endif
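
/**
 * __handle_irq_event_percpu - walk the action chain of an interrupt event
 * @desc:	the interrupt description structure for this irq
 * @flags:	collects the IRQF_* flags of the actions that handled the
 *		interrupt; the caller feeds them to the entropy pool
 *
 * Invokes every primary handler registered on @desc, wakes the matching
 * irq thread when a handler returns IRQ_WAKE_THREAD, and returns the
 * OR'ed irqreturn_t values of all handlers.
 */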
irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags)
{
	irqreturn_t retval = IRQ_NONE;
	unsigned int irq = desc->irq_data.irq;
	struct irqaction *action;
#ifdef CONFIG_MTK_RT_THROTTLE_MON
	unsigned long long t1, t2;
#endif

	record_irq_time(desc);

	for_each_action_of_desc(desc, action) {
		irqreturn_t res;

		/*
		 * If this IRQ would be threaded under force_irqthreads,
		 * mark it so.
		 */
		if (irq_settings_can_thread(desc) &&
		    !(action->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)))
			trace_hardirq_threaded();

		trace_irq_handler_entry(irq, action);
#ifdef CONFIG_MTK_RT_THROTTLE_MON
		t1 = sched_clock();
#endif
		res = action->handler(irq, action->dev_id);
#ifdef CONFIG_MTK_RT_THROTTLE_MON
		t2 = sched_clock();
		save_isr_info(t1, t2);
#endif
		trace_irq_handler_exit(irq, action, res);

		if (WARN_ONCE(!irqs_disabled(), "irq %u handler %pF enabled interrupts\n",
			      irq, action->handler))
			local_irq_disable();

		switch (res) {
		case IRQ_WAKE_THREAD:
			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			__irq_wake_thread(desc, action);

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			*flags |= action->flags;
			break;

		default:
			break;
		}

		retval |= res;
	}

	return retval;
}
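
/**
 * handle_irq_event_percpu - handle all actions of a particular interrupt
 * @desc:	the interrupt description structure for this irq
 *
 * Runs the action chain via __handle_irq_event_percpu(), mixes the
 * collected flags into the entropy pool and, unless booted with
 * 'noirqdebug', lets note_interrupt() watch for spurious interrupts.
 */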
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
	irqreturn_t retval;
	unsigned int flags = 0;

	retval = __handle_irq_event_percpu(desc, &flags);

	add_interrupt_randomness(desc->irq_data.irq, flags);

	if (!noirqdebug)
		note_interrupt(desc, retval);
	return retval;
}
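
/**
 * handle_irq_event - handle one interrupt event on a descriptor
 * @desc:	the interrupt description structure for this irq
 *
 * Called with desc->lock held. Clears IRQS_PENDING, marks the irq
 * IRQD_IRQ_INPROGRESS and drops the lock while the handlers run, which
 * is what the flow handlers and irq threads synchronize against.
 */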
irqreturn_t handle_irq_event(struct irq_desc *desc)
{
	irqreturn_t ret;

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	ret = handle_irq_event_percpu(desc);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

	return ret;
}
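
/*
 * Illustrative sketch (not part of the original source): a flow handler
 * such as handle_level_irq() in kernel/irq/chip.c drives the code above
 * roughly like this (heavily simplified):
 *
 *	raw_spin_lock(&desc->lock);
 *	mask_ack_irq(desc);
 *	...
 *	handle_irq_event(desc);	// temporarily drops desc->lock
 *	...
 *	raw_spin_unlock(&desc->lock);
 */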