irq_work.c

/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/processor.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        unsigned long flags, oflags, nflags;

        /*
         * Start with our best wish as a premise but only trust any
         * flag value after cmpxchg() result.
         */
        flags = work->flags & ~IRQ_WORK_PENDING;
        for (;;) {
                nflags = flags | IRQ_WORK_FLAGS;
                oflags = cmpxchg(&work->flags, flags, nflags);
                if (oflags == flags)
                        break;
                if (oflags & IRQ_WORK_PENDING)
                        return false;
                flags = oflags;
                cpu_relax();
        }

        return true;
}
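
/*
 * For reference (summarized from <linux/irq_work.h> in this kernel
 * generation; see that header for the authoritative values):
 *
 *      IRQ_WORK_PENDING  - work is queued but its callback has not run yet
 *      IRQ_WORK_BUSY     - set from claim until the callback has returned
 *      IRQ_WORK_FLAGS    - IRQ_WORK_PENDING | IRQ_WORK_BUSY, what a claim sets
 *      IRQ_WORK_LAZY     - don't raise an IPI, wait for the next tick
 *
 * irq_work_claim() atomically sets PENDING|BUSY; irq_work_run_list() below
 * clears PENDING just before invoking work->func() (so the work may be
 * re-queued from within its own callback) and clears BUSY afterwards unless
 * the work was re-claimed in the meantime.
 */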

void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}

#ifdef CONFIG_SMP
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

        /* Arch remote IPI send/receive backends aren't NMI safe */
        WARN_ON_ONCE(in_nmi());

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
                arch_send_call_function_single_ipi(cpu);

        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue_on);
#endif
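
/*
 * Minimal usage sketch for the remote variant (hypothetical caller names,
 * not part of this file).  This path relies on the generic call-function
 * IPI and therefore must not be used from NMI context, as the
 * WARN_ON_ONCE(in_nmi()) above checks:
 *
 *      static struct irq_work stats_work;      // initialized elsewhere with init_irq_work()
 *
 *      if (!irq_work_queue_on(&stats_work, target_cpu))
 *              ;       // already pending on some CPU, nothing to do
 */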

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();

        /* If the work is "lazy", handle it from next tick if any */
        if (work->flags & IRQ_WORK_LAZY) {
                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                    tick_nohz_tick_stopped())
                        arch_irq_work_raise();
        } else {
                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
                        arch_irq_work_raise();
        }

        preempt_enable();

        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
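
/*
 * Minimal local-CPU usage sketch (hypothetical caller names; only the API
 * in this file plus init_irq_work() from <linux/irq_work.h> is assumed):
 *
 *      static void my_irq_work_func(struct irq_work *work)
 *      {
 *              // runs later in hardirq context on the queueing CPU
 *      }
 *      static struct irq_work my_work;
 *
 *      init_irq_work(&my_work, my_irq_work_func);   // once, at init time
 *      irq_work_queue(&my_work);                    // NMI-safe; false if already pending
 *
 * A work item queued with IRQ_WORK_LAZY set in its flags goes to the
 * per-CPU lazy_list and raises the arch hook only when the tick is
 * stopped; otherwise it waits for the next timer tick.
 */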

bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}

static void irq_work_run_list(struct llist_head *list)
{
        unsigned long flags;
        struct irq_work *work;
        struct llist_node *llnode;
        unsigned long long ts;

        BUG_ON(!irqs_disabled());

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        while (llnode != NULL) {
                work = llist_entry(llnode, struct irq_work, llnode);

                llnode = llist_next(llnode);

                /*
                 * Clear the PENDING bit, after this point the @work
                 * can be re-used.
                 * Make it immediately visible so that other CPUs trying
                 * to claim that work don't rely on us to handle their data
                 * while we are in the middle of the func.
                 */
                flags = work->flags & ~IRQ_WORK_PENDING;
                xchg(&work->flags, flags);

                check_start_time(ts);
                work->func(work);
                check_process_time("irq_work %ps", ts, work->func);

                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
                 */
                (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
        }
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);
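
/*
 * Called from the periodic timer tick path (typically via
 * update_process_times()): run the raised list here only when the
 * architecture lacks a dedicated irq_work interrupt, then always drain
 * the lazy list.
 */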
void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);

        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work, ensuring the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        WARN_ON_ONCE(irqs_disabled());

        while (work->flags & IRQ_WORK_BUSY)
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
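
/*
 * Illustrative teardown pattern (hypothetical caller, reusing my_work from
 * the sketch above): wait for any in-flight callback before the work item
 * or the module that owns it goes away.
 *
 *      irq_work_sync(&my_work);
 *
 * irq_work_sync() busy-waits on the BUSY bit, so it must be called from a
 * context with interrupts enabled, as the WARN_ON_ONCE() above checks.
 */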