irq_work.c
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/hardirq.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 *
 * We use the lower two bits of the next pointer to keep PENDING and BUSY
 * flags.
 */
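
/*
 * Added note, not in the original file: the flag trick works because a
 * struct irq_work contains pointers and is therefore at least 4-byte
 * aligned, so the two low bits of any pointer to one are always zero.
 * With illustrative addresses:
 *
 *   entry->next == (struct irq_work *)0xc0401000  : pointer 0xc0401000, no flags
 *   entry->next == (struct irq_work *)0xc0401003  : pointer 0xc0401000, PENDING|BUSY
 */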
#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

static inline bool irq_work_is_set(struct irq_work *entry, int flags)
{
	return (unsigned long)entry->next & flags;
}

static inline struct irq_work *irq_work_next(struct irq_work *entry)
{
	unsigned long next = (unsigned long)entry->next;
	next &= ~IRQ_WORK_FLAGS;
	return (struct irq_work *)next;
}

static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
{
	unsigned long next = (unsigned long)entry;
	next |= flags;
	return (struct irq_work *)next;
}

static DEFINE_PER_CPU(struct irq_work *, irq_work_list);
/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *entry)
{
	struct irq_work *next, *nflags;

	do {
		next = entry->next;
		if ((unsigned long)next & IRQ_WORK_PENDING)
			return false;
		nflags = next_flags(next, IRQ_WORK_FLAGS);
	} while (cmpxchg(&entry->next, next, nflags) != next);

	return true;
}
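
/*
 * Added note, not in the original file: the claim only fails while
 * PENDING is set. A BUSY entry (callback running, PENDING already
 * cleared by irq_work_run()) can be claimed again, which is what lets
 * an entry be re-enqueued while its callback is still in progress.
 */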
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
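
/*
 * Hedged sketch, not part of this file: an architecture overrides the
 * weak stub above by raising a self-IPI whose handler ends up calling
 * irq_work_run(). This is loosely modeled on the x86 port of the same
 * era; IRQ_WORK_VECTOR and the apic calls belong to that port, not to
 * this file.
 */
#if 0	/* illustration only */
void arch_irq_work_raise(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!cpu_has_apic)
		return;

	/* Interrupt ourselves; the vector handler runs the queued work. */
	apic->send_IPI_self(IRQ_WORK_VECTOR);
	apic_wait_icr_idle();
#endif
}
#endif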
/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *entry)
{
	struct irq_work *next;

	preempt_disable();

	do {
		next = __this_cpu_read(irq_work_list);
		/* Can assign non-atomically because we keep the flags set. */
		entry->next = next_flags(next, IRQ_WORK_FLAGS);
	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);

	/* The list was empty, raise self-interrupt to start processing. */
	if (!irq_work_next(entry))
		arch_irq_work_raise();

	preempt_enable();
}
/*
 * Enqueue the irq_work @entry; returns true on success, false when
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *entry)
{
	if (!irq_work_claim(entry)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(entry);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
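
/*
 * Hypothetical usage sketch, not part of this file: the names my_work,
 * my_func and my_nmi_path are illustrative. An NMI handler cannot take
 * locks or wake tasks, so it queues an irq_work and defers the real
 * work to the next hardirq, where irq_work_run() invokes the callback.
 */
#if 0	/* illustration only */
static void my_func(struct irq_work *work)
{
	/* Runs later, from hardirq context, via irq_work_run(). */
}

static struct irq_work my_work = {
	.func = my_func,	/* ->next is NULL, 0: the free state */
};

static void my_nmi_path(void)
{
	/* NMI-safe; returns false if my_work is already pending. */
	irq_work_queue(&my_work);
}
#endif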
/*
 * Run the irq_work entries on this cpu. Must be run from hardirq context
 * with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *list;

	if (this_cpu_read(irq_work_list) == NULL)
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	list = this_cpu_xchg(irq_work_list, NULL);

	while (list != NULL) {
		struct irq_work *entry = list;

		list = irq_work_next(list);

		/*
		 * Clear the PENDING bit, after this point the @entry
		 * can be re-used.
		 */
		entry->next = next_flags(NULL, IRQ_WORK_BUSY);
		entry->func(entry);

		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&entry->next,
			      next_flags(NULL, IRQ_WORK_BUSY),
			      NULL);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);
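
/*
 * Hedged sketch, not part of this file: the self-interrupt raised by
 * arch_irq_work_raise() lands in an arch handler that calls
 * irq_work_run() with IRQs disabled. Loosely modeled on the x86 vector
 * handler of this era; architectures without a self-IPI instead call
 * irq_work_run() from their timer tick, matching the weak stub's comment.
 */
#if 0	/* illustration only */
void smp_irq_work_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	irq_work_run();
	irq_exit();
}
#endif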
/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *entry)
{
	WARN_ON_ONCE(irqs_disabled());

	while (irq_work_is_set(entry, IRQ_WORK_BUSY))
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
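
/*
 * Hypothetical teardown sketch, not part of this file: my_teardown and
 * my_work are illustrative (see the usage sketch above). Before freeing
 * an object embedding an irq_work, or unloading the module that owns the
 * callback, spin until the callback has finished. Must be called with
 * IRQs enabled, or the busy-wait could deadlock against the hardirq
 * that is running the callback.
 */
#if 0	/* illustration only */
static void my_teardown(void)
{
	irq_work_sync(&my_work);
	/* my_func() is no longer running; safe to free my_work's container. */
}
#endif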