cpuhotplug.c

/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips,
	 * which do not implement effective affinity, but the architecture has
	 * enabled the config switch. Use the general affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * IRQ chip might be already torn down, but the irq descriptor is
	 * still in the radix tree. Also if the chip has no affinity setter,
	 * nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required, if:
	 * - Interrupt is per cpu
	 * - Interrupt is not started
	 * - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete a possibly pending irq move cleanup. If this interrupt
	 * was moved in hard irq context, then the vectors need to be
	 * cleaned up. It can't wait until this interrupt actually happens
	 * and this CPU was involved.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}

	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}
/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}
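
/*
 * Illustrative sketch, not part of this file: irq_migrate_all_off_this_cpu()
 * is meant to be called from an architecture's CPU offline path once the
 * dying CPU has been cleared from cpu_online_mask, e.g. from its
 * __cpu_disable() implementation (as arch/arm does). The function below is
 * a hypothetical example of such a call site, not the actual arch code.
 */
#if 0	/* example only */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/* Remove the dying CPU from the online mask first ... */
	set_cpu_online(cpu, false);

	/* ... then push interrupts targeting it over to the remaining CPUs. */
	irq_migrate_all_off_this_cpu();

	return 0;
}
#endif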
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around.
	 */
	if (!irqd_is_single_target(data))
		irq_set_affinity_locked(data, affinity, false);
}
/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu: Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();
	return 0;
}
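
/*
 * Illustrative sketch, not part of this file: irq_affinity_online_cpu() is
 * intended to run as a CPU hotplug "online" callback so that managed
 * interrupts which were shut down on CPU offline are started again when one
 * of their target CPUs comes back. In the kernel proper this is wired up
 * statically in the cpuhp state table ("irq/affinity:online"); the dynamic
 * registration below is only a hypothetical, roughly equivalent example.
 */
#if 0	/* example only */
static int __init irq_affinity_hotplug_example_init(void)
{
	int ret;

	/* Register irq_affinity_online_cpu() as a dynamic online callback. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "irq/affinity:online",
				irq_affinity_online_cpu, NULL);
	return ret < 0 ? ret : 0;
}
#endif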