migration.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

/**
 * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
 * @desc:	Interrupt descriptor to clean up
 * @force_clear: If set, clear the move pending bit unconditionally.
 *		If not set, clear it only when the dying CPU is the
 *		last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case clear the pending move bit.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}
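
/*
 * Usage sketch (an illustration, not code from this file): the CPU hotplug
 * code in kernel/irq/cpuhotplug.c consumes the return value when migrating
 * interrupts off a dying CPU, roughly along these lines:
 *
 *	if (irq_fixup_move_pending(desc, true))
 *		affinity = irq_desc_get_pending_mask(desc);
 *	else
 *		affinity = irq_data_get_affinity_mask(data);
 *
 * A pending mask that still contains an online CPU wins over the current
 * affinity, so a late affinity change is not lost when the CPU goes down.
 */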
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_data *data = &desc->irq_data;
	struct irq_chip *chip = data->chip;

	if (likely(!irqd_is_setaffinity_pending(data)))
		return;

	irqd_clr_move_pending(data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there is a valid mask to work with, do the disable,
	 * re-program, enable sequence. This is *not* particularly
	 * important for level-triggered interrupts, but in an
	 * edge-triggered case we might be writing the RTE while an
	 * active trigger is coming in, which could cause some IOAPICs
	 * to malfunction. Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller masking
	 * the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
		int ret;

		ret = irq_do_set_affinity(data, desc->pending_mask, false);
		/*
		 * If there is a cleanup pending in the underlying
		 * vector management, reschedule the move for the next
		 * interrupt. Leave desc->pending_mask intact.
		 */
		if (ret == -EBUSY) {
			irqd_set_move_pending(data);
			return;
		}
	}
	cpumask_clear(desc->pending_mask);
}
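
/*
 * Minimal caller sketch (an assumption for illustration, not code from
 * this file): as the assert above requires, a caller must hold desc->lock
 * and should have the interrupt masked, e.g.:
 *
 *	raw_spin_lock(&desc->lock);
 *	if (irqd_irq_masked(irqd))
 *		irq_move_masked_irq(irqd);
 *	raw_spin_unlock(&desc->lock);
 *
 * Callers that cannot guarantee masking should use irq_move_irq() below,
 * which performs the mask/unmask sequence itself.
 */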
void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get the top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * enabled; the lookup should be optimized away when it is
	 * disabled, which avoids an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY"
	 * here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
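
/*
 * Usage sketch (a hedged example, not part of this file): architectures
 * typically call irq_move_irq() from their interrupt acknowledge path, so
 * a pending move is performed in interrupt context where the vector can
 * be switched safely. On x86, for instance, apic_ack_irq() in
 * arch/x86/kernel/apic/vector.c does roughly:
 *
 *	void apic_ack_irq(struct irq_data *irqd)
 *	{
 *		irq_move_irq(irqd);
 *		ack_APIC_irq();
 *	}
 */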