migration.c

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"
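
/*
 * irq_move_masked_irq - Apply a pending affinity change to a masked IRQ.
 *
 * Called with desc->lock held and the interrupt masked by the caller.
 * If an affinity change is pending, program desc->pending_mask into the
 * irq chip and clear both the pending flag and the pending mask.
 */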
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = desc->irq_data.chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	irqd_clr_move_pending(&desc->irq_data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level-triggered
	 * interrupts, but in an edge-triggered case we might be setting
	 * the RTE while an active trigger is coming in. This could
	 * cause some IO-APICs to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

	cpumask_clear(desc->pending_mask);
}
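
/*
 * irq_move_irq - Perform a pending affinity change for a possibly unmasked IRQ.
 *
 * Mask the interrupt if it is not already masked, let irq_move_masked_irq()
 * apply the pending affinity setting, then restore the previous mask state.
 */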
void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get the top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * enabled; it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY
	 * is disabled. This avoids an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);

	irq_move_masked_irq(idata);

	if (!masked)
		idata->chip->irq_unmask(idata);
}