ipi.h
#ifndef _ASM_X86_IPI_H
#define _ASM_X86_IPI_H

#ifdef CONFIG_X86_LOCAL_APIC

/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Generic APIC InterProcessor Interrupt code.
 *
 * Moved to include file by James Cleverdon from
 * arch/x86-64/kernel/smp.c
 *
 * Copyrights from kernel/smp.c:
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License, v.2
 */

#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/smp.h>

/*
 * the following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */

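/*
 * Assemble the low 32 bits of the Interrupt Command Register: destination
 * shorthand, delivery mode (fixed, or NMI for NMI_VECTOR) and vector number.
 */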
static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
                                         unsigned int dest)
{
        unsigned int icr = shortcut | dest;

        switch (vector) {
        default:
                icr |= APIC_DM_FIXED | vector;
                break;
        case NMI_VECTOR:
                icr |= APIC_DM_NMI;
                break;
        }
        return icr;
}

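/*
 * Assemble the high 32 bits of the ICR (ICR2): the destination APIC ID field.
 */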
static inline int __prepare_ICR2(unsigned int mask)
{
        return SET_APIC_DEST_FIELD(mask);
}

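/*
 * Spin until the xAPIC reports the previous IPI as accepted, i.e. until the
 * Delivery Status bit in the ICR is clear.
 */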
static inline void __xapic_wait_icr_idle(void)
{
        while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
                cpu_relax();
}

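/*
 * Send an IPI using a destination shorthand (self, all, or all-but-self), so
 * the target field in ICR2 does not need to be programmed.
 */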
static inline void
__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
{
        /*
         * Subtle. In the case of the 'never do double writes' workaround
         * we have to lock out interrupts to be safe. As we don't care
         * about the value read we use an atomic rmw access to avoid
         * costly cli/sti. Otherwise we use an even cheaper single atomic
         * write to the APIC.
         */
        unsigned int cfg;

        /*
         * Wait for idle.
         */
        __xapic_wait_icr_idle();

        /*
         * No need to touch the target chip field
         */
        cfg = __prepare_ICR(shortcut, vector, dest);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
static inline void
__default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
        unsigned long cfg;

        /*
         * Wait for idle.
         */
        if (unlikely(vector == NMI_VECTOR))
                safe_apic_wait_icr_idle();
        else
                __xapic_wait_icr_idle();

        /*
         * prepare target chip field
         */
        cfg = __prepare_ICR2(mask);
        native_apic_mem_write(APIC_ICR2, cfg);

        /*
         * program the ICR
         */
        cfg = __prepare_ICR(0, vector, dest);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        native_apic_mem_write(APIC_ICR, cfg);
}

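/*
 * Out-of-line helpers (implemented in the APIC code): the "sequence" variant
 * walks the cpumask and sends one physical-mode IPI per CPU; the
 * "allbutself" variant does the same but skips the sending CPU.
 */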
extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
                                                int vector);
extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
                                                  int vector);

/* Avoid include hell */
#define NMI_VECTOR 0x02

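/*
 * When non-zero, shorthand broadcasts are avoided and IPIs go to the online
 * mask instead (see the helpers below).
 */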
extern int no_broadcast;

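/*
 * Local "all" / "all-but-self" helpers: use the cheap shorthand broadcast when
 * permitted, otherwise fall back to sending to each online CPU via the
 * apic->send_IPI_mask*() callbacks. NMIs never use the shorthand path.
 */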
static inline void __default_local_send_IPI_allbutself(int vector)
{
        if (no_broadcast || vector == NMI_VECTOR)
                apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
        else
                __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
}

static inline void __default_local_send_IPI_all(int vector)
{
        if (no_broadcast || vector == NMI_VECTOR)
                apic->send_IPI_mask(cpu_online_mask, vector);
        else
                __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
}

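/*
 * Usage sketch (hypothetical, not part of this header): an APIC driver with
 * no special broadcast needs would typically just forward its
 * ->send_IPI_all() / ->send_IPI_allbutself() callbacks here, e.g.:
 *
 *        static void my_apic_send_IPI_all(int vector)
 *        {
 *                __default_local_send_IPI_all(vector);
 *        }
 *
 * where my_apic_send_IPI_all() is an illustrative name only.
 */
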
#ifdef CONFIG_X86_32
extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
                                                   int vector);
extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
                                                     int vector);
extern void default_send_IPI_mask_logical(const struct cpumask *mask,
                                          int vector);
extern void default_send_IPI_allbutself(int vector);
extern void default_send_IPI_all(int vector);
extern void default_send_IPI_self(int vector);
#endif

#endif

#endif /* _ASM_X86_IPI_H */