#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/proto.h>
#include <asm/ipi.h>

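/*
 * Background note: all of the senders below work by programming the local
 * APIC's 64-bit Interrupt Command Register (ICR). In xAPIC mode the ICR is
 * exposed as two 32-bit MMIO words: APIC_ICR carries the vector, delivery
 * mode, destination mode and shorthand bits, while the top byte of
 * APIC_ICR2 (bits 56-63 of the full register) carries the destination APIC
 * ID. Writing APIC_ICR is what actually dispatches the IPI, so APIC_ICR2
 * must be programmed first whenever no shorthand is used, and the "wait
 * for idle" calls below poll the ICR delivery-status bit so that a
 * previous IPI is not overwritten while still in flight.
 */
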
void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid
	 * costly cli/sti. Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field
	 */
	cfg = __prepare_ICR(shortcut, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

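/*
 * Illustrative only (no caller in this file does exactly this): a
 * broadcast to every CPU, including the sender, via the shorthand path
 * would look like
 *
 *	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector,
 *				    apic->dest_logical);
 *
 * with APIC_DEST_ALLBUT and APIC_DEST_SELF selecting the other two
 * shorthand destinations; default_send_IPI_self() below uses the latter.
 */
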
/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

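/*
 * Note the ordering above: the destination must land in APIC_ICR2 before
 * APIC_ICR is written, because the second write is what triggers the send.
 * For NMIs the timeout-bounded safe_apic_wait_icr_idle() is used instead
 * of spinning indefinitely, so the NMI path cannot hang forever on an ICR
 * that never drains.
 */
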
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

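/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * kicking CPU 2 with the reschedule vector would be
 *
 *	default_send_IPI_single_phys(2, RESCHEDULE_VECTOR);
 *
 * Interrupts are disabled across the two ICR writes so that an IPI sent
 * from an interrupt handler cannot clobber APIC_ICR2 in between.
 */
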
void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

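/*
 * Design note: the loop above costs one full ICR transaction per CPU in
 * the mask, with interrupts off for the whole walk, so the sequence-based
 * mask senders scale linearly with the number of targets.
 */
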
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

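/*
 * "Smaller machines" because the first word of the cpumask is written
 * directly as the logical destination bitmap: every target CPU must own a
 * distinct bit of the destination field, which in flat logical mode is
 * only 8 bits wide, limiting this path to 8 CPUs.
 */
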
void default_send_IPI_allbutself(int vector)
{
	/*
	 * If there are no other CPUs in the system then we get an APIC send
	 * error if we try to broadcast, thus avoid sending IPIs in this case.
	 */
	if (!(num_online_cpus() > 1))
		return;

	__default_local_send_IPI_allbutself(vector);
}

void default_send_IPI_all(int vector)
{
	__default_local_send_IPI_all(vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}

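/*
 * safe_smp_processor_id() reads the APIC ID straight from the hardware
 * and reverse-maps it with convert_apicid_to_cpu(), so it works even when
 * the per-CPU state backing smp_processor_id() cannot be trusted (e.g. on
 * a crashing CPU); on any failure it falls back to CPU 0.
 */
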
#endif