msr-smp.c

#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <asm/msr.h>

static void __rdmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	/* Read into the caller's per-CPU slot if one was supplied. */
	if (rv->msrs)
		reg = per_cpu_ptr(rv->msrs, this_cpu);
	else
		reg = &rv->reg;

	rdmsr(rv->msr_no, reg->l, reg->h);
}

static void __wrmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = per_cpu_ptr(rv->msrs, this_cpu);
	else
		reg = &rv->reg;

	wrmsr(rv->msr_no, reg->l, reg->h);
}

int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_on_cpu);

int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsr_on_cpu);
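
/*
 * Illustrative usage sketch, not part of the original file: reading an MSR
 * on one CPU and writing the same value on another with the two exported
 * helpers above. example_copy_msr() and its parameters are hypothetical.
 */
static void __maybe_unused example_copy_msr(unsigned int src_cpu,
					    unsigned int dst_cpu, u32 msr_no)
{
	u32 l, h;

	/* rdmsr_on_cpu() returns the smp_call_function_single() result. */
	if (rdmsr_on_cpu(src_cpu, msr_no, &l, &h))
		return;

	wrmsr_on_cpu(dst_cpu, msr_no, l, h);
}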

static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
			    struct msr *msrs,
			    void (*msr_func) (void *info))
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.msrs   = msrs;
	rv.msr_no = msr_no;

	this_cpu = get_cpu();

	/*
	 * Run msr_func() locally if this CPU is in the mask;
	 * smp_call_function_many() only IPIs the other CPUs in the mask.
	 */
	if (cpumask_test_cpu(this_cpu, mask))
		msr_func(&rv);

	smp_call_function_many(mask, msr_func, &rv, 1);
	put_cpu();
}

/* rdmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	array of MSR values
 *
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);
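
/*
 * Illustrative usage sketch, not part of the original file: reading one MSR
 * on every online CPU in a single pass. It assumes the msrs_alloc()/
 * msrs_free() per-CPU buffer helpers from arch/x86/lib/msr.c, and
 * MSR_IA32_MPERF is only an example choice of MSR.
 */
static void __maybe_unused example_dump_mperf(void)
{
	struct msr *msrs;
	int cpu;

	msrs = msrs_alloc();	/* per-CPU array of struct msr */
	if (!msrs)
		return;

	rdmsr_on_cpus(cpu_online_mask, MSR_IA32_MPERF, msrs);

	for_each_online_cpu(cpu) {
		struct msr *m = per_cpu_ptr(msrs, cpu);

		pr_info("cpu%d: MPERF = %#x:%#x\n", cpu, m->h, m->l);
	}

	msrs_free(msrs);
}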

/*
 * wrmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	array of MSR values
 *
 */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
EXPORT_SYMBOL(wrmsr_on_cpus);
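
/*
 * Illustrative usage sketch, not part of the original file: a
 * read-modify-write over a CPU mask built from the two helpers above.
 * EXAMPLE_MSR and EXAMPLE_LO_BIT are placeholders, not real definitions,
 * and msrs_alloc()/msrs_free() again come from arch/x86/lib/msr.c.
 */
#define EXAMPLE_MSR	0x0	/* placeholder MSR number */
#define EXAMPLE_LO_BIT	0x1	/* placeholder bit in the low 32 bits */

static void __maybe_unused example_set_bit_on_cpus(const struct cpumask *mask)
{
	struct msr *msrs;
	int cpu;

	msrs = msrs_alloc();
	if (!msrs)
		return;

	/* Gather the current value from every CPU in the mask... */
	rdmsr_on_cpus(mask, EXAMPLE_MSR, msrs);

	/* ...modify each CPU's copy... */
	for_each_cpu(cpu, mask)
		per_cpu_ptr(msrs, cpu)->l |= EXAMPLE_LO_BIT;

	/* ...and write the per-CPU values back. */
	wrmsr_on_cpus(mask, EXAMPLE_MSR, msrs);

	msrs_free(msrs);
}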

/* These "safe" variants are slower and should be used when the target MSR
   may not actually exist. */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
}

static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}

int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);

int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);
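
/*
 * Illustrative usage sketch, not part of the original file: probing an MSR
 * that may not exist on the target CPU. Both an smp_call_function_single()
 * failure and the error from a faulting RDMSR come back as a non-zero
 * return, so the caller only needs one check. example_probe_msr() is a
 * hypothetical helper.
 */
static int __maybe_unused example_probe_msr(unsigned int cpu, u32 msr_no,
					    u64 *val)
{
	u32 l, h;
	int err;

	err = rdmsr_safe_on_cpu(cpu, msr_no, &l, &h);
	if (err) {
		pr_debug("cpu%u: MSR %#x not readable (%d)\n", cpu, msr_no, err);
		return err;
	}

	*val = ((u64)h << 32) | l;
	return 0;
}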

/*
 * These variants are significantly slower, but allow control over
 * the entire 32-bit GPR set.
 */
static void __rdmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = rdmsr_safe_regs(rv->regs);
}

static void __wrmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = wrmsr_safe_regs(rv->regs);
}

int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err = -EIO;
	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);

int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err = -EIO;
	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
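
/*
 * Illustrative usage sketch, not part of the original file: a cross-CPU read
 * through the full-GPR interface. The regs[] layout assumed here (EAX at
 * index 0, the MSR number in ECX at index 1, EDX at index 2) follows
 * existing in-tree callers of rdmsr_safe_regs(); some MSRs also require
 * other GPRs (e.g. EDI) to be preloaded, which is why this interface exists.
 */
static int __maybe_unused example_read_msr_regs(unsigned int cpu, u32 msr_no,
						u64 *val)
{
	u32 regs[8] = { 0 };
	int err;

	regs[1] = msr_no;	/* ECX selects the MSR */

	err = rdmsr_safe_regs_on_cpu(cpu, regs);
	if (err)
		return err;

	*val = ((u64)regs[2] << 32) | regs[0];	/* EDX:EAX */
	return 0;
}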