mce-inject.c

/*
 * Machine check injection support.
 * Copyright 2008 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * Authors:
 * Andi Kleen
 * Ying Huang
 */
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <asm/mce.h>
#include <asm/apic.h>
#include <asm/nmi.h>

/* Update fake mce registers on current CPU. */
static void inject_mce(struct mce *m)
{
        struct mce *i = &per_cpu(injectm, m->extcpu);

        /* Make sure no one reads partially written injectm */
        i->finished = 0;
        mb();
        m->finished = 0;
        /* First set the fields after finished */
        i->extcpu = m->extcpu;
        mb();
        /* Now write record in order, finished last (except above) */
        memcpy(i, m, sizeof(struct mce));
        /* Finally activate it */
        mb();
        i->finished = 1;
}
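
/*
 * Illustrative sketch only, not used by this module: how a consumer of
 * the per-CPU injectm record is expected to honor the protocol above.
 * The real consumer is the fake-MSR access path in mce.c, which checks
 * ->finished before trusting the record; the helper name here is made
 * up for illustration.
 */
static bool __maybe_unused injectm_peek(int cpu, struct mce *out)
{
        struct mce *i = &per_cpu(injectm, cpu);

        /* inject_mce() publishes ->finished last, after a full barrier */
        if (!i->finished)
                return false;
        smp_rmb();      /* pairs with the mb() before i->finished = 1 */
        *out = *i;
        return true;
}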

static void raise_poll(struct mce *m)
{
        unsigned long flags;
        mce_banks_t b;

        memset(&b, 0xff, sizeof(mce_banks_t));
        local_irq_save(flags);
        machine_check_poll(0, &b);
        local_irq_restore(flags);
        m->finished = 0;
}

static void raise_exception(struct mce *m, struct pt_regs *pregs)
{
        struct pt_regs regs;
        unsigned long flags;

        if (!pregs) {
                memset(&regs, 0, sizeof(struct pt_regs));
                regs.ip = m->ip;
                regs.cs = m->cs;
                pregs = &regs;
        }
        /* in the mcheck exception handler, interrupts will be disabled */
        local_irq_save(flags);
        do_machine_check(pregs, 0);
        local_irq_restore(flags);
        m->finished = 0;
}
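
/*
 * CPUs targeted by a broadcast injection are tracked in this mask.
 * Each target clears its own bit once it has raised its local event
 * (from the NMI handler or the IPI handler below), which is how the
 * originating CPU knows when everyone has finished.
 */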
static cpumask_var_t mce_inject_cpumask;

static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
{
        int cpu = smp_processor_id();
        struct mce *m = &__get_cpu_var(injectm);

        if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
                return NMI_DONE;
        cpumask_clear_cpu(cpu, mce_inject_cpumask);
        if (m->inject_flags & MCJ_EXCEPTION)
                raise_exception(m, regs);
        else if (m->status)
                raise_poll(m);
        return NMI_HANDLED;
}

static void mce_irq_ipi(void *info)
{
        int cpu = smp_processor_id();
        struct mce *m = &__get_cpu_var(injectm);

        if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
            m->inject_flags & MCJ_EXCEPTION) {
                cpumask_clear_cpu(cpu, mce_inject_cpumask);
                raise_exception(m, NULL);
        }
}

/* Inject mce on current CPU */
static int raise_local(void)
{
        struct mce *m = &__get_cpu_var(injectm);
        int context = MCJ_CTX(m->inject_flags);
        int ret = 0;
        int cpu = m->extcpu;

        if (m->inject_flags & MCJ_EXCEPTION) {
                printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu);
                switch (context) {
                case MCJ_CTX_IRQ:
                        /*
                         * Could do more to fake interrupts like
                         * calling irq_enter, but the necessary
                         * machinery isn't exported currently.
                         */
                        /*FALL THROUGH*/
                case MCJ_CTX_PROCESS:
                        raise_exception(m, NULL);
                        break;
                default:
                        printk(KERN_INFO "Invalid MCE context\n");
                        ret = -EINVAL;
                }
                printk(KERN_INFO "MCE exception done on CPU %d\n", cpu);
        } else if (m->status) {
                printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu);
                raise_poll(m);
                mce_notify_irq();
                printk(KERN_INFO "Machine check poll done on CPU %d\n", cpu);
        } else
                m->finished = 0;

        return ret;
}
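
/*
 * Raise the injected event.  For the broadcast modes every eligible
 * online CPU is marked in mce_inject_cpumask and sent an IRQ or NMI;
 * each target clears its own bit once it has raised its event, and the
 * originating CPU spins until the mask is empty (or a ~2 second timeout
 * expires) before raising its own local event, so the fake machine
 * check hits all CPUs roughly at the same time.
 */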
static void raise_mce(struct mce *m)
{
        int context = MCJ_CTX(m->inject_flags);

        inject_mce(m);

        if (context == MCJ_CTX_RANDOM)
                return;

#ifdef CONFIG_X86_LOCAL_APIC
        if (m->inject_flags & (MCJ_IRQ_BRAODCAST | MCJ_NMI_BROADCAST)) {
                unsigned long start;
                int cpu;

                get_online_cpus();
                cpumask_copy(mce_inject_cpumask, cpu_online_mask);
                cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
                for_each_online_cpu(cpu) {
                        struct mce *mcpu = &per_cpu(injectm, cpu);

                        if (!mcpu->finished ||
                            MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
                                cpumask_clear_cpu(cpu, mce_inject_cpumask);
                }
                if (!cpumask_empty(mce_inject_cpumask)) {
                        if (m->inject_flags & MCJ_IRQ_BRAODCAST) {
                                /*
                                 * Don't wait for completion: mce_irq_ipi
                                 * has to run in parallel with the
                                 * raise_local() below.
                                 */
                                preempt_disable();
                                smp_call_function_many(mce_inject_cpumask,
                                        mce_irq_ipi, NULL, 0);
                                preempt_enable();
                        } else if (m->inject_flags & MCJ_NMI_BROADCAST)
                                apic->send_IPI_mask(mce_inject_cpumask,
                                                NMI_VECTOR);
                }
                start = jiffies;
                while (!cpumask_empty(mce_inject_cpumask)) {
                        if (!time_before(jiffies, start + 2*HZ)) {
                                printk(KERN_ERR
                                       "Timeout waiting for mce inject %lx\n",
                                       *cpumask_bits(mce_inject_cpumask));
                                break;
                        }
                        cpu_relax();
                }
                raise_local();
                put_cpu();
                put_online_cpus();
        } else
#endif
                raise_local();
}

/* Error injection interface */
static ssize_t mce_write(struct file *filp, const char __user *ubuf,
                         size_t usize, loff_t *off)
{
        struct mce m;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        /*
         * There are some cases where real MSR reads could slip
         * through.
         */
        if (!boot_cpu_has(X86_FEATURE_MCE) || !boot_cpu_has(X86_FEATURE_MCA))
                return -EIO;

        if ((unsigned long)usize > sizeof(struct mce))
                usize = sizeof(struct mce);
        if (copy_from_user(&m, ubuf, usize))
                return -EFAULT;

        if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu))
                return -EINVAL;

        /*
         * Need to give user space some time to set everything up,
         * so do it a jiffie or two later everywhere.
         */
        schedule_timeout(2);
        raise_mce(&m);

        return usize;
}
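
/*
 * Userspace side (illustrative sketch, not part of this module): in
 * practice injection is driven by the mce-inject tool from the mcelog
 * package, which parses a text description of the error and writes a
 * binary struct mce to the character device that ends up in mce_write()
 * above (/dev/mcelog in this kernel generation).  Conceptually:
 *
 *      struct mce m = {};
 *      int fd;
 *
 *      m.status = MCI_STATUS_VAL | MCI_STATUS_UC;      // fabricated status
 *      m.bank = 1;                                     // example bank
 *      m.extcpu = 0;                                   // target CPU
 *      m.inject_flags = MCJ_EXCEPTION | MCJ_CTX_PROCESS;
 *      fd = open("/dev/mcelog", O_WRONLY);
 *      write(fd, &m, sizeof(m));
 *
 * The struct mce layout must match the running kernel, which is why the
 * real tool is built against the kernel's asm/mce.h.
 */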

static int inject_init(void)
{
        if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
                return -ENOMEM;
        printk(KERN_INFO "Machine check injector initialized\n");
        register_mce_write_callback(mce_write);
        register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
                             "mce_notify");
        return 0;
}

module_init(inject_init);
/*
 * Cannot tolerate unloading currently because we cannot
 * guarantee all openers of mce_chrdev will get a reference to us.
 */
MODULE_LICENSE("GPL");