ip27-nmi.c

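/*
 * NMI handling for the SGI IP27 (Origin 200/2000) platform.
 *
 * When an NMI is raised, the IP27 PROM saves each cpu's register state
 * in a per-node area in IP27prom format.  The routines below install
 * the kernel's NMI vector and dump that saved state, along with the
 * hub interrupt state, for every cpu in the system.
 */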
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <asm/atomic.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/nmi.h>
#include <asm/sn/arch.h>
#include <asm/sn/sn0/hub.h>

#if 0
#define NODE_NUM_CPUS(n)	CNODE_NUM_CPUS(n)
#else
#define NODE_NUM_CPUS(n)	CPUS_PER_NODE
#endif

#define CNODEID_NONE (cnodeid_t)-1

typedef unsigned long machreg_t;

static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/*
 * Let's see what else we need to do here. Set up sp, gp?
 */
void nmi_dump(void)
{
	void cont_nmi_dump(void);

	cont_nmi_dump();
}
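/*
 * Install nmi_dump() as the NMI vector for one cpu slice on the local
 * node.  Both the handler address and its bitwise complement are
 * stored in the per-slice nmi_t block, apparently so the PROM can
 * sanity-check the entry before jumping to it.
 */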
void install_cpu_nmi_handler(int slice)
{
	nmi_t *nmi_addr;

	nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
	if (nmi_addr->call_addr)
		return;
	nmi_addr->magic = NMI_MAGIC;
	nmi_addr->call_addr = (void *)nmi_dump;
	nmi_addr->call_addr_c =
		(void *)(~((unsigned long)(nmi_addr->call_addr)));
	nmi_addr->call_parm = 0;
}
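/*
 * Minimal usage sketch (an assumption, not code from this file): the
 * platform setup path is expected to install the handler once per cpu
 * slice on the booting node, e.g.
 *
 *	int slice;
 *
 *	for (slice = 0; slice < CPUS_PER_NODE; slice++)
 *		install_cpu_nmi_handler(slice);
 */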
/*
 * Copy the cpu registers which have been saved in the IP27prom format
 * into the eframe format for the node under consideration.
 */
void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
	struct reg_struct *nr;
	int i;

	/* Get the pointer to the current cpu's register set. */
	nr = (struct reg_struct *)
		(TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
		slice * IP27_NMI_KREGS_CPU_SIZE);

	printk("NMI nasid %d: slice %d\n", nasid, slice);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);

		printk(" %016lx", nr->gpr[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

	printk("Hi    : (value lost)\n");
	printk("Lo    : (value lost)\n");

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %016lx %pS\n", nr->epc, (void *) nr->epc);
	printk("%s\n", print_tainted());
	printk("ErrEPC: %016lx %pS\n", nr->error_epc, (void *) nr->error_epc);
	printk("ra    : %016lx %pS\n", nr->gpr[31], (void *) nr->gpr[31]);
	printk("Status: %08lx ", nr->sr);

	if (nr->sr & ST0_KX)
		printk("KX ");
	if (nr->sr & ST0_SX)
		printk("SX ");
	if (nr->sr & ST0_UX)
		printk("UX ");

	switch (nr->sr & ST0_KSU) {
	case KSU_USER:
		printk("USER ");
		break;
	case KSU_SUPERVISOR:
		printk("SUPERVISOR ");
		break;
	case KSU_KERNEL:
		printk("KERNEL ");
		break;
	default:
		printk("BAD_MODE ");
		break;
	}

	if (nr->sr & ST0_ERL)
		printk("ERL ");
	if (nr->sr & ST0_EXL)
		printk("EXL ");
	if (nr->sr & ST0_IE)
		printk("IE ");
	printk("\n");

	printk("Cause : %08lx\n", nr->cause);
	printk("PrId  : %08x\n", read_c0_prid());
	printk("BadVA : %016lx\n", nr->badva);
	printk("CErr  : %016lx\n", nr->cache_err);
	printk("NMI_SR: %016lx\n", nr->nmi_sr);

	printk("\n");
}
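/*
 * Dump the hub PI interrupt mask registers of the given slice together
 * with the node's pending interrupt registers.
 */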
void nmi_dump_hub_irq(nasid_t nasid, int slice)
{
	hubreg_t mask0, mask1, pend0, pend1;

	if (slice == 0) {				/* Slice A */
		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
	} else {					/* Slice B */
		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
	}

	pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
	pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);

	printk("PI_INT_MASK0: %16Lx PI_INT_MASK1: %16Lx\n", mask0, mask1);
	printk("PI_INT_PEND0: %16Lx PI_INT_PEND1: %16Lx\n", pend0, pend1);

	printk("\n\n");
}
/*
 * Save the NMI cpu registers and dump the hub interrupt state for
 * every cpu slice of the node under consideration.
 */
void nmi_node_eframe_save(cnodeid_t cnode)
{
	nasid_t nasid;
	int slice;

	/* Make sure that we have a valid node */
	if (cnode == CNODEID_NONE)
		return;

	nasid = COMPACT_TO_NASID_NODEID(cnode);
	if (nasid == INVALID_NASID)
		return;

	/* Save the registers into eframe for each cpu */
	for (slice = 0; slice < NODE_NUM_CPUS(slice); slice++) {
		nmi_cpu_eframe_save(nasid, slice);
		nmi_dump_hub_irq(nasid, slice);
	}
}
/*
 * Save the nmi cpu registers for all cpus in the system.
 */
void
nmi_eframes_save(void)
{
	cnodeid_t cnode;

	for_each_online_node(cnode)
		nmi_node_eframe_save(cnode);
}
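/*
 * Second stage of the NMI dump.  The first cpu to take nmi_lock waits
 * for the other cpus to arrive, saves the eframes for every cpu in the
 * system and finally writes NPR_PORTRESET | NPR_LOCALRESET to the
 * local hub's NI_PORT_RESET register to reset the machine.
 */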
void
cont_nmi_dump(void)
{
#ifndef REAL_NMI_SIGNAL
	static atomic_t nmied_cpus = ATOMIC_INIT(0);

	atomic_inc(&nmied_cpus);
#else
	/* Only used by the REAL_NMI_SIGNAL re-NMI path below. */
	cnodeid_t node;
	int i, n, cpu;
#endif
	/*
	 * Only allow 1 cpu to proceed
	 */
	arch_spin_lock(&nmi_lock);

#ifdef REAL_NMI_SIGNAL
	/*
	 * Wait up to 15 seconds for the other cpus to respond to the NMI.
	 * If a cpu has not responded after 10 sec, send it 1 additional NMI.
	 * This is for 2 reasons:
	 *	- sometimes an MMSC fails to NMI all cpus.
	 *	- on a 512p SN0 system, the MMSC will only send NMIs to
	 *	  half the cpus.  Unfortunately, we don't know which cpus may
	 *	  be NMIed - it depends on how the site chooses to configure.
	 *
	 * Note: it has been measured that it takes the MMSC up to 2.3 secs
	 *	 to send NMIs to all cpus on a 256p system.
	 */
	for (i = 0; i < 1500; i++) {
		for_each_online_node(node)
			if (NODEPDA(node)->dump_count == 0)
				break;
		if (node == MAX_NUMNODES)
			break;
		if (i == 1000) {
			for_each_online_node(node)
				if (NODEPDA(node)->dump_count == 0) {
					cpu = cpumask_first(cpumask_of_node(node));
					for (n = 0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
						CPUMASK_SETB(nmied_cpus, cpu);
						/*
						 * cputonasid, cputoslice
						 * needs kernel cpuid
						 */
						SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
					}
				}
		}
		udelay(10000);
	}
#else
	while (atomic_read(&nmied_cpus) != num_online_cpus())
		;
#endif

	/*
	 * Save the nmi cpu registers for all cpus in the eframe format.
	 */
	nmi_eframes_save();
	LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
}