/*
 *	linux/arch/alpha/kernel/core_t2.c
 *
 * Written by Jay A Estabrook (jestabro@amt.tay1.dec.com).
 * December 1996.
 *
 * based on CIA code by David A Rusling (david.rusling@reo.mts.dec.com)
 *
 * Code common to all T2 core logic chips.
 */

#define __EXTERN_INLINE
#include <asm/io.h>
#include <asm/core_t2.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/ptrace.h>
#include <asm/delay.h>

#include "proto.h"
#include "pci_impl.h"

/* For dumping initial DMA window settings. */
#define DEBUG_PRINT_INITIAL_SETTINGS 0

/* For dumping final DMA window settings. */
#define DEBUG_PRINT_FINAL_SETTINGS 0

/*
 * By default, we direct-map starting at 2GB, in order to allow the
 * maximum size direct-map window (2GB) to match the maximum amount of
 * memory (2GB) that can be present on SABLEs. But that limits the
 * floppy to DMA only via the scatter/gather window set up for 8MB
 * ISA DMA, since the maximum ISA DMA address is 2GB-1.
 *
 * For now, this seems a reasonable trade-off: even though most SABLEs
 * have less than 1GB of memory, floppy usage/performance will not
 * really be affected by forcing it to go via scatter/gather...
 */
#define T2_DIRECTMAP_2G 1

#if T2_DIRECTMAP_2G
# define T2_DIRECTMAP_START	0x80000000UL
# define T2_DIRECTMAP_LENGTH	0x80000000UL
#else
# define T2_DIRECTMAP_START	0x40000000UL
# define T2_DIRECTMAP_LENGTH	0x40000000UL
#endif

/* The ISA scatter/gather window settings. */
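/* 8 MB of scatter/gather space starting at 8 MB; this is the window the
   ISA DMA traffic (e.g. the floppy) mentioned above goes through. */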
#define T2_ISA_SG_START		0x00800000UL
#define T2_ISA_SG_LENGTH	0x00800000UL

/*
 * NOTE: Herein lie back-to-back mb instructions.  They are magic.
 * One plausible explanation is that the i/o controller does not properly
 * handle the system transaction.  Another involves timing.  Ho hum.
 */

/*
 * BIOS32-style PCI interface:
 */

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG(args)	printk args
#else
# define DBG(args)
#endif

static volatile unsigned int t2_mcheck_any_expected;
static volatile unsigned int t2_mcheck_last_taken;

/* Place to save the DMA Window registers as set up by SRM
   for restoration during shutdown. */
static struct
{
	struct {
		unsigned long wbase;
		unsigned long wmask;
		unsigned long tbase;
	} window[2];
	unsigned long hae_1;
	unsigned long hae_2;
	unsigned long hae_3;
	unsigned long hae_4;
	unsigned long hbase;
} t2_saved_config __attribute((common));

/*
 * Given a bus, device, and function number, compute resulting
 * configuration space address and setup the T2_HAXR2 register
 * accordingly.  It is therefore not safe to have concurrent
 * invocations to configuration space access routines, but there
 * really shouldn't be any need for this.
 *
 * Type 0:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	31:11	Device select bit.
 *	10:8	Function number
 *	 7:2	Register number
 *
 * Type 1:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	31:24	reserved
 *	23:16	bus number (8 bits = 256 possible buses)
 *	15:11	Device number (5 bits)
 *	10:8	function number
 *	 7:2	register number
 *
 * Notes:
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 *
 *	The register selects a DWORD (32 bit) register offset.  Hence it
 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *	bits.
 */
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	unsigned long addr;
	u8 bus = pbus->number;

	DBG(("mk_conf_addr(bus=%d, dfn=0x%x, where=0x%x,"
	     " addr=0x%lx, type1=0x%x)\n",
	     bus, device_fn, where, pci_addr, type1));

	if (bus == 0) {
		int device = device_fn >> 3;

		/* Type 0 configuration cycle.  */

		if (device > 8) {
			DBG(("mk_conf_addr: device (%d) > 8, returning -1\n",
			     device));
			return -1;
		}

		*type1 = 0;
		addr = (0x0800L << device) | ((device_fn & 7) << 8) | (where);
	} else {
		/* Type 1 configuration cycle.  */
		*type1 = 1;
		addr = (bus << 16) | (device_fn << 8) | (where);
	}
	*pci_addr = addr;
	DBG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}

/*
 * NOTE: both conf_read() and conf_write() may set HAE_3 when needing
 *       to do type1 access.  This is protected by the use of spinlock IRQ
 *       primitives in the wrapper functions pci_{read,write}_config_*()
 *       defined in drivers/pci/pci.c.
 */
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
	unsigned int value, cpu, taken;
	unsigned long t2_cfg = 0;

	cpu = smp_processor_id();

	DBG(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));

	/* If Type1 access, must set T2 CFG.  */
	if (type1) {
		t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
		*(vulp)T2_HAE_3 = 0x40000000UL | t2_cfg;
		mb();
	}
	mb();
	draina();
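	/* A config cycle to an empty slot machine-checks; mark the
	   mcheck as expected so the handler can dismiss it and the
	   read can return all ones instead.  */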
	mcheck_expected(cpu) = 1;
	mcheck_taken(cpu) = 0;
	t2_mcheck_any_expected |= (1 << cpu);
	mb();

	/* Access configuration space.  */
	value = *(vuip)addr;
	mb();
	mb();  /* magic */

	/* Wait for possible mcheck.  Also, this lets other CPUs clear
	   their mchecks as well, as they can reliably tell when
	   another CPU is in the midst of handling a real mcheck via
	   the "taken" function.  */
	udelay(100);

	if ((taken = mcheck_taken(cpu))) {
		mcheck_taken(cpu) = 0;
		t2_mcheck_last_taken |= (1 << cpu);
		value = 0xffffffffU;
		mb();
	}
	mcheck_expected(cpu) = 0;
	t2_mcheck_any_expected = 0;
	mb();

	/* If Type1 access, must reset T2 CFG so normal IO space ops work.  */
	if (type1) {
		*(vulp)T2_HAE_3 = t2_cfg;
		mb();
	}

	return value;
}

static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
	unsigned int cpu, taken;
	unsigned long t2_cfg = 0;

	cpu = smp_processor_id();

	/* If Type1 access, must set T2 CFG.  */
	if (type1) {
		t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
		*(vulp)T2_HAE_3 = t2_cfg | 0x40000000UL;
		mb();
	}
	mb();
	draina();

	mcheck_expected(cpu) = 1;
	mcheck_taken(cpu) = 0;
	t2_mcheck_any_expected |= (1 << cpu);
	mb();

	/* Access configuration space.  */
	*(vuip)addr = value;
	mb();
	mb();  /* magic */

	/* Wait for possible mcheck.  Also, this lets other CPUs clear
	   their mchecks as well, as they can reliably tell when
	   this CPU is in the midst of handling a real mcheck via
	   the "taken" function.  */
	udelay(100);

	if ((taken = mcheck_taken(cpu))) {
		mcheck_taken(cpu) = 0;
		t2_mcheck_last_taken |= (1 << cpu);
		mb();
	}
	mcheck_expected(cpu) = 0;
	t2_mcheck_any_expected = 0;
	mb();

	/* If Type1 access, must reset T2 CFG so normal IO space ops work.  */
	if (type1) {
		*(vulp)T2_HAE_3 = t2_cfg;
		mb();
	}
}

static int
t2_read_config(struct pci_bus *bus, unsigned int devfn, int where,
	       int size, u32 *value)
{
	unsigned long addr, pci_addr;
	unsigned char type1;
	int shift;
	long mask;

	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;
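	/* The low bits of a sparse-space config address encode the
	   transfer size ((size - 1) * 8), and "where & 3" gives the
	   shift needed to extract the addressed bytes from the
	   returned longword.  */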
	mask = (size - 1) * 8;
	shift = (where & 3) * 8;
	addr = (pci_addr << 5) + mask + T2_CONF;
	*value = conf_read(addr, type1) >> (shift);
	return PCIBIOS_SUCCESSFUL;
}

static int
t2_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
		u32 value)
{
	unsigned long addr, pci_addr;
	unsigned char type1;
	long mask;

	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	mask = (size - 1) * 8;
	addr = (pci_addr << 5) + mask + T2_CONF;
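	/* Shift the value so the bytes being written land at their
	   offset within the 32-bit register.  */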
	conf_write(addr, value << ((where & 3) * 8), type1);
	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops t2_pci_ops =
{
	.read =		t2_read_config,
	.write =	t2_write_config,
};

static void __init
t2_direct_map_window1(unsigned long base, unsigned long length)
{
	unsigned long temp;

	__direct_map_base = base;
	__direct_map_size = length;
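	/* WBASE1 holds the window base in its high bits and the last
	   megabyte the window covers in its low bits; the 0x80000
	   ORed in below is the window-enable bit.  */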
	temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
	*(vulp)T2_WBASE1 = temp | 0x80000UL; /* OR in ENABLE bit */
	temp = (length - 1) & 0xfff00000UL;
	*(vulp)T2_WMASK1 = temp;
	*(vulp)T2_TBASE1 = 0;

#if DEBUG_PRINT_FINAL_SETTINGS
	printk("%s: setting WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n",
	       __func__, *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
#endif
}

static void __init
t2_sg_map_window2(struct pci_controller *hose,
		  unsigned long base,
		  unsigned long length)
{
	unsigned long temp;

	/* Note we can only do 1 SG window, as the other is for direct, so
	   do an ISA SG area, especially for the floppy.  */
	hose->sg_isa = iommu_arena_new(hose, base, length, 0);
	hose->sg_pci = NULL;
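	/* TBASE2 points the T2 at the scatter/gather PTE array; note
	   that the physical address is shifted right by one below.  */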
	temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
	*(vulp)T2_WBASE2 = temp | 0xc0000UL; /* OR in ENABLE/SG bits */
	temp = (length - 1) & 0xfff00000UL;
	*(vulp)T2_WMASK2 = temp;
	*(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1;
	mb();

	t2_pci_tbi(hose, 0, -1); /* flush TLB all */

#if DEBUG_PRINT_FINAL_SETTINGS
	printk("%s: setting WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n",
	       __func__, *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
#endif
}

static void __init
t2_save_configuration(void)
{
#if DEBUG_PRINT_INITIAL_SETTINGS
	printk("%s: HAE_1 was 0x%lx\n", __func__, srm_hae); /* HW is 0 */
	printk("%s: HAE_2 was 0x%lx\n", __func__, *(vulp)T2_HAE_2);
	printk("%s: HAE_3 was 0x%lx\n", __func__, *(vulp)T2_HAE_3);
	printk("%s: HAE_4 was 0x%lx\n", __func__, *(vulp)T2_HAE_4);
	printk("%s: HBASE was 0x%lx\n", __func__, *(vulp)T2_HBASE);

	printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __func__,
	       *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
	printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __func__,
	       *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
#endif

	/*
	 * Save the DMA Window registers.
	 */
	t2_saved_config.window[0].wbase = *(vulp)T2_WBASE1;
	t2_saved_config.window[0].wmask = *(vulp)T2_WMASK1;
	t2_saved_config.window[0].tbase = *(vulp)T2_TBASE1;
	t2_saved_config.window[1].wbase = *(vulp)T2_WBASE2;
	t2_saved_config.window[1].wmask = *(vulp)T2_WMASK2;
	t2_saved_config.window[1].tbase = *(vulp)T2_TBASE2;

	t2_saved_config.hae_1 = srm_hae; /* HW is already set to 0 */
	t2_saved_config.hae_2 = *(vulp)T2_HAE_2;
	t2_saved_config.hae_3 = *(vulp)T2_HAE_3;
	t2_saved_config.hae_4 = *(vulp)T2_HAE_4;
	t2_saved_config.hbase = *(vulp)T2_HBASE;
}

void __init
t2_init_arch(void)
{
	struct pci_controller *hose;
	struct resource *hae_mem;
	unsigned long temp;
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++) {
		mcheck_expected(i) = 0;
		mcheck_taken(i) = 0;
	}
	t2_mcheck_any_expected = 0;
	t2_mcheck_last_taken = 0;

	/* Enable scatter/gather TLB use.  */
	temp = *(vulp)T2_IOCSR;
	if (!(temp & (0x1UL << 26))) {
		printk("t2_init_arch: enabling SG TLB, IOCSR was 0x%lx\n",
		       temp);
		*(vulp)T2_IOCSR = temp | (0x1UL << 26);
		mb();
		*(vulp)T2_IOCSR; /* read it back to make sure */
	}

	t2_save_configuration();

	/*
	 * Create our single hose.
	 */
	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hae_mem = alloc_resource();
	hae_mem->start = 0;
	hae_mem->end = T2_MEM_R1_MASK;
	hae_mem->name = pci_hae0_name;
	if (request_resource(&iomem_resource, hae_mem) < 0)
		printk(KERN_ERR "Failed to request HAE_MEM\n");
	hose->mem_space = hae_mem;
	hose->index = 0;

	hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR;
	hose->dense_mem_base = T2_DENSE_MEM - IDENT_ADDR;
	hose->sparse_io_base = T2_IO - IDENT_ADDR;
	hose->dense_io_base = 0;

	/*
	 * Set up the PCI->physical memory translation windows.
	 *
	 * Window 1 is direct mapped.
	 * Window 2 is scatter/gather (for ISA).
	 */
	t2_direct_map_window1(T2_DIRECTMAP_START, T2_DIRECTMAP_LENGTH);

	/* Always make an ISA DMA window.  */
	t2_sg_map_window2(hose, T2_ISA_SG_START, T2_ISA_SG_LENGTH);

	*(vulp)T2_HBASE = 0x0; /* Disable HOLES.  */

	/* Zero HAE.  */
	*(vulp)T2_HAE_1 = 0; mb(); /* Sparse MEM HAE */
	*(vulp)T2_HAE_2 = 0; mb(); /* Sparse I/O HAE */
	*(vulp)T2_HAE_3 = 0; mb(); /* Config Space HAE */

	/*
	 * We also now zero out HAE_4, the dense memory HAE, so that
	 * we need not account for its "offset" when accessing dense
	 * memory resources which we allocated in our normal way.  This
	 * HAE would need to stay untouched were we to keep the SRM
	 * resource settings.
	 *
	 * Thus we can now run standard X servers on SABLE/LYNX. :-)
	 */
	*(vulp)T2_HAE_4 = 0; mb();
}

void
t2_kill_arch(int mode)
{
	/*
	 * Restore the DMA Window registers.
	 */
	*(vulp)T2_WBASE1 = t2_saved_config.window[0].wbase;
	*(vulp)T2_WMASK1 = t2_saved_config.window[0].wmask;
	*(vulp)T2_TBASE1 = t2_saved_config.window[0].tbase;
	*(vulp)T2_WBASE2 = t2_saved_config.window[1].wbase;
	*(vulp)T2_WMASK2 = t2_saved_config.window[1].wmask;
	*(vulp)T2_TBASE2 = t2_saved_config.window[1].tbase;
	mb();

	*(vulp)T2_HAE_1 = srm_hae;
	*(vulp)T2_HAE_2 = t2_saved_config.hae_2;
	*(vulp)T2_HAE_3 = t2_saved_config.hae_3;
	*(vulp)T2_HAE_4 = t2_saved_config.hae_4;
	*(vulp)T2_HBASE = t2_saved_config.hbase;
	mb();
	*(vulp)T2_HBASE; /* READ it back to ensure WRITE occurred. */
}

void
t2_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	unsigned long t2_iocsr;

	t2_iocsr = *(vulp)T2_IOCSR;

	/* set the TLB Clear bit */
	*(vulp)T2_IOCSR = t2_iocsr | (0x1UL << 28);
	mb();
	*(vulp)T2_IOCSR; /* read it back to make sure */

	/* clear the TLB Clear bit */
	*(vulp)T2_IOCSR = t2_iocsr & ~(0x1UL << 28);
	mb();
	*(vulp)T2_IOCSR; /* read it back to make sure */
}

#define SIC_SEIC (1UL << 33)    /* System Event Clear */

static void
t2_clear_errors(int cpu)
{
	struct sable_cpu_csr *cpu_regs;

	cpu_regs = (struct sable_cpu_csr *)T2_CPUn_BASE(cpu);

	cpu_regs->sic &= ~SIC_SEIC;

	/* Clear CPU errors.  */
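	/* These error registers are write-one-to-clear: reading each
	   one and writing the same value back clears whatever error
	   bits were latched.  */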
	cpu_regs->bcce |= cpu_regs->bcce;
	cpu_regs->cbe  |= cpu_regs->cbe;
	cpu_regs->bcue |= cpu_regs->bcue;
	cpu_regs->dter |= cpu_regs->dter;

	*(vulp)T2_CERR1 |= *(vulp)T2_CERR1;
	*(vulp)T2_PERR1 |= *(vulp)T2_PERR1;

	mb();
	mb();  /* magic */
}

/*
 * SABLE seems to have a "broadcast" style machine check, in that all
 * CPUs receive it.  And, the issuing CPU, in the case of PCI Config
 * space read/write faults, will also receive a second mcheck, upon
 * lowering IPL during completion processing in pci_read_config_byte()
 * et al.
 *
 * Hence all the taken/expected/any_expected/last_taken stuff...
 */
void
t2_machine_check(unsigned long vector, unsigned long la_ptr)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_VERBOSE_MCHECK
	struct el_common *mchk_header = (struct el_common *)la_ptr;
#endif

	/* Clear the error before any reporting.  */
	mb();
	mb();  /* magic */
	draina();
	t2_clear_errors(cpu);

	/* This should not actually be done until the logout frame is
	   examined, but, since we don't do that, go on and do this... */
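	/* wrmces(0x7) acknowledges the machine check and clears the
	   system/processor correctable-error summary bits in MCES.  */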
	wrmces(0x7);
	mb();

	/* Now, do testing for the anomalous conditions.  */
	if (!mcheck_expected(cpu) && t2_mcheck_any_expected) {
		/*
		 * FUNKY: Received mcheck on a CPU and not
		 * expecting it, but another CPU is expecting one.
		 *
		 * Just dismiss it for now on this CPU...
		 */
#ifdef CONFIG_VERBOSE_MCHECK
		if (alpha_verbose_mcheck > 1) {
			printk("t2_machine_check(cpu%d): any_expected 0x%x -"
			       " (assumed) spurious -"
			       " code 0x%x\n", cpu, t2_mcheck_any_expected,
			       (unsigned int)mchk_header->code);
		}
#endif
		return;
	}

	if (!mcheck_expected(cpu) && !t2_mcheck_any_expected) {
		if (t2_mcheck_last_taken & (1 << cpu)) {
#ifdef CONFIG_VERBOSE_MCHECK
			if (alpha_verbose_mcheck > 1) {
				printk("t2_machine_check(cpu%d): last_taken 0x%x - "
				       "unexpected mcheck - code 0x%x\n",
				       cpu, t2_mcheck_last_taken,
				       (unsigned int)mchk_header->code);
			}
#endif
			t2_mcheck_last_taken = 0;
			mb();
			return;
		} else {
			t2_mcheck_last_taken = 0;
			mb();
		}
	}

#ifdef CONFIG_VERBOSE_MCHECK
	if (alpha_verbose_mcheck > 1) {
		printk("%s t2_mcheck(cpu%d): last_taken 0x%x - "
		       "any_expected 0x%x - code 0x%x\n",
		       (mcheck_expected(cpu) ? "EX" : "UN"), cpu,
		       t2_mcheck_last_taken, t2_mcheck_any_expected,
		       (unsigned int)mchk_header->code);
	}
#endif

	process_mcheck_info(vector, la_ptr, "T2", mcheck_expected(cpu));
}