/*
 *  linux/arch/alpha/kernel/core_wildfire.c
 *
 *  Wildfire support.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_wildfire.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/ptrace.h>
#include <asm/smp.h>

#include "proto.h"
#include "pci_impl.h"

#define DEBUG_CONFIG 0
#define DEBUG_DUMP_REGS 0
#define DEBUG_DUMP_CONFIG 1

#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif

#if DEBUG_DUMP_REGS
static void wildfire_dump_pci_regs(int qbbno, int hoseno);
static void wildfire_dump_pca_regs(int qbbno, int pcano);
static void wildfire_dump_qsa_regs(int qbbno);
static void wildfire_dump_qsd_regs(int qbbno);
static void wildfire_dump_iop_regs(int qbbno);
static void wildfire_dump_gp_regs(int qbbno);
#endif
#if DEBUG_DUMP_CONFIG
static void wildfire_dump_hardware_config(void);
#endif

unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
#define QBB_MAP_EMPTY 0xff

unsigned long wildfire_hard_qbb_mask;
unsigned long wildfire_soft_qbb_mask;
unsigned long wildfire_gp_mask;
unsigned long wildfire_hs_mask;
unsigned long wildfire_iop_mask;
unsigned long wildfire_ior_mask;
unsigned long wildfire_pca_mask;
unsigned long wildfire_cpu_mask;
unsigned long wildfire_mem_mask;
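
/*
 * Set up one PCI hose: allocate its pci_controller, claim the hose's
 * I/O and memory resources, and program the four PCI-to-memory DMA
 * windows (two scatter-gather, two direct-mapped).
 */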

void __init
wildfire_init_hose(int qbbno, int hoseno)
{
        struct pci_controller *hose;
        wildfire_pci *pci;

        hose = alloc_pci_controller();
        hose->io_space = alloc_resource();
        hose->mem_space = alloc_resource();

        /* This is for userland consumption. */
        hose->sparse_mem_base = 0;
        hose->sparse_io_base = 0;
        hose->dense_mem_base = WILDFIRE_MEM(qbbno, hoseno);
        hose->dense_io_base = WILDFIRE_IO(qbbno, hoseno);

        hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno);
        hose->index = (qbbno << 3) + hoseno;

        hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS;
        hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1;
        hose->io_space->name = pci_io_names[hoseno];
        hose->io_space->flags = IORESOURCE_IO;

        hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno) - WILDFIRE_MEM_BIAS;
        hose->mem_space->end = hose->mem_space->start + 0xffffffff;
        hose->mem_space->name = pci_mem_names[hoseno];
        hose->mem_space->flags = IORESOURCE_MEM;

        if (request_resource(&ioport_resource, hose->io_space) < 0)
                printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n",
                       qbbno, hoseno);
        if (request_resource(&iomem_resource, hose->mem_space) < 0)
                printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n",
                       qbbno, hoseno);

#if DEBUG_DUMP_REGS
        wildfire_dump_pci_regs(qbbno, hoseno);
#endif

        /*
         * Set up the PCI to main memory translation windows.
         *
         * Note: Window 3 is scatter-gather only
         *
         * Window 0 is scatter-gather 8MB at 8MB (for isa)
         * Window 1 is direct access 1GB at 1GB
         * Window 2 is direct access 1GB at 2GB
         * Window 3 is scatter-gather 128MB at 3GB
         * ??? We ought to scale window 3 memory.
         */
        hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
        hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0);

        pci = WILDFIRE_pci(qbbno, hoseno);

        pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3;
        pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000;
        pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes);

        pci->pci_window[1].wbase.csr = 0x40000000 | 1;
        pci->pci_window[1].wmask.csr = (0x40000000 - 1) & 0xfff00000;
        pci->pci_window[1].tbase.csr = 0;

        pci->pci_window[2].wbase.csr = 0x80000000 | 1;
        pci->pci_window[2].wmask.csr = (0x40000000 - 1) & 0xfff00000;
        pci->pci_window[2].tbase.csr = 0x40000000;

        pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3;
        pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000;
        pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes);

        wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */
}
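
/*
 * A PCA is only initialized if the IOP reports it present; each PCA
 * provides two PCI hoses (2*pcano and 2*pcano + 1).
 */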

void __init
wildfire_init_pca(int qbbno, int pcano)
{
        /* Test for PCA existence first. */
        if (!WILDFIRE_PCA_EXISTS(qbbno, pcano))
                return;

#if DEBUG_DUMP_REGS
        wildfire_dump_pca_regs(qbbno, pcano);
#endif

        /* Do both hoses of the PCA. */
        wildfire_init_hose(qbbno, (pcano << 1) + 0);
        wildfire_init_hose(qbbno, (pcano << 1) + 1);
}

void __init
wildfire_init_qbb(int qbbno)
{
        int pcano;

        /* Test for QBB existence first. */
        if (!WILDFIRE_QBB_EXISTS(qbbno))
                return;

#if DEBUG_DUMP_REGS
        wildfire_dump_qsa_regs(qbbno);
        wildfire_dump_qsd_regs(qbbno);
        wildfire_dump_iop_regs(qbbno);
        wildfire_dump_gp_regs(qbbno);
#endif

        /* Init all PCAs here. */
        for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
                wildfire_init_pca(qbbno, pcano);
        }
}
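
/*
 * Walk the hardware and record what is present: which QBBs exist, how
 * their hard and soft numbers map onto each other, and which GPs, IOPs,
 * PCAs, CPUs and memory arrays populate each QBB.  The results are kept
 * in the wildfire_*_mask and *_qbb_map globals above.
 */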

void __init
wildfire_hardware_probe(void)
{
        unsigned long temp;
        unsigned int hard_qbb, soft_qbb;
        wildfire_fast_qsd *fast = WILDFIRE_fast_qsd();
        wildfire_qsd *qsd;
        wildfire_qsa *qsa;
        wildfire_iop *iop;
        wildfire_gp *gp;
        wildfire_ne *ne;
        wildfire_fe *fe;
        int i;

        temp = fast->qsd_whami.csr;
#if 0
        printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp);
#endif

        hard_qbb = (temp >> 8) & 7;
        soft_qbb = (temp >> 4) & 7;

        /* Init the HW configuration variables. */
        wildfire_hard_qbb_mask = (1 << hard_qbb);
        wildfire_soft_qbb_mask = (1 << soft_qbb);

        wildfire_gp_mask = 0;
        wildfire_hs_mask = 0;
        wildfire_iop_mask = 0;
        wildfire_ior_mask = 0;
        wildfire_pca_mask = 0;

        wildfire_cpu_mask = 0;
        wildfire_mem_mask = 0;

        memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);
        memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);

        /* First, determine which QBBs are present. */
        qsa = WILDFIRE_qsa(soft_qbb);

        temp = qsa->qsa_qbb_id.csr;
#if 0
        printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp);
#endif

        if (temp & 0x40) /* Is there an HS? */
                wildfire_hs_mask = 1;

        if (temp & 0x20) { /* Is there a GP? */
                gp = WILDFIRE_gp(soft_qbb);
                temp = 0;
                for (i = 0; i < 4; i++) {
                        temp |= gp->gpa_qbb_map[i].csr << (i * 8);
#if 0
                        printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n",
                               i, gp, temp);
#endif
                }

                for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) {
                        if (temp & 8) { /* Is there a QBB? */
                                soft_qbb = temp & 7;
                                wildfire_hard_qbb_mask |= (1 << hard_qbb);
                                wildfire_soft_qbb_mask |= (1 << soft_qbb);
                        }
                        temp >>= 4;
                }
                wildfire_gp_mask = wildfire_soft_qbb_mask;
        }

        /* Next determine each QBBs resources. */
        for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) {
                if (!WILDFIRE_QBB_EXISTS(soft_qbb))
                        continue;

                qsd = WILDFIRE_qsd(soft_qbb);

                temp = qsd->qsd_whami.csr;
#if 0
                printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n", qsd, temp);
#endif
                hard_qbb = (temp >> 8) & 7;
                wildfire_hard_qbb_map[hard_qbb] = soft_qbb;
                wildfire_soft_qbb_map[soft_qbb] = hard_qbb;

                qsa = WILDFIRE_qsa(soft_qbb);

                temp = qsa->qsa_qbb_pop[0].csr;
#if 0
                printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n", qsa, temp);
#endif
                wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2);
                wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

                temp = qsa->qsa_qbb_pop[1].csr;
#if 0
                printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n", qsa, temp);
#endif
                wildfire_iop_mask |= (1 << soft_qbb);
                wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

                temp = qsa->qsa_qbb_id.csr;
#if 0
                printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp);
#endif
                if (temp & 0x20)
                        wildfire_gp_mask |= (1 << soft_qbb);

                /* Probe for PCA existence here. */
                for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) {
                        iop = WILDFIRE_iop(soft_qbb);
                        ne = WILDFIRE_ne(soft_qbb, i);
                        fe = WILDFIRE_fe(soft_qbb, i);

                        if ((iop->iop_hose[i].init.csr & 1) == 1 &&
                            ((ne->ne_what_am_i.csr & 0xf00000300UL) == 0x100000300UL) &&
                            ((fe->fe_what_am_i.csr & 0xf00000300UL) == 0x100000200UL)) {
                                wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i);
                        }
                }
        }
#if DEBUG_DUMP_CONFIG
        wildfire_dump_hardware_config();
#endif
}

void __init
wildfire_init_arch(void)
{
        int qbbno;

        /* With multiple PCI buses, we play with I/O as physical addrs. */
        ioport_resource.end = ~0UL;

        /* Probe the hardware for info about configuration. */
        wildfire_hardware_probe();

        /* Now init all the found QBBs. */
        for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
                wildfire_init_qbb(qbbno);
        }

        /* Normal direct PCI DMA mapping. */
        __direct_map_base = 0x40000000UL;
        __direct_map_size = 0x80000000UL;
}
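
/*
 * Machine-check handler: drain outstanding transactions, acknowledge
 * the machine check via wrmces(), then hand the logout frame to the
 * common reporting code.
 */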

void
wildfire_machine_check(unsigned long vector, unsigned long la_ptr)
{
        mb();
        mb();  /* magic */
        draina();
        /* FIXME: clear pci errors */
        wrmces(0x7);
        mb();

        process_mcheck_info(vector, la_ptr, "WILDFIRE",
                            mcheck_expected(smp_processor_id()));
}

void
wildfire_kill_arch(int mode)
{
}
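
/*
 * Invalidate the hose's scatter-gather TLB.  On Wildfire a read of the
 * PCI_FLUSH_TLB CSR flushes the whole TLB, so the start/end arguments
 * are not used.
 */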

void
wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
        int qbbno = hose->index >> 3;
        int hoseno = hose->index & 7;
        wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);

        mb();
        pci->pci_flush_tlb.csr; /* reading does the trick */
}
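
/*
 * Build a configuration-space address for the given bus/devfn/offset.
 * The address is the hose's config space base plus
 * (bus << 16) | (device_fn << 8) | where; *type1 records whether a
 * type 1 cycle (non-zero bus) is required.
 */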

static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
             unsigned long *pci_addr, unsigned char *type1)
{
        struct pci_controller *hose = pbus->sysdata;
        unsigned long addr;
        u8 bus = pbus->number;

        DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
                 "pci_addr=0x%p, type1=0x%p)\n",
                 bus, device_fn, where, pci_addr, type1));

        if (!pbus->parent) /* No parent means peer PCI bus. */
                bus = 0;
        *type1 = (bus != 0);

        addr = (bus << 16) | (device_fn << 8) | where;
        addr |= hose->config_space_base;

        *pci_addr = addr;
        DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
        return 0;
}

static int
wildfire_read_config(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 *value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;

        switch (size) {
        case 1:
                *value = __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                *value = __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *value = *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}
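
/*
 * Config-space writes are followed by a memory barrier and a read-back
 * of the same location so the write is pushed out to the device before
 * we return.
 */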

static int
wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
                      int size, u32 value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;

        switch (size) {
        case 1:
                __kernel_stb(value, *(vucp)addr);
                mb();
                __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                __kernel_stw(value, *(vusp)addr);
                mb();
                __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *(vuip)addr = value;
                mb();
                *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}

struct pci_ops wildfire_pci_ops =
{
        .read =         wildfire_read_config,
        .write =        wildfire_write_config,
};

/*
 * NUMA Support
 */
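
/*
 * Each QBB behaves as one NUMA node.  A node holds up to 4 CPUs and
 * owns a 64GB slice of the physical address space, so the node id is
 * the physical address shifted down by 36 bits (or the cpuid divided
 * by 4).
 */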

int wildfire_pa_to_nid(unsigned long pa)
{
        return pa >> 36;
}

int wildfire_cpuid_to_nid(int cpuid)
{
        /* assume 4 CPUs per node */
        return cpuid >> 2;
}

unsigned long wildfire_node_mem_start(int nid)
{
        /* 64GB per node */
        return (unsigned long)nid * (64UL * 1024 * 1024 * 1024);
}

unsigned long wildfire_node_mem_size(int nid)
{
        /* 64GB per node */
        return 64UL * 1024 * 1024 * 1024;
}

#if DEBUG_DUMP_REGS

static void __init
wildfire_dump_pci_regs(int qbbno, int hoseno)
{
        wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
        int i;

        printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n",
               qbbno, hoseno, pci);

        printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n",
               pci->pci_io_addr_ext.csr);
        printk(KERN_ERR " PCI_CTRL: 0x%16lx\n", pci->pci_ctrl.csr);
        printk(KERN_ERR " PCI_ERR_SUM: 0x%16lx\n", pci->pci_err_sum.csr);
        printk(KERN_ERR " PCI_ERR_ADDR: 0x%16lx\n", pci->pci_err_addr.csr);
        printk(KERN_ERR " PCI_STALL_CNT: 0x%16lx\n", pci->pci_stall_cnt.csr);
        printk(KERN_ERR " PCI_PEND_INT: 0x%16lx\n", pci->pci_pend_int.csr);
        printk(KERN_ERR " PCI_SENT_INT: 0x%16lx\n", pci->pci_sent_int.csr);

        printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n",
               qbbno, hoseno, pci);
        for (i = 0; i < 4; i++) {
                printk(KERN_ERR "  window %d: 0x%16lx 0x%16lx 0x%16lx\n", i,
                       pci->pci_window[i].wbase.csr,
                       pci->pci_window[i].wmask.csr,
                       pci->pci_window[i].tbase.csr);
        }
        printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_pca_regs(int qbbno, int pcano)
{
        wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano);
        int i;

        printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n",
               qbbno, pcano, pca);

        printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr);
        printk(KERN_ERR " PCA_ERR_SUM: 0x%16lx\n", pca->pca_err_sum.csr);
        printk(KERN_ERR " PCA_PEND_INT: 0x%16lx\n", pca->pca_pend_int.csr);
        printk(KERN_ERR " PCA_SENT_INT: 0x%16lx\n", pca->pca_sent_int.csr);
        printk(KERN_ERR " PCA_STDIO_EL: 0x%16lx\n",
               pca->pca_stdio_edge_level.csr);

        printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n",
               qbbno, pcano, pca);
        for (i = 0; i < 4; i++) {
                printk(KERN_ERR "  target %d: 0x%16lx 0x%16lx\n", i,
                       pca->pca_int[i].target.csr,
                       pca->pca_int[i].enable.csr);
        }
        printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_qsa_regs(int qbbno)
{
        wildfire_qsa *qsa = WILDFIRE_qsa(qbbno);
        int i;

        printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa);

        printk(KERN_ERR " QSA_QBB_ID: 0x%16lx\n", qsa->qsa_qbb_id.csr);
        printk(KERN_ERR " QSA_PORT_ENA: 0x%16lx\n", qsa->qsa_port_ena.csr);
        printk(KERN_ERR " QSA_REF_INT: 0x%16lx\n", qsa->qsa_ref_int.csr);

        for (i = 0; i < 5; i++)
                printk(KERN_ERR " QSA_CONFIG_%d: 0x%16lx\n",
                       i, qsa->qsa_config[i].csr);

        for (i = 0; i < 2; i++)
                printk(KERN_ERR " QSA_QBB_POP_%d: 0x%16lx\n",
                       i, qsa->qsa_qbb_pop[i].csr);

        printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_qsd_regs(int qbbno)
{
        wildfire_qsd *qsd = WILDFIRE_qsd(qbbno);

        printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd);

        printk(KERN_ERR " QSD_WHAMI: 0x%16lx\n", qsd->qsd_whami.csr);
        printk(KERN_ERR " QSD_REV: 0x%16lx\n", qsd->qsd_rev.csr);
        printk(KERN_ERR " QSD_PORT_PRESENT: 0x%16lx\n",
               qsd->qsd_port_present.csr);
        printk(KERN_ERR " QSD_PORT_ACTIVE: 0x%16lx\n",
               qsd->qsd_port_active.csr);
        printk(KERN_ERR " QSD_FAULT_ENA: 0x%16lx\n",
               qsd->qsd_fault_ena.csr);
        printk(KERN_ERR " QSD_CPU_INT_ENA: 0x%16lx\n",
               qsd->qsd_cpu_int_ena.csr);
        printk(KERN_ERR " QSD_MEM_CONFIG: 0x%16lx\n",
               qsd->qsd_mem_config.csr);
        printk(KERN_ERR " QSD_ERR_SUM: 0x%16lx\n",
               qsd->qsd_err_sum.csr);

        printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_iop_regs(int qbbno)
{
        wildfire_iop *iop = WILDFIRE_iop(qbbno);
        int i;

        printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop);

        printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr);
        printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr);
        printk(KERN_ERR " IOP_SWITCH_CREDITS: 0x%16lx\n",
               iop->iop_switch_credits.csr);
        printk(KERN_ERR " IOP_HOSE_CREDITS: 0x%16lx\n",
               iop->iop_hose_credits.csr);

        for (i = 0; i < 4; i++)
                printk(KERN_ERR " IOP_HOSE_%d_INIT: 0x%16lx\n",
                       i, iop->iop_hose[i].init.csr);
        for (i = 0; i < 4; i++)
                printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n",
                       i, iop->iop_dev_int[i].target.csr);

        printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_gp_regs(int qbbno)
{
        wildfire_gp *gp = WILDFIRE_gp(qbbno);
        int i;

        printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp);
        for (i = 0; i < 4; i++)
                printk(KERN_ERR " GPA_QBB_MAP_%d: 0x%16lx\n",
                       i, gp->gpa_qbb_map[i].csr);

        printk(KERN_ERR " GPA_MEM_POP_MAP: 0x%16lx\n",
               gp->gpa_mem_pop_map.csr);
        printk(KERN_ERR " GPA_SCRATCH: 0x%16lx\n", gp->gpa_scratch.csr);
        printk(KERN_ERR " GPA_DIAG: 0x%16lx\n", gp->gpa_diag.csr);
        printk(KERN_ERR " GPA_CONFIG_0: 0x%16lx\n", gp->gpa_config_0.csr);
        printk(KERN_ERR " GPA_INIT_ID: 0x%16lx\n", gp->gpa_init_id.csr);
        printk(KERN_ERR " GPA_CONFIG_2: 0x%16lx\n", gp->gpa_config_2.csr);

        printk(KERN_ERR "\n");
}

#endif /* DUMP_REGS */

#if DEBUG_DUMP_CONFIG
static void __init
wildfire_dump_hardware_config(void)
{
        int i;

        printk(KERN_ERR "Probed Hardware Configuration\n");

        printk(KERN_ERR " hard_qbb_mask: 0x%16lx\n", wildfire_hard_qbb_mask);
        printk(KERN_ERR " soft_qbb_mask: 0x%16lx\n", wildfire_soft_qbb_mask);

        printk(KERN_ERR " gp_mask: 0x%16lx\n", wildfire_gp_mask);
        printk(KERN_ERR " hs_mask: 0x%16lx\n", wildfire_hs_mask);
        printk(KERN_ERR " iop_mask: 0x%16lx\n", wildfire_iop_mask);
        printk(KERN_ERR " ior_mask: 0x%16lx\n", wildfire_ior_mask);
        printk(KERN_ERR " pca_mask: 0x%16lx\n", wildfire_pca_mask);

        printk(KERN_ERR " cpu_mask: 0x%16lx\n", wildfire_cpu_mask);
        printk(KERN_ERR " mem_mask: 0x%16lx\n", wildfire_mem_mask);

        printk(" hard_qbb_map: ");
        for (i = 0; i < WILDFIRE_MAX_QBB; i++)
                if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY)
                        printk("--- ");
                else
                        printk("%3d ", wildfire_hard_qbb_map[i]);
        printk("\n");

        printk(" soft_qbb_map: ");
        for (i = 0; i < WILDFIRE_MAX_QBB; i++)
                if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY)
                        printk("--- ");
                else
                        printk("%3d ", wildfire_soft_qbb_map[i]);
        printk("\n");
}
#endif /* DUMP_CONFIG */