/*
 *  linux/arch/alpha/kernel/core_wildfire.c
 *
 *  Wildfire support.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_wildfire.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/ptrace.h>
#include <asm/smp.h>

#include "proto.h"
#include "pci_impl.h"

#define DEBUG_CONFIG 0
#define DEBUG_DUMP_REGS 0
#define DEBUG_DUMP_CONFIG 1

#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif

#if DEBUG_DUMP_REGS
static void wildfire_dump_pci_regs(int qbbno, int hoseno);
static void wildfire_dump_pca_regs(int qbbno, int pcano);
static void wildfire_dump_qsa_regs(int qbbno);
static void wildfire_dump_qsd_regs(int qbbno);
static void wildfire_dump_iop_regs(int qbbno);
static void wildfire_dump_gp_regs(int qbbno);
#endif
#if DEBUG_DUMP_CONFIG
static void wildfire_dump_hardware_config(void);
#endif

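/*
 * Hardware configuration discovered by wildfire_hardware_probe().  The
 * hard/soft QBB maps translate between hardware and software QBB numbers
 * (QBB_MAP_EMPTY marks slots with no QBB); the masks record which QBBs,
 * GPs, IOPs, PCAs, CPUs and memory arrays were found to be present.
 */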
unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
#define QBB_MAP_EMPTY 0xff

unsigned long wildfire_hard_qbb_mask;
unsigned long wildfire_soft_qbb_mask;
unsigned long wildfire_gp_mask;
unsigned long wildfire_hs_mask;
unsigned long wildfire_iop_mask;
unsigned long wildfire_ior_mask;
unsigned long wildfire_pca_mask;
unsigned long wildfire_cpu_mask;
unsigned long wildfire_mem_mask;

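/*
 * Set up one PCI hose: allocate the controller, claim its I/O and MEM
 * apertures, and program the four PCI-to-memory DMA windows described
 * in the comment below.
 */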
void __init
wildfire_init_hose(int qbbno, int hoseno)
{
        struct pci_controller *hose;
        wildfire_pci *pci;

        hose = alloc_pci_controller();
        hose->io_space = alloc_resource();
        hose->mem_space = alloc_resource();

        /* This is for userland consumption. */
        hose->sparse_mem_base = 0;
        hose->sparse_io_base = 0;
        hose->dense_mem_base = WILDFIRE_MEM(qbbno, hoseno);
        hose->dense_io_base = WILDFIRE_IO(qbbno, hoseno);

        hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno);
        hose->index = (qbbno << 3) + hoseno;

        hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS;
        hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1;
        hose->io_space->name = pci_io_names[hoseno];
        hose->io_space->flags = IORESOURCE_IO;

        hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno) - WILDFIRE_MEM_BIAS;
        hose->mem_space->end = hose->mem_space->start + 0xffffffff;
        hose->mem_space->name = pci_mem_names[hoseno];
        hose->mem_space->flags = IORESOURCE_MEM;

        if (request_resource(&ioport_resource, hose->io_space) < 0)
                printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n",
                       qbbno, hoseno);
        if (request_resource(&iomem_resource, hose->mem_space) < 0)
                printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n",
                       qbbno, hoseno);

#if DEBUG_DUMP_REGS
        wildfire_dump_pci_regs(qbbno, hoseno);
#endif

        /*
         * Set up the PCI to main memory translation windows.
         *
         * Note: Window 3 is scatter-gather only
         *
         * Window 0 is scatter-gather 8MB at 8MB (for isa)
         * Window 1 is direct access 1GB at 1GB
         * Window 2 is direct access 1GB at 2GB
         * Window 3 is scatter-gather 128MB at 3GB
         * ??? We ought to scale window 3 memory.
         */
        hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
        hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0);

        pci = WILDFIRE_pci(qbbno, hoseno);

        pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3;
        pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000;
        pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes);

        pci->pci_window[1].wbase.csr = 0x40000000 | 1;
        pci->pci_window[1].wmask.csr = (0x40000000 - 1) & 0xfff00000;
        pci->pci_window[1].tbase.csr = 0;

        pci->pci_window[2].wbase.csr = 0x80000000 | 1;
        pci->pci_window[2].wmask.csr = (0x40000000 - 1) & 0xfff00000;
        pci->pci_window[2].tbase.csr = 0x40000000;

        pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3;
        pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000;
        pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes);

        wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */
}

void __init
wildfire_init_pca(int qbbno, int pcano)
{
        /* Test for PCA existence first. */
        if (!WILDFIRE_PCA_EXISTS(qbbno, pcano))
                return;

#if DEBUG_DUMP_REGS
        wildfire_dump_pca_regs(qbbno, pcano);
#endif

        /* Do both hoses of the PCA. */
        wildfire_init_hose(qbbno, (pcano << 1) + 0);
        wildfire_init_hose(qbbno, (pcano << 1) + 1);
}

void __init
wildfire_init_qbb(int qbbno)
{
        int pcano;

        /* Test for QBB existence first. */
        if (!WILDFIRE_QBB_EXISTS(qbbno))
                return;

#if DEBUG_DUMP_REGS
        wildfire_dump_qsa_regs(qbbno);
        wildfire_dump_qsd_regs(qbbno);
        wildfire_dump_iop_regs(qbbno);
        wildfire_dump_gp_regs(qbbno);
#endif

        /* Init all PCAs here. */
        for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
                wildfire_init_pca(qbbno, pcano);
        }
}

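/*
 * Probe the hardware configuration.  Read this CPU's QSD_WHAMI to get the
 * local hard/soft QBB numbers, walk the GP QBB map (if a GP is present) to
 * find the remaining QBBs, then read each QBB's population registers to
 * fill in the CPU, memory, IOP, IOR and PCA masks above.
 */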
void __init
wildfire_hardware_probe(void)
{
        unsigned long temp;
        unsigned int hard_qbb, soft_qbb;
        wildfire_fast_qsd *fast = WILDFIRE_fast_qsd();
        wildfire_qsd *qsd;
        wildfire_qsa *qsa;
        wildfire_iop *iop;
        wildfire_gp *gp;
        wildfire_ne *ne;
        wildfire_fe *fe;
        int i;

        temp = fast->qsd_whami.csr;
#if 0
        printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp);
#endif

        hard_qbb = (temp >> 8) & 7;
        soft_qbb = (temp >> 4) & 7;

        /* Init the HW configuration variables. */
        wildfire_hard_qbb_mask = (1 << hard_qbb);
        wildfire_soft_qbb_mask = (1 << soft_qbb);

        wildfire_gp_mask = 0;
        wildfire_hs_mask = 0;
        wildfire_iop_mask = 0;
        wildfire_ior_mask = 0;
        wildfire_pca_mask = 0;
        wildfire_cpu_mask = 0;
        wildfire_mem_mask = 0;

        memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);
        memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);

        /* First, determine which QBBs are present. */
        qsa = WILDFIRE_qsa(soft_qbb);

        temp = qsa->qsa_qbb_id.csr;
#if 0
        printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp);
#endif

        if (temp & 0x40) /* Is there an HS? */
                wildfire_hs_mask = 1;

        if (temp & 0x20) { /* Is there a GP? */
                gp = WILDFIRE_gp(soft_qbb);
                temp = 0;
                for (i = 0; i < 4; i++) {
                        temp |= gp->gpa_qbb_map[i].csr << (i * 8);
#if 0
                        printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n",
                               i, gp, temp);
#endif
                }

                for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) {
                        if (temp & 8) { /* Is there a QBB? */
                                soft_qbb = temp & 7;
                                wildfire_hard_qbb_mask |= (1 << hard_qbb);
                                wildfire_soft_qbb_mask |= (1 << soft_qbb);
                        }
                        temp >>= 4;
                }
                wildfire_gp_mask = wildfire_soft_qbb_mask;
        }

        /* Next, determine each QBB's resources. */
        for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) {
                if (WILDFIRE_QBB_EXISTS(soft_qbb)) {
                        qsd = WILDFIRE_qsd(soft_qbb);
                        temp = qsd->qsd_whami.csr;
#if 0
                        printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n", qsd, temp);
#endif
                        hard_qbb = (temp >> 8) & 7;
                        wildfire_hard_qbb_map[hard_qbb] = soft_qbb;
                        wildfire_soft_qbb_map[soft_qbb] = hard_qbb;

                        qsa = WILDFIRE_qsa(soft_qbb);

                        temp = qsa->qsa_qbb_pop[0].csr;
#if 0
                        printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n", qsa, temp);
#endif
                        wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2);
                        wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

                        temp = qsa->qsa_qbb_pop[1].csr;
#if 0
                        printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n", qsa, temp);
#endif
                        wildfire_iop_mask |= (1 << soft_qbb);
                        wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

                        temp = qsa->qsa_qbb_id.csr;
#if 0
                        printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp);
#endif
                        if (temp & 0x20)
                                wildfire_gp_mask |= (1 << soft_qbb);

                        /* Probe for PCA existence here. */
                        for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) {
                                iop = WILDFIRE_iop(soft_qbb);
                                ne = WILDFIRE_ne(soft_qbb, i);
                                fe = WILDFIRE_fe(soft_qbb, i);

                                if ((iop->iop_hose[i].init.csr & 1) == 1 &&
                                    ((ne->ne_what_am_i.csr & 0xf00000300UL) == 0x100000300UL) &&
                                    ((fe->fe_what_am_i.csr & 0xf00000300UL) == 0x100000200UL))
                                {
                                        wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i);
                                }
                        }
                }
        }
#if DEBUG_DUMP_CONFIG
        wildfire_dump_hardware_config();
#endif
}

void __init
wildfire_init_arch(void)
{
        int qbbno;

        /* With multiple PCI buses, we play with I/O as physical addrs. */
        ioport_resource.end = ~0UL;

        /* Probe the hardware for info about configuration. */
        wildfire_hardware_probe();

        /* Now init all the found QBBs. */
        for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
                wildfire_init_qbb(qbbno);
        }

        /* Normal direct PCI DMA mapping. */
        __direct_map_base = 0x40000000UL;
        __direct_map_size = 0x80000000UL;
}

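/*
 * Machine check handler: drain outstanding accesses, clear the
 * machine-check-in-progress flags, and pass the logout frame to the
 * common mcheck reporting code.
 */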
void
wildfire_machine_check(unsigned long vector, unsigned long la_ptr,
                       struct pt_regs * regs)
{
        mb();
        mb();  /* magic */
        draina();
        /* FIXME: clear pci errors */
        wrmces(0x7);
        mb();

        process_mcheck_info(vector, la_ptr, regs, "WILDFIRE",
                            mcheck_expected(smp_processor_id()));
}

void
wildfire_kill_arch(int mode)
{
}

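/*
 * Scatter-gather TLB invalidate.  Reading PCI_FLUSH_TLB flushes the whole
 * translation buffer, so the start/end arguments are ignored.
 */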
void
wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
        int qbbno = hose->index >> 3;
        int hoseno = hose->index & 7;
        wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);

        mb();
        pci->pci_flush_tlb.csr; /* reading does the trick */
}

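/*
 * Build a configuration space address for this hose:
 * config_space_base | bus << 16 | devfn << 8 | where.  Bus 0 (the hose's
 * own bus) gets type 0 cycles; anything behind a bridge is flagged type 1.
 */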
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
             unsigned long *pci_addr, unsigned char *type1)
{
        struct pci_controller *hose = pbus->sysdata;
        unsigned long addr;
        u8 bus = pbus->number;

        DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
                 "pci_addr=0x%p, type1=0x%p)\n",
                 bus, device_fn, where, pci_addr, type1));

        if (!pbus->parent) /* No parent means peer PCI bus. */
                bus = 0;

        *type1 = (bus != 0);

        addr = (bus << 16) | (device_fn << 8) | where;
        addr |= hose->config_space_base;

        *pci_addr = addr;
        DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
        return 0;
}

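/*
 * Config space accesses are plain loads and stores to the address computed
 * by mk_conf_addr(); writes are followed by mb() and a read-back of the
 * same location.
 */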
static int
wildfire_read_config(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 *value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;

        switch (size) {
        case 1:
                *value = __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                *value = __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *value = *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}

static int
wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
                      int size, u32 value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;

        switch (size) {
        case 1:
                __kernel_stb(value, *(vucp)addr);
                mb();
                __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                __kernel_stw(value, *(vusp)addr);
                mb();
                __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *(vuip)addr = value;
                mb();
                *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}

struct pci_ops wildfire_pci_ops =
{
        .read =  wildfire_read_config,
        .write = wildfire_write_config,
};

/*
 * NUMA Support
 */
int wildfire_pa_to_nid(unsigned long pa)
{
        return pa >> 36;
}

int wildfire_cpuid_to_nid(int cpuid)
{
        /* assume 4 CPUs per node */
        return cpuid >> 2;
}

unsigned long wildfire_node_mem_start(int nid)
{
        /* 64GB per node */
        return (unsigned long)nid * (64UL * 1024 * 1024 * 1024);
}

unsigned long wildfire_node_mem_size(int nid)
{
        /* 64GB per node */
        return 64UL * 1024 * 1024 * 1024;
}

#if DEBUG_DUMP_REGS

static void __init
wildfire_dump_pci_regs(int qbbno, int hoseno)
{
        wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
        int i;

        printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n",
               qbbno, hoseno, pci);

        printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n",
               pci->pci_io_addr_ext.csr);
        printk(KERN_ERR " PCI_CTRL: 0x%16lx\n", pci->pci_ctrl.csr);
        printk(KERN_ERR " PCI_ERR_SUM: 0x%16lx\n", pci->pci_err_sum.csr);
        printk(KERN_ERR " PCI_ERR_ADDR: 0x%16lx\n", pci->pci_err_addr.csr);
        printk(KERN_ERR " PCI_STALL_CNT: 0x%16lx\n", pci->pci_stall_cnt.csr);
        printk(KERN_ERR " PCI_PEND_INT: 0x%16lx\n", pci->pci_pend_int.csr);
        printk(KERN_ERR " PCI_SENT_INT: 0x%16lx\n", pci->pci_sent_int.csr);

        printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n",
               qbbno, hoseno, pci);
        for (i = 0; i < 4; i++) {
                printk(KERN_ERR " window %d: 0x%16lx 0x%16lx 0x%16lx\n", i,
                       pci->pci_window[i].wbase.csr,
                       pci->pci_window[i].wmask.csr,
                       pci->pci_window[i].tbase.csr);
        }
        printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_pca_regs(int qbbno, int pcano)
{
        wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano);
        int i;

        printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n",
               qbbno, pcano, pca);

        printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr);
        printk(KERN_ERR " PCA_ERR_SUM: 0x%16lx\n", pca->pca_err_sum.csr);
        printk(KERN_ERR " PCA_PEND_INT: 0x%16lx\n", pca->pca_pend_int.csr);
        printk(KERN_ERR " PCA_SENT_INT: 0x%16lx\n", pca->pca_sent_int.csr);
        printk(KERN_ERR " PCA_STDIO_EL: 0x%16lx\n",
               pca->pca_stdio_edge_level.csr);

        printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n",
               qbbno, pcano, pca);
        for (i = 0; i < 4; i++) {
                printk(KERN_ERR " target %d: 0x%16lx 0x%16lx\n", i,
                       pca->pca_int[i].target.csr,
                       pca->pca_int[i].enable.csr);
        }
        printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_qsa_regs(int qbbno)
{
        wildfire_qsa *qsa = WILDFIRE_qsa(qbbno);
        int i;

        printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa);

        printk(KERN_ERR " QSA_QBB_ID: 0x%16lx\n", qsa->qsa_qbb_id.csr);
        printk(KERN_ERR " QSA_PORT_ENA: 0x%16lx\n", qsa->qsa_port_ena.csr);
        printk(KERN_ERR " QSA_REF_INT: 0x%16lx\n", qsa->qsa_ref_int.csr);

        for (i = 0; i < 5; i++)
                printk(KERN_ERR " QSA_CONFIG_%d: 0x%16lx\n",
                       i, qsa->qsa_config[i].csr);

        for (i = 0; i < 2; i++)
                printk(KERN_ERR " QSA_QBB_POP_%d: 0x%16lx\n",
                       i, qsa->qsa_qbb_pop[i].csr);

        printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_qsd_regs(int qbbno)
{
        wildfire_qsd *qsd = WILDFIRE_qsd(qbbno);

        printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd);

        printk(KERN_ERR " QSD_WHAMI: 0x%16lx\n", qsd->qsd_whami.csr);
        printk(KERN_ERR " QSD_REV: 0x%16lx\n", qsd->qsd_rev.csr);
        printk(KERN_ERR " QSD_PORT_PRESENT: 0x%16lx\n",
               qsd->qsd_port_present.csr);
        printk(KERN_ERR " QSD_PORT_ACTIVE: 0x%16lx\n",
               qsd->qsd_port_active.csr);
        printk(KERN_ERR " QSD_FAULT_ENA: 0x%16lx\n",
               qsd->qsd_fault_ena.csr);
        printk(KERN_ERR " QSD_CPU_INT_ENA: 0x%16lx\n",
               qsd->qsd_cpu_int_ena.csr);
        printk(KERN_ERR " QSD_MEM_CONFIG: 0x%16lx\n",
               qsd->qsd_mem_config.csr);
        printk(KERN_ERR " QSD_ERR_SUM: 0x%16lx\n",
               qsd->qsd_err_sum.csr);

        printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_iop_regs(int qbbno)
{
        wildfire_iop *iop = WILDFIRE_iop(qbbno);
        int i;

        printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop);

        printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr);
        printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr);
        printk(KERN_ERR " IOP_SWITCH_CREDITS: 0x%16lx\n",
               iop->iop_switch_credits.csr);
        printk(KERN_ERR " IOP_HOSE_CREDITS: 0x%16lx\n",
               iop->iop_hose_credits.csr);

        for (i = 0; i < 4; i++)
                printk(KERN_ERR " IOP_HOSE_%d_INIT: 0x%16lx\n",
                       i, iop->iop_hose[i].init.csr);
        for (i = 0; i < 4; i++)
                printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n",
                       i, iop->iop_dev_int[i].target.csr);

        printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_gp_regs(int qbbno)
{
        wildfire_gp *gp = WILDFIRE_gp(qbbno);
        int i;

        printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp);
        for (i = 0; i < 4; i++)
                printk(KERN_ERR " GPA_QBB_MAP_%d: 0x%16lx\n",
                       i, gp->gpa_qbb_map[i].csr);

        printk(KERN_ERR " GPA_MEM_POP_MAP: 0x%16lx\n",
               gp->gpa_mem_pop_map.csr);
        printk(KERN_ERR " GPA_SCRATCH: 0x%16lx\n", gp->gpa_scratch.csr);
        printk(KERN_ERR " GPA_DIAG: 0x%16lx\n", gp->gpa_diag.csr);
        printk(KERN_ERR " GPA_CONFIG_0: 0x%16lx\n", gp->gpa_config_0.csr);
        printk(KERN_ERR " GPA_INIT_ID: 0x%16lx\n", gp->gpa_init_id.csr);
        printk(KERN_ERR " GPA_CONFIG_2: 0x%16lx\n", gp->gpa_config_2.csr);

        printk(KERN_ERR "\n");
}
#endif /* DUMP_REGS */

#if DEBUG_DUMP_CONFIG
static void __init
wildfire_dump_hardware_config(void)
{
        int i;

        printk(KERN_ERR "Probed Hardware Configuration\n");

        printk(KERN_ERR " hard_qbb_mask: 0x%16lx\n", wildfire_hard_qbb_mask);
        printk(KERN_ERR " soft_qbb_mask: 0x%16lx\n", wildfire_soft_qbb_mask);

        printk(KERN_ERR " gp_mask: 0x%16lx\n", wildfire_gp_mask);
        printk(KERN_ERR " hs_mask: 0x%16lx\n", wildfire_hs_mask);
        printk(KERN_ERR " iop_mask: 0x%16lx\n", wildfire_iop_mask);
        printk(KERN_ERR " ior_mask: 0x%16lx\n", wildfire_ior_mask);
        printk(KERN_ERR " pca_mask: 0x%16lx\n", wildfire_pca_mask);

        printk(KERN_ERR " cpu_mask: 0x%16lx\n", wildfire_cpu_mask);
        printk(KERN_ERR " mem_mask: 0x%16lx\n", wildfire_mem_mask);

        printk(" hard_qbb_map: ");
        for (i = 0; i < WILDFIRE_MAX_QBB; i++)
                if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY)
                        printk("--- ");
                else
                        printk("%3d ", wildfire_hard_qbb_map[i]);
        printk("\n");

        printk(" soft_qbb_map: ");
        for (i = 0; i < WILDFIRE_MAX_QBB; i++)
                if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY)
                        printk("--- ");
                else
                        printk("%3d ", wildfire_soft_qbb_map[i]);
        printk("\n");
}
#endif /* DUMP_CONFIG */