/*
 *  linux/arch/alpha/kernel/core_wildfire.c
 *
 *  Wildfire support.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_wildfire.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/ptrace.h>
#include <asm/smp.h>

#include "proto.h"
#include "pci_impl.h"

#define DEBUG_CONFIG 0
#define DEBUG_DUMP_REGS 0
#define DEBUG_DUMP_CONFIG 1

#if DEBUG_CONFIG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif

#if DEBUG_DUMP_REGS
static void wildfire_dump_pci_regs(int qbbno, int hoseno);
static void wildfire_dump_pca_regs(int qbbno, int pcano);
static void wildfire_dump_qsa_regs(int qbbno);
static void wildfire_dump_qsd_regs(int qbbno);
static void wildfire_dump_iop_regs(int qbbno);
static void wildfire_dump_gp_regs(int qbbno);
#endif
#if DEBUG_DUMP_CONFIG
static void wildfire_dump_hardware_config(void);
#endif

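/*
 * Translation tables between hard and soft QBB numbering, filled in by
 * wildfire_hardware_probe(); QBB_MAP_EMPTY marks entries with no
 * corresponding QBB.
 */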
unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
#define QBB_MAP_EMPTY	0xff

unsigned long wildfire_hard_qbb_mask;
unsigned long wildfire_soft_qbb_mask;
unsigned long wildfire_gp_mask;
unsigned long wildfire_hs_mask;
unsigned long wildfire_iop_mask;
unsigned long wildfire_ior_mask;
unsigned long wildfire_pca_mask;
unsigned long wildfire_cpu_mask;
unsigned long wildfire_mem_mask;

void __init
wildfire_init_hose(int qbbno, int hoseno)
{
	struct pci_controller *hose;
	wildfire_pci *pci;

	hose = alloc_pci_controller();
	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/* This is for userland consumption. */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base  = 0;
	hose->dense_mem_base  = WILDFIRE_MEM(qbbno, hoseno);
	hose->dense_io_base   = WILDFIRE_IO(qbbno, hoseno);

	hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno);
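	/* Pack the QBB and hose numbers into a single index;
	   wildfire_pci_tbi() below recovers them as index >> 3
	   and index & 7. */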
	hose->index = (qbbno << 3) + hoseno;

	hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS;
	hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1;
	hose->io_space->name = pci_io_names[hoseno];
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno) - WILDFIRE_MEM_BIAS;
	hose->mem_space->end = hose->mem_space->start + 0xffffffff;
	hose->mem_space->name = pci_mem_names[hoseno];
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n",
		       qbbno, hoseno);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n",
		       qbbno, hoseno);

#if DEBUG_DUMP_REGS
	wildfire_dump_pci_regs(qbbno, hoseno);
#endif

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Note: Window 3 is scatter-gather only
	 *
	 * Window 0 is scatter-gather 8MB at 8MB (for isa)
	 * Window 1 is direct access 1GB at 1GB
	 * Window 2 is direct access 1GB at 2GB
	 * Window 3 is scatter-gather 128MB at 3GB
	 * ??? We ought to scale window 3 memory.
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
	hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0);

	pci = WILDFIRE_pci(qbbno, hoseno);
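
	/*
	 * Each window is programmed with three CSRs: wbase holds the PCI
	 * bus base address ORed with control bits (3 for the scatter-gather
	 * windows, 1 for the direct windows), wmask encodes the window
	 * size, and tbase holds either the physical address of the SG page
	 * table (windows 0 and 3) or the physical base the window
	 * translates to (windows 1 and 2).
	 */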
	pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3;
	pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000;
	pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes);

	pci->pci_window[1].wbase.csr = 0x40000000 | 1;
	pci->pci_window[1].wmask.csr = (0x40000000 - 1) & 0xfff00000;
	pci->pci_window[1].tbase.csr = 0;

	pci->pci_window[2].wbase.csr = 0x80000000 | 1;
	pci->pci_window[2].wmask.csr = (0x40000000 - 1) & 0xfff00000;
	pci->pci_window[2].tbase.csr = 0x40000000;

	pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3;
	pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000;
	pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes);

	wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */
}

void __init
wildfire_init_pca(int qbbno, int pcano)
{
	/* Test for PCA existence first. */
	if (!WILDFIRE_PCA_EXISTS(qbbno, pcano))
		return;

#if DEBUG_DUMP_REGS
	wildfire_dump_pca_regs(qbbno, pcano);
#endif

	/* Do both hoses of the PCA. */
	wildfire_init_hose(qbbno, (pcano << 1) + 0);
	wildfire_init_hose(qbbno, (pcano << 1) + 1);
}

void __init
wildfire_init_qbb(int qbbno)
{
	int pcano;

	/* Test for QBB existence first. */
	if (!WILDFIRE_QBB_EXISTS(qbbno))
		return;

#if DEBUG_DUMP_REGS
	wildfire_dump_qsa_regs(qbbno);
	wildfire_dump_qsd_regs(qbbno);
	wildfire_dump_iop_regs(qbbno);
	wildfire_dump_gp_regs(qbbno);
#endif

	/* Init all PCAs here. */
	for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
		wildfire_init_pca(qbbno, pcano);
	}
}

void __init
wildfire_hardware_probe(void)
{
	unsigned long temp;
	unsigned int hard_qbb, soft_qbb;
	wildfire_fast_qsd *fast = WILDFIRE_fast_qsd();
	wildfire_qsd *qsd;
	wildfire_qsa *qsa;
	wildfire_iop *iop;
	wildfire_gp *gp;
	wildfire_ne *ne;
	wildfire_fe *fe;
	int i;

	temp = fast->qsd_whami.csr;
#if 0
	printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp);
#endif

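	/* QSD_WHAMI: bits [10:8] hold the hard (physical) QBB number,
	   bits [6:4] the soft (logical) QBB number. */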
	hard_qbb = (temp >> 8) & 7;
	soft_qbb = (temp >> 4) & 7;

	/* Init the HW configuration variables. */
	wildfire_hard_qbb_mask = (1 << hard_qbb);
	wildfire_soft_qbb_mask = (1 << soft_qbb);

	wildfire_gp_mask = 0;
	wildfire_hs_mask = 0;
	wildfire_iop_mask = 0;
	wildfire_ior_mask = 0;
	wildfire_pca_mask = 0;

	wildfire_cpu_mask = 0;
	wildfire_mem_mask = 0;

	memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);
	memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);

	/* First, determine which QBBs are present. */
	qsa = WILDFIRE_qsa(soft_qbb);

	temp = qsa->qsa_qbb_id.csr;
#if 0
	printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp);
#endif

	if (temp & 0x40) /* Is there an HS? */
		wildfire_hs_mask = 1;

	if (temp & 0x20) { /* Is there a GP? */
		gp = WILDFIRE_gp(soft_qbb);
		temp = 0;
		for (i = 0; i < 4; i++) {
			temp |= gp->gpa_qbb_map[i].csr << (i * 8);
#if 0
			printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n",
			       i, gp, temp);
#endif
		}

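		/* The assembled map holds one nibble per hard QBB:
		   bit 3 flags the QBB as present, bits [2:0] give
		   its soft QBB number. */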
		for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) {
			if (temp & 8) { /* Is there a QBB? */
				soft_qbb = temp & 7;
				wildfire_hard_qbb_mask |= (1 << hard_qbb);
				wildfire_soft_qbb_mask |= (1 << soft_qbb);
			}
			temp >>= 4;
		}
		wildfire_gp_mask = wildfire_soft_qbb_mask;
	}

	/* Next determine each QBB's resources. */
	for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) {
		if (WILDFIRE_QBB_EXISTS(soft_qbb)) {
			qsd = WILDFIRE_qsd(soft_qbb);
			temp = qsd->qsd_whami.csr;
#if 0
			printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n", qsd, temp);
#endif
			hard_qbb = (temp >> 8) & 7;
			wildfire_hard_qbb_map[hard_qbb] = soft_qbb;
			wildfire_soft_qbb_map[soft_qbb] = hard_qbb;

			qsa = WILDFIRE_qsa(soft_qbb);
			temp = qsa->qsa_qbb_pop[0].csr;
#if 0
			printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n", qsa, temp);
#endif
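			/* QSA_QBB_POP_0: bits [3:0] = CPUs present,
			   bits [7:4] = memory present; fold both into
			   per-QBB nibbles of the global masks. */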
			wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2);
			wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

			temp = qsa->qsa_qbb_pop[1].csr;
#if 0
			printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n", qsa, temp);
#endif
			wildfire_iop_mask |= (1 << soft_qbb);
			wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

			temp = qsa->qsa_qbb_id.csr;
#if 0
			printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp);
#endif
			if (temp & 0x20)
				wildfire_gp_mask |= (1 << soft_qbb);

			/* Probe for PCA existence here. */
			for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) {
				iop = WILDFIRE_iop(soft_qbb);
				ne = WILDFIRE_ne(soft_qbb, i);
				fe = WILDFIRE_fe(soft_qbb, i);

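				/* Count the PCA as present only if its hose
				   reports initialized and the NE and FE
				   WHAT_AM_I registers return the expected
				   type codes. */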
				if ((iop->iop_hose[i].init.csr & 1) == 1 &&
				    ((ne->ne_what_am_i.csr & 0xf00000300UL) == 0x100000300UL) &&
				    ((fe->fe_what_am_i.csr & 0xf00000300UL) == 0x100000200UL))
				{
					wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i);
				}
			}
		}
	}
#if DEBUG_DUMP_CONFIG
	wildfire_dump_hardware_config();
#endif
}

void __init
wildfire_init_arch(void)
{
	int qbbno;

	/* With multiple PCI buses, we play with I/O as physical addrs. */
	ioport_resource.end = ~0UL;

	/* Probe the hardware for info about configuration. */
	wildfire_hardware_probe();

	/* Now init all the found QBBs. */
	for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
		wildfire_init_qbb(qbbno);
	}

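	/* Windows 1 and 2 in wildfire_init_hose() provide the matching
	   2GB of direct-mapped bus space starting at 0x40000000. */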
	/* Normal direct PCI DMA mapping. */
	__direct_map_base = 0x40000000UL;
	__direct_map_size = 0x80000000UL;
}

void
wildfire_machine_check(unsigned long vector, unsigned long la_ptr)
{
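	/* Standard Alpha machine-check handling: drain outstanding
	   transactions, then acknowledge the event by clearing the
	   machine-check status bits in MCES before reporting it. */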
	mb();
	mb();  /* magic */
	draina();
	/* FIXME: clear pci errors */
	wrmces(0x7);
	mb();

	process_mcheck_info(vector, la_ptr, "WILDFIRE",
			    mcheck_expected(smp_processor_id()));
}

void
wildfire_kill_arch(int mode)
{
}

void
wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	int qbbno = hose->index >> 3;
	int hoseno = hose->index & 7;
	wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);

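	/* The start/end arguments are ignored: a single read of the
	   PCI_FLUSH_TLB CSR invalidates the whole translation buffer
	   for this hose. */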
	mb();
	pci->pci_flush_tlb.csr; /* reading does the trick */
}

static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	struct pci_controller *hose = pbus->sysdata;
	unsigned long addr;
	u8 bus = pbus->number;

	DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
		 "pci_addr=0x%p, type1=0x%p)\n",
		 bus, device_fn, where, pci_addr, type1));

	if (!pbus->parent) /* No parent means peer PCI bus. */
		bus = 0;
	*type1 = (bus != 0);

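	/* Configuration cycles are memory accesses into the hose's config
	   space window: bus number in bits [23:16], device/function in
	   bits [15:8], register offset in bits [7:0]. */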
	addr = (bus << 16) | (device_fn << 8) | where;
	addr |= hose->config_space_base;

	*pci_addr = addr;
	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}

static int
wildfire_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int
wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		      int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

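	/* Each store is followed by mb() and a dummy read-back of the same
	   location so the write reaches the bus before we return. */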
	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops wildfire_pci_ops =
{
	.read =		wildfire_read_config,
	.write =	wildfire_write_config,
};

/*
 * NUMA Support
 */
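/*
 * Each QBB owns a 64GB (2^36 byte) slice of the physical address space,
 * so the node id is simply pa >> 36; the helpers below assume that fixed
 * 64GB-per-node layout and four CPUs per node.
 */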
int wildfire_pa_to_nid(unsigned long pa)
{
	return pa >> 36;
}

int wildfire_cpuid_to_nid(int cpuid)
{
	/* assume 4 CPUs per node */
	return cpuid >> 2;
}

unsigned long wildfire_node_mem_start(int nid)
{
	/* 64GB per node */
	return (unsigned long)nid * (64UL * 1024 * 1024 * 1024);
}

unsigned long wildfire_node_mem_size(int nid)
{
	/* 64GB per node */
	return 64UL * 1024 * 1024 * 1024;
}

#if DEBUG_DUMP_REGS

static void __init
wildfire_dump_pci_regs(int qbbno, int hoseno)
{
	wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
	int i;

	printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n",
	       qbbno, hoseno, pci);

	printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n",
	       pci->pci_io_addr_ext.csr);
	printk(KERN_ERR " PCI_CTRL:        0x%16lx\n", pci->pci_ctrl.csr);
	printk(KERN_ERR " PCI_ERR_SUM:     0x%16lx\n", pci->pci_err_sum.csr);
	printk(KERN_ERR " PCI_ERR_ADDR:    0x%16lx\n", pci->pci_err_addr.csr);
	printk(KERN_ERR " PCI_STALL_CNT:   0x%16lx\n", pci->pci_stall_cnt.csr);
	printk(KERN_ERR " PCI_PEND_INT:    0x%16lx\n", pci->pci_pend_int.csr);
	printk(KERN_ERR " PCI_SENT_INT:    0x%16lx\n", pci->pci_sent_int.csr);

	printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n",
	       qbbno, hoseno, pci);
	for (i = 0; i < 4; i++) {
		printk(KERN_ERR "  window %d: 0x%16lx 0x%16lx 0x%16lx\n", i,
		       pci->pci_window[i].wbase.csr,
		       pci->pci_window[i].wmask.csr,
		       pci->pci_window[i].tbase.csr);
	}
	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_pca_regs(int qbbno, int pcano)
{
	wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano);
	int i;

	printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n",
	       qbbno, pcano, pca);

	printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr);
	printk(KERN_ERR " PCA_ERR_SUM:   0x%16lx\n", pca->pca_err_sum.csr);
	printk(KERN_ERR " PCA_PEND_INT:  0x%16lx\n", pca->pca_pend_int.csr);
	printk(KERN_ERR " PCA_SENT_INT:  0x%16lx\n", pca->pca_sent_int.csr);
	printk(KERN_ERR " PCA_STDIO_EL:  0x%16lx\n",
	       pca->pca_stdio_edge_level.csr);

	printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n",
	       qbbno, pcano, pca);
	for (i = 0; i < 4; i++) {
		printk(KERN_ERR "  target %d: 0x%16lx 0x%16lx\n", i,
		       pca->pca_int[i].target.csr,
		       pca->pca_int[i].enable.csr);
	}

	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_qsa_regs(int qbbno)
{
	wildfire_qsa *qsa = WILDFIRE_qsa(qbbno);
	int i;

	printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa);

	printk(KERN_ERR " QSA_QBB_ID:      0x%16lx\n", qsa->qsa_qbb_id.csr);
	printk(KERN_ERR " QSA_PORT_ENA:    0x%16lx\n", qsa->qsa_port_ena.csr);
	printk(KERN_ERR " QSA_REF_INT:     0x%16lx\n", qsa->qsa_ref_int.csr);

	for (i = 0; i < 5; i++)
		printk(KERN_ERR " QSA_CONFIG_%d:    0x%16lx\n",
		       i, qsa->qsa_config[i].csr);

	for (i = 0; i < 2; i++)
		printk(KERN_ERR " QSA_QBB_POP_%d:   0x%16lx\n",
		       i, qsa->qsa_qbb_pop[i].csr);

	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_qsd_regs(int qbbno)
{
	wildfire_qsd *qsd = WILDFIRE_qsd(qbbno);

	printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd);

	printk(KERN_ERR " QSD_WHAMI:         0x%16lx\n", qsd->qsd_whami.csr);
	printk(KERN_ERR " QSD_REV:           0x%16lx\n", qsd->qsd_rev.csr);
	printk(KERN_ERR " QSD_PORT_PRESENT:  0x%16lx\n",
	       qsd->qsd_port_present.csr);
	printk(KERN_ERR " QSD_PORT_ACTIVE:   0x%16lx\n",
	       qsd->qsd_port_active.csr);
	printk(KERN_ERR " QSD_FAULT_ENA:     0x%16lx\n",
	       qsd->qsd_fault_ena.csr);
	printk(KERN_ERR " QSD_CPU_INT_ENA:   0x%16lx\n",
	       qsd->qsd_cpu_int_ena.csr);
	printk(KERN_ERR " QSD_MEM_CONFIG:    0x%16lx\n",
	       qsd->qsd_mem_config.csr);
	printk(KERN_ERR " QSD_ERR_SUM:       0x%16lx\n",
	       qsd->qsd_err_sum.csr);

	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_iop_regs(int qbbno)
{
	wildfire_iop *iop = WILDFIRE_iop(qbbno);
	int i;

	printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop);

	printk(KERN_ERR " IOA_CONFIG:          0x%16lx\n", iop->ioa_config.csr);
	printk(KERN_ERR " IOD_CONFIG:          0x%16lx\n", iop->iod_config.csr);
	printk(KERN_ERR " IOP_SWITCH_CREDITS:  0x%16lx\n",
	       iop->iop_switch_credits.csr);
	printk(KERN_ERR " IOP_HOSE_CREDITS:    0x%16lx\n",
	       iop->iop_hose_credits.csr);

	for (i = 0; i < 4; i++)
		printk(KERN_ERR " IOP_HOSE_%d_INIT:     0x%16lx\n",
		       i, iop->iop_hose[i].init.csr);
	for (i = 0; i < 4; i++)
		printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n",
		       i, iop->iop_dev_int[i].target.csr);

	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_gp_regs(int qbbno)
{
	wildfire_gp *gp = WILDFIRE_gp(qbbno);
	int i;

	printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp);
	for (i = 0; i < 4; i++)
		printk(KERN_ERR " GPA_QBB_MAP_%d:     0x%16lx\n",
		       i, gp->gpa_qbb_map[i].csr);

	printk(KERN_ERR " GPA_MEM_POP_MAP:   0x%16lx\n",
	       gp->gpa_mem_pop_map.csr);
	printk(KERN_ERR " GPA_SCRATCH:       0x%16lx\n", gp->gpa_scratch.csr);
	printk(KERN_ERR " GPA_DIAG:          0x%16lx\n", gp->gpa_diag.csr);
	printk(KERN_ERR " GPA_CONFIG_0:      0x%16lx\n", gp->gpa_config_0.csr);
	printk(KERN_ERR " GPA_INIT_ID:       0x%16lx\n", gp->gpa_init_id.csr);
	printk(KERN_ERR " GPA_CONFIG_2:      0x%16lx\n", gp->gpa_config_2.csr);

	printk(KERN_ERR "\n");
}
#endif /* DEBUG_DUMP_REGS */

#if DEBUG_DUMP_CONFIG
static void __init
wildfire_dump_hardware_config(void)
{
	int i;

	printk(KERN_ERR "Probed Hardware Configuration\n");

	printk(KERN_ERR " hard_qbb_mask:  0x%16lx\n", wildfire_hard_qbb_mask);
	printk(KERN_ERR " soft_qbb_mask:  0x%16lx\n", wildfire_soft_qbb_mask);

	printk(KERN_ERR " gp_mask:        0x%16lx\n", wildfire_gp_mask);
	printk(KERN_ERR " hs_mask:        0x%16lx\n", wildfire_hs_mask);
	printk(KERN_ERR " iop_mask:       0x%16lx\n", wildfire_iop_mask);
	printk(KERN_ERR " ior_mask:       0x%16lx\n", wildfire_ior_mask);
	printk(KERN_ERR " pca_mask:       0x%16lx\n", wildfire_pca_mask);

	printk(KERN_ERR " cpu_mask:       0x%16lx\n", wildfire_cpu_mask);
	printk(KERN_ERR " mem_mask:       0x%16lx\n", wildfire_mem_mask);

	printk(" hard_qbb_map: ");
	for (i = 0; i < WILDFIRE_MAX_QBB; i++)
		if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY)
			printk("--- ");
		else
			printk("%3d ", wildfire_hard_qbb_map[i]);
	printk("\n");

	printk(" soft_qbb_map: ");
	for (i = 0; i < WILDFIRE_MAX_QBB; i++)
		if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY)
			printk("--- ");
		else
			printk("%3d ", wildfire_soft_qbb_map[i]);
	printk("\n");
}
#endif /* DEBUG_DUMP_CONFIG */