1/*
2 *	linux/arch/alpha/kernel/core_cia.c
3 *
4 * Written by David A Rusling (david.rusling@reo.mts.dec.com).
5 * December 1995.
6 *
7 *	Copyright (C) 1995  David A Rusling
8 *	Copyright (C) 1997, 1998  Jay Estabrook
9 *	Copyright (C) 1998, 1999, 2000  Richard Henderson
10 *
11 * Code common to all CIA core logic chips.
12 */
13
14#define __EXTERN_INLINE inline
15#include <asm/io.h>
16#include <asm/core_cia.h>
17#undef __EXTERN_INLINE
18
19#include <linux/types.h>
20#include <linux/pci.h>
21#include <linux/sched.h>
22#include <linux/init.h>
23#include <linux/bootmem.h>
24
25#include <asm/ptrace.h>
26#include <asm/mce.h>
27
28#include "proto.h"
29#include "pci_impl.h"
30
31
32/*
33 * NOTE: Herein lie back-to-back mb instructions.  They are magic.
34 * One plausible explanation is that the i/o controller does not properly
35 * handle the system transaction.  Another involves timing.  Ho hum.
36 */
37
38#define DEBUG_CONFIG 0
39#if DEBUG_CONFIG
40# define DBGC(args)	printk args
41#else
42# define DBGC(args)
43#endif
44
45#define vip	volatile int  *
46
47/*
48 * Given a bus, device, and function number, compute resulting
49 * configuration space address.  It is therefore not safe to have
50 * concurrent invocations to configuration space access routines, but
51 * there really shouldn't be any need for this.
52 *
53 * Type 0:
54 *
55 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
56 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
57 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
58 * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
59 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
60 *
61 *	31:11	Device select bit.
62 * 	10:8	Function number
63 * 	 7:2	Register number
64 *
65 * Type 1:
66 *
67 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
68 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
69 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
70 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
71 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
72 *
73 *	31:24	reserved
 *	23:16	bus number (8 bits = 256 possible buses)
75 *	15:11	Device number (5 bits)
76 *	10:8	function number
77 *	 7:2	register number
78 *
79 * Notes:
80 *	The function number selects which function of a multi-function device
81 *	(e.g., SCSI and Ethernet).
82 *
83 *	The register selects a DWORD (32 bit) register offset.  Hence it
84 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
85 *	bits.
86 */
87
88static int
89mk_conf_addr(struct pci_bus *bus_dev, unsigned int device_fn, int where,
90	     unsigned long *pci_addr, unsigned char *type1)
91{
92	u8 bus = bus_dev->number;
93
94	*type1 = (bus != 0);
95	*pci_addr = (bus << 16) | (device_fn << 8) | where;
96
97	DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
98	      " returning address 0x%p\n"
99	      bus, device_fn, where, *pci_addr));
100
101	return 0;
102}
103
/*
 * Read a 32-bit word from PCI configuration space.
 *
 * @addr:  pre-computed sparse-space configuration address
 * @type1: non-zero to select a type 1 cycle via the IOC CFG register
 *
 * A probe of an absent device arrives as a machine check, so the
 * access is bracketed with mcheck_expected()/mcheck_taken(); if one
 * was taken, 0xffffffff (the PCI master-abort pattern) is returned
 * instead.  Runs with interrupts disabled; the interleaved mb()
 * pairs are required chip ordering (see note at the top of the file).
 */
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
	unsigned long flags;
	int stat0, value;
	int cia_cfg = 0;

	DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1));
	local_irq_save(flags);

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = stat0;
	mb();
	*(vip)CIA_IOC_CIA_ERR; /* re-read to force write */

	/* If Type1 access, must set CIA CFG. */
	if (type1) {
		cia_cfg = *(vip)CIA_IOC_CFG;
		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	/* Drain outstanding memory traffic, then arm the expected-mcheck
	   machinery before touching config space.  */
	mb();
	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space.  */
	value = *(vip)addr;
	mb();
	mb();  /* magic */
	if (mcheck_taken(0)) {
		/* The read machine-checked: no device at this address.  */
		mcheck_taken(0) = 0;
		value = 0xffffffff;
		mb();
	}
	mcheck_expected(0) = 0;
	mb();

	/* If Type1 access, must reset IOC CFG so normal IO space ops work.  */
	if (type1) {
		*(vip)CIA_IOC_CFG = cia_cfg;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	local_irq_restore(flags);
	DBGC(("done\n"));

	return value;
}
158
/*
 * Write a 32-bit word to PCI configuration space.
 *
 * Mirror of conf_read: clear latched errors first, select a type 1
 * cycle through IOC CFG when needed, and bracket the access with
 * mcheck_expected() so a master abort on an absent device is
 * swallowed rather than fatal.  Runs with interrupts disabled; the
 * mb() pairs are required chip ordering (see top of file).
 */
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
	unsigned long flags;
	int stat0, cia_cfg = 0;

	DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1));
	local_irq_save(flags);

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = stat0;
	mb();
	*(vip)CIA_IOC_CIA_ERR; /* re-read to force write */

	/* If Type1 access, must set CIA CFG.  */
	if (type1) {
		cia_cfg = *(vip)CIA_IOC_CFG;
		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	/* Drain outstanding traffic and arm the expected-mcheck state.  */
	mb();
	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space.  */
	*(vip)addr = value;
	mb();
	*(vip)addr; /* read back to force the write */

	mcheck_expected(0) = 0;
	mb();

	/* If Type1 access, must reset IOC CFG so normal IO space ops work.  */
	if (type1) {
		*(vip)CIA_IOC_CFG = cia_cfg;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	local_irq_restore(flags);
	DBGC(("done\n"));
}
206
207static int
208cia_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
209		u32 *value)
210{
211	unsigned long addr, pci_addr;
212	long mask;
213	unsigned char type1;
214	int shift;
215
216	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
217		return PCIBIOS_DEVICE_NOT_FOUND;
218
219	mask = (size - 1) * 8;
220	shift = (where & 3) * 8;
221	addr = (pci_addr << 5) + mask + CIA_CONF;
222	*value = conf_read(addr, type1) >> (shift);
223	return PCIBIOS_SUCCESSFUL;
224}
225
226static int
227cia_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
228		 u32 value)
229{
230	unsigned long addr, pci_addr;
231	long mask;
232	unsigned char type1;
233
234	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
235		return PCIBIOS_DEVICE_NOT_FOUND;
236
237	mask = (size - 1) * 8;
238	addr = (pci_addr << 5) + mask + CIA_CONF;
239	conf_write(addr, value << ((where & 3) * 8), type1);
240	return PCIBIOS_SUCCESSFUL;
241}
242
/* Hook the accessors above into the generic PCI layer.  */
struct pci_ops cia_pci_ops =
{
	.read = 	cia_read_config,
	.write =	cia_write_config,
};
248
249/*
250 * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb.
251 * It cannot be invalidated.  Rather than hard code the pass numbers,
252 * actually try the tbia to see if it works.
253 */
254
/*
 * Invalidate the scatter-gather TLB by writing the TBIA register.
 * The start/end arguments are ignored -- the whole TLB is flushed.
 * This does not work on the broken revisions noted above;
 * verify_tb_operation() detects that at boot and substitutes
 * cia_pci_tbi_try2.
 */
void
cia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	wmb();
	*(vip)CIA_IOC_PCI_TBIA = 3;	/* Flush all locked and unlocked.  */
	mb();
	*(vip)CIA_IOC_PCI_TBIA;
}
263
264/*
265 * On PYXIS, even if the tbia works, we cannot use it. It effectively locks
266 * the chip (as well as direct write to the tag registers) if there is a
267 * SG DMA operation in progress. This is true at least for PYXIS rev. 1,
268 * so always use the method below.
269 */
270/*
271 * This is the method NT and NetBSD use.
272 *
273 * Allocate mappings, and put the chip into DMA loopback mode to read a
274 * garbage page.  This works by causing TLB misses, causing old entries to
275 * be purged to make room for the new entries coming in for the garbage page.
276 */
277
278#define CIA_BROKEN_TBIA_BASE	0x30000000
279#define CIA_BROKEN_TBIA_SIZE	1024
280
281/* Always called with interrupts disabled */
/*
 * TBIA replacement for chips whose real tbia is unusable (see above).
 * With the chip in PCI loopback mode, issue exactly four reads at 32K
 * intervals through the workaround SG window; the resulting TLB
 * misses evict the stale entries.  The start/end range is ignored.
 * Must be called with interrupts disabled.
 */
void
cia_pci_tbi_try2(struct pci_controller *hose,
		 dma_addr_t start, dma_addr_t end)
{
	void __iomem *bus_addr;
	int ctrl;

	/* Put the chip into PCI loopback mode.  */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	/* Read from PCI dense memory space at TBI_ADDR, skipping 32k on
	   each read.  This forces SG TLB misses.  NetBSD claims that the
	   TLB entries are not quite LRU, meaning that we need to read more
	   times than there are actual tags.  The 2117x docs claim strict
	   round-robin.  Oh well, we've come this far...  */
	/* Even better - as seen on the PYXIS rev 1 the TLB tags 0-3 can
	   be filled by the TLB misses *only once* after being invalidated
	   (by tbia or direct write). Next misses won't update them even
	   though the lock bits are cleared. Tags 4-7 are "quite LRU" though,
	   so use them and read at window 3 base exactly 4 times. Reading
	   more sometimes makes the chip crazy.  -ink */

	bus_addr = cia_ioremap(CIA_BROKEN_TBIA_BASE, 32768 * 4);

	cia_readl(bus_addr + 0x00000);
	cia_readl(bus_addr + 0x08000);
	cia_readl(bus_addr + 0x10000);
	cia_readl(bus_addr + 0x18000);

	cia_iounmap(bus_addr);

	/* Restore normal PCI operation.  */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();
}
325
/*
 * Program DMA window `window' as the small scatter-gather map used by
 * the broken-tbia workaround.  Every PTE is marked valid (PTE format:
 * pfn << 1 | 1) and points at the page holding the table itself, so
 * the loopback reads done by cia_pci_tbi_try2 always land somewhere
 * harmless.
 */
static inline void
cia_prepare_tbia_workaround(int window)
{
	unsigned long *ppte, pte;
	long i;

	/* Use minimal 1K map. */
	/* 32K alignment per the NetBSD hint about page-table alignment.  */
	ppte = __alloc_bootmem(CIA_BROKEN_TBIA_SIZE, 32768, 0);
	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;

	for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
		ppte[i] = pte;

	/* Enable the window for scatter-gather (low bits 3).  */
	*(vip)CIA_IOC_PCI_Wn_BASE(window) = CIA_BROKEN_TBIA_BASE | 3;
	*(vip)CIA_IOC_PCI_Wn_MASK(window)
	  = (CIA_BROKEN_TBIA_SIZE*1024 - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_Tn_BASE(window) = virt_to_phys(ppte) >> 2;
}
344
/*
 * Probe how the scatter-gather TLB actually behaves, using PCI
 * loopback mode so the chip can DMA to itself.  Six tests, in order:
 * that the TLB tag/page registers read back what we write; that a
 * loopback read through a hand-loaded entry works; that tbia really
 * invalidates (otherwise arm the tbi_try2 workaround); that TLB fills
 * snoop the EV5 caches; that a valid tag with an invalid PTE reloads
 * from the page table (otherwise align allocations to 4 pages); and
 * that machine checks are delivered at all.  On a fundamental failure
 * the SG window is disabled entirely.
 */
static void __init
verify_tb_operation(void)
{
	static int page[PAGE_SIZE/4]
		__attribute__((aligned(PAGE_SIZE)))
		__initdata = { 0 };

	struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
	int ctrl, addr0, tag0, pte0, data0;
	int temp, use_tbia_try2 = 0;
	void __iomem *bus_addr;

	/* pyxis -- tbia is broken */
	/* (dense_io_base is only set on PYXIS; see do_init_arch.)  */
	if (pci_isa_hose->dense_io_base)
		use_tbia_try2 = 1;

	/* Put the chip into PCI loopback mode.  */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	/* Write a valid entry directly into the TLB registers.  */

	addr0 = arena->dma_base;
	tag0 = addr0 | 1;
	pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;

	*(vip)CIA_IOC_TB_TAGn(0) = tag0;
	*(vip)CIA_IOC_TB_TAGn(1) = 0;
	*(vip)CIA_IOC_TB_TAGn(2) = 0;
	*(vip)CIA_IOC_TB_TAGn(3) = 0;
	*(vip)CIA_IOC_TB_TAGn(4) = 0;
	*(vip)CIA_IOC_TB_TAGn(5) = 0;
	*(vip)CIA_IOC_TB_TAGn(6) = 0;
	*(vip)CIA_IOC_TB_TAGn(7) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0;
	*(vip)CIA_IOC_TBn_PAGEm(0,1) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,2) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,3) = 0;
	mb();

	/* Get a usable bus address */
	bus_addr = cia_ioremap(addr0, 8*PAGE_SIZE);

	/* First, verify we can read back what we've written.  If
	   this fails, we can't be sure of any of the other testing
	   we're going to do, so bail.  */
	/* ??? Actually, we could do the work with machine checks.
	   By passing this register update test, we pretty much
	   guarantee that cia_pci_tbi_try1 works.  If this test
	   fails, cia_pci_tbi_try2 might still work.  */

	temp = *(vip)CIA_IOC_TB_TAGn(0);
	if (temp != tag0) {
		printk("pci: failed tb register update test "
		       "(tag0 %#x != %#x)\n", temp, tag0);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TB_TAGn(1);
	if (temp != 0) {
		printk("pci: failed tb register update test "
		       "(tag1 %#x != 0)\n", temp);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TBn_PAGEm(0,0);
	if (temp != pte0) {
		printk("pci: failed tb register update test "
		       "(pte0 %#x != %#x)\n", temp, pte0);
		goto failed;
	}
	printk("pci: passed tb register update test\n");

	/* Second, verify we can actually do I/O through this entry.  */

	data0 = 0xdeadbeef;
	page[0] = data0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed sg loopback i/o read test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed sg loopback i/o read test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed sg loopback i/o read test\n");

	/* Third, try to invalidate the TLB.  */

	if (! use_tbia_try2) {
		cia_pci_tbi(arena->hose, 0, -1);
		temp = *(vip)CIA_IOC_TB_TAGn(0);
		if (temp & 1) {
			/* Tag still valid: tbia did nothing.  */
			use_tbia_try2 = 1;
			printk("pci: failed tbia test; workaround available\n");
		} else {
			printk("pci: passed tbia test\n");
		}
	}

	/* Fourth, verify the TLB snoops the EV5's caches when
	   doing a tlb fill.  */

	data0 = 0x5adda15e;
	page[0] = data0;
	arena->ptes[4] = pte0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 4*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed pte write cache snoop test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed pte write cache snoop test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed pte write cache snoop test\n");

	/* Fifth, verify that a previously invalid PTE entry gets
	   filled from the page table.  */

	data0 = 0xabcdef12;
	page[0] = data0;
	arena->ptes[5] = pte0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 5*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed valid tag invalid pte reload test "
		       "(mcheck; workaround available)\n");
		/* Work around this bug by aligning new allocations
		   on 4 page boundaries.  */
		arena->align_entry = 4;
	} else if (temp != data0) {
		printk("pci: failed valid tag invalid pte reload test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	} else {
		printk("pci: passed valid tag invalid pte reload test\n");
	}

	/* Sixth, verify machine checks are working.  Test invalid
	   pte under the same valid tag as we used above.  */

	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 6*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	printk("pci: %s pci machine check test\n",
	       mcheck_taken(0) ? "passed" : "failed");

	/* Clean up after the tests.  */
	arena->ptes[4] = 0;
	arena->ptes[5] = 0;

	if (use_tbia_try2) {
		alpha_mv.mv_pci_tbi = cia_pci_tbi_try2;

		/* Tags 0-3 must be disabled if we use this workaround. */
		wmb();
		*(vip)CIA_IOC_TB_TAGn(0) = 2;
		*(vip)CIA_IOC_TB_TAGn(1) = 2;
		*(vip)CIA_IOC_TB_TAGn(2) = 2;
		*(vip)CIA_IOC_TB_TAGn(3) = 2;

		printk("pci: tbia workaround enabled\n");
	}
	alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

exit:
	/* unmap the bus addr */
	cia_iounmap(bus_addr);

	/* Restore normal PCI operation.  */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();
	return;

failed:
	printk("pci: disabling sg translation window\n");
	*(vip)CIA_IOC_PCI_W0_BASE = 0;
	*(vip)CIA_IOC_PCI_W1_BASE = 0;
	pci_isa_hose->sg_isa = NULL;
	alpha_mv.mv_pci_tbi = NULL;
	goto exit;
}
557
558#if defined(ALPHA_RESTORE_SRM_SETUP)
559/* Save CIA configuration data as the console had it set up.  */
struct
{
    unsigned int hae_mem;		/* sparse memory extension */
    unsigned int hae_io;		/* sparse I/O extension */
    unsigned int pci_dac_offset;	/* W_DAC high address bits */
    unsigned int err_mask;
    unsigned int cia_ctrl;
    unsigned int cia_cnfg;		/* 0 unless PYXIS */
    struct {				/* one per DMA window 0-3 */
	unsigned int w_base;
	unsigned int w_mask;
	unsigned int t_base;
    } window[4];
} saved_config __attribute((common));
574
/*
 * Snapshot the chipset registers (error mask, control, HAEs, DAC
 * offset, the four DMA windows and -- on PYXIS only -- CNFG) so that
 * cia_kill_arch can hand the machine back to SRM exactly as the
 * console configured it.
 */
void
cia_save_srm_settings(int is_pyxis)
{
	int i;

	/* Save some important registers. */
	saved_config.err_mask       = *(vip)CIA_IOC_ERR_MASK;
	saved_config.cia_ctrl       = *(vip)CIA_IOC_CIA_CTRL;
	saved_config.hae_mem        = *(vip)CIA_IOC_HAE_MEM;
	saved_config.hae_io         = *(vip)CIA_IOC_HAE_IO;
	saved_config.pci_dac_offset = *(vip)CIA_IOC_PCI_W_DAC;

	if (is_pyxis)
	    saved_config.cia_cnfg   = *(vip)CIA_IOC_CIA_CNFG;
	else
	    saved_config.cia_cnfg   = 0;

	/* Save DMA windows configuration. */
	for (i = 0; i < 4; i++) {
	    saved_config.window[i].w_base = *(vip)CIA_IOC_PCI_Wn_BASE(i);
	    saved_config.window[i].w_mask = *(vip)CIA_IOC_PCI_Wn_MASK(i);
	    saved_config.window[i].t_base = *(vip)CIA_IOC_PCI_Tn_BASE(i);
	}
	mb();
}
600
/*
 * Write back the register snapshot taken by cia_save_srm_settings:
 * DMA windows first, then the global registers.  A zero saved
 * cia_cnfg means "not PYXIS" and is skipped.
 */
void
cia_restore_srm_settings(void)
{
	int i;

	for (i = 0; i < 4; i++) {
	    *(vip)CIA_IOC_PCI_Wn_BASE(i) = saved_config.window[i].w_base;
	    *(vip)CIA_IOC_PCI_Wn_MASK(i) = saved_config.window[i].w_mask;
	    *(vip)CIA_IOC_PCI_Tn_BASE(i) = saved_config.window[i].t_base;
	}

	*(vip)CIA_IOC_HAE_MEM   = saved_config.hae_mem;
	*(vip)CIA_IOC_HAE_IO    = saved_config.hae_io;
	*(vip)CIA_IOC_PCI_W_DAC = saved_config.pci_dac_offset;
	*(vip)CIA_IOC_ERR_MASK  = saved_config.err_mask;
	*(vip)CIA_IOC_CIA_CTRL  = saved_config.cia_ctrl;

	if (saved_config.cia_cnfg) /* Must be pyxis. */
	    *(vip)CIA_IOC_CIA_CNFG  = saved_config.cia_cnfg;

	mb();
}
623#else /* ALPHA_RESTORE_SRM_SETUP */
624#define cia_save_srm_settings(p)	do {} while (0)
625#define cia_restore_srm_settings()	do {} while (0)
626#endif /* ALPHA_RESTORE_SRM_SETUP */
627
628
/*
 * One-time CIA/PYXIS chipset setup, shared by cia_init_arch and
 * pyxis_init_arch.  Saves the SRM register state (when running under
 * SRM), arms error reporting and machine checks, clears CFG and the
 * HAEs, creates the single hose with sparse (CIA) or BWX (PYXIS)
 * access bases, and programs the PCI-to-memory DMA windows as laid
 * out in the comment below.
 */
static void __init
do_init_arch(int is_pyxis)
{
	struct pci_controller *hose;
	int temp, cia_rev, tbia_window;

	cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK;
	printk("pci: cia revision %d%s\n",
	       cia_rev, is_pyxis ? " (pyxis)" : "");

	if (alpha_using_srm)
		cia_save_srm_settings(is_pyxis);

	/* Set up error reporting.  */
	temp = *(vip)CIA_IOC_ERR_MASK;
	temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV
		  | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT);
	*(vip)CIA_IOC_ERR_MASK = temp;

	/* Clear all currently pending errors.  */
	temp = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = temp;

	/* Turn on mchecks.  */
	temp = *(vip)CIA_IOC_CIA_CTRL;
	temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN;
	*(vip)CIA_IOC_CIA_CTRL = temp;

	/* Clear the CFG register, which gets used for PCI config space
	   accesses.  That is the way we want to use it, and we do not
	   want to depend on what ARC or SRM might have left behind.  */
	*(vip)CIA_IOC_CFG = 0;

	/* Zero the HAEs.  */
	*(vip)CIA_IOC_HAE_MEM = 0;
	*(vip)CIA_IOC_HAE_IO = 0;

	/* For PYXIS, we always use BWX bus and i/o accesses.  To that end,
	   make sure they're enabled on the controller.  At the same time,
	   enable the monster window.  */
	if (is_pyxis) {
		temp = *(vip)CIA_IOC_CIA_CNFG;
		temp |= CIA_CNFG_IOA_BWEN | CIA_CNFG_PCI_MWEN;
		*(vip)CIA_IOC_CIA_CNFG = temp;
	}

	/* Synchronize with all previous changes.  */
	mb();
	*(vip)CIA_IOC_CIA_REV;

	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	if (! is_pyxis) {
		/* CIA: sparse-space accesses behind the HAE_MEM window.  */
		struct resource *hae_mem = alloc_resource();
		hose->mem_space = hae_mem;

		hae_mem->start = 0;
		hae_mem->end = CIA_MEM_R1_MASK;
		hae_mem->name = pci_hae0_name;
		hae_mem->flags = IORESOURCE_MEM;

		if (request_resource(&iomem_resource, hae_mem) < 0)
			printk(KERN_ERR "Failed to request HAE_MEM\n");

		hose->sparse_mem_base = CIA_SPARSE_MEM - IDENT_ADDR;
		hose->dense_mem_base = CIA_DENSE_MEM - IDENT_ADDR;
		hose->sparse_io_base = CIA_IO - IDENT_ADDR;
		hose->dense_io_base = 0;
	} else {
		/* PYXIS: BWX (byte/word) dense access only.  Note that a
		   non-zero dense_io_base is also how verify_tb_operation
		   recognizes PYXIS.  */
		hose->sparse_mem_base = 0;
		hose->dense_mem_base = CIA_BW_MEM - IDENT_ADDR;
		hose->sparse_io_base = 0;
		hose->dense_io_base = CIA_BW_IO - IDENT_ADDR;
	}

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 0 is S/G 8MB at 8MB (for isa)
	 * Window 1 is S/G 1MB at 768MB (for tbia) (unused for CIA rev 1)
	 * Window 2 is direct access 2GB at 2GB
	 * Window 3 is DAC access 4GB at 8GB (or S/G for tbia if CIA rev 1)
	 *
	 * ??? NetBSD hints that page tables must be aligned to 32K,
	 * possibly due to a hardware bug.  This is over-aligned
	 * from the 8K alignment one would expect for an 8MB window.
	 * No description of what revisions affected.
	 */

	hose->sg_pci = NULL;
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768);

	__direct_map_base = 0x80000000;
	__direct_map_size = 0x80000000;

	*(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
	*(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;

	*(vip)CIA_IOC_PCI_W2_BASE = __direct_map_base | 1;
	*(vip)CIA_IOC_PCI_W2_MASK = (__direct_map_size - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T2_BASE = 0 >> 2;

	/* On PYXIS we have the monster window, selected by bit 40, so
	   there is no need for window3 to be enabled.

	   On CIA, we don't have true arbitrary addressing -- bits <39:32>
	   are compared against W_DAC.  We can, however, directly map 4GB,
	   which is better than before.  However, due to assumptions made
	   elsewhere, we should not claim that we support DAC unless that
	   4GB covers all of physical memory.

	   On CIA rev 1, apparently W1 and W2 can't be used for SG.
	   At least, there are reports that it doesn't work for Alcor.
	   In that case, we have no choice but to use W3 for the TBIA
	   workaround, which means we can't use DAC at all. */

	tbia_window = 1;
	if (is_pyxis) {
		*(vip)CIA_IOC_PCI_W3_BASE = 0;
	} else if (cia_rev == 1) {
		*(vip)CIA_IOC_PCI_W1_BASE = 0;
		tbia_window = 3;
	} else if (max_low_pfn > (0x100000000UL >> PAGE_SHIFT)) {
		*(vip)CIA_IOC_PCI_W3_BASE = 0;
	} else {
		*(vip)CIA_IOC_PCI_W3_BASE = 0x00000000 | 1 | 8;
		*(vip)CIA_IOC_PCI_W3_MASK = 0xfff00000;
		*(vip)CIA_IOC_PCI_T3_BASE = 0 >> 2;

		alpha_mv.pci_dac_offset = 0x200000000UL;
		*(vip)CIA_IOC_PCI_W_DAC = alpha_mv.pci_dac_offset >> 32;
	}

	/* Prepare workaround for apparently broken tbia. */
	cia_prepare_tbia_workaround(tbia_window);
}
773
/* Machine-vector init_arch entry point for plain CIA systems.  */
void __init
cia_init_arch(void)
{
	do_init_arch(0);
}
779
/*
 * Machine-vector init_arch entry point for PYXIS systems.  Before
 * the common setup, measure the CPU clock against the PYXIS real
 * time counter and fix up the HWRPB cycle frequency.
 */
void __init
pyxis_init_arch(void)
{
	/* On pyxis machines we can precisely calculate the
	   CPU clock frequency using pyxis real time counter.
	   It's especially useful for SX164 with broken RTC.

	   Both CPU and chipset are driven by the single 16.666M
	   or 16.667M crystal oscillator. PYXIS_RT_COUNT clock is
	   66.66 MHz. -ink */

	unsigned int cc0, cc1;
	unsigned long pyxis_cc;

	/* Count CPU cycles (rpcc) across ~4096 RT counter ticks.  */
	__asm__ __volatile__ ("rpcc %0" : "=r"(cc0));
	pyxis_cc = *(vulp)PYXIS_RT_COUNT;
	do { } while(*(vulp)PYXIS_RT_COUNT - pyxis_cc < 4096);
	__asm__ __volatile__ ("rpcc %0" : "=r"(cc1));
	cc1 -= cc0;
	/* cc1 cycles per 4096 ticks of a 66 2/3 MHz counter:
	   freq = cc1 * (200000000/3) / 4096 = (cc1 >> 11) * 1e8 / 3.  */
	hwrpb->cycle_freq = ((cc1 >> 11) * 100000000UL) / 3;
	hwrpb_update_checksum(hwrpb);

	do_init_arch(1);
}
804
/* Shutdown/restart hook: put SRM's register settings back so the
   console finds the chipset as it left it.  */
void
cia_kill_arch(int mode)
{
	if (alpha_using_srm)
		cia_restore_srm_settings();
}
811
/* PCI bring-up: probe the SG TLB behavior, then do generic setup.  */
void __init
cia_init_pci(void)
{
	/* Must delay this from init_arch, as we need machine checks.  */
	verify_tb_operation();
	common_init_pci();
}
819
/*
 * Clear whatever is currently latched in CIA_ERR by writing the
 * latched bits back (same pattern as conf_read/conf_write).
 */
static inline void
cia_pci_clr_err(void)
{
	int jd;

	jd = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = jd;
	mb();
	*(vip)CIA_IOC_CIA_ERR;		/* re-read to force write.  */
}
830
831#ifdef CONFIG_VERBOSE_MCHECK
832static void
833cia_decode_pci_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
834{
835	static const char * const pci_cmd_desc[16] = {
836		"Interrupt Acknowledge", "Special Cycle", "I/O Read",
837		"I/O Write", "Reserved 0x4", "Reserved 0x5", "Memory Read",
838		"Memory Write", "Reserved 0x8", "Reserved 0x9",
839		"Configuration Read", "Configuration Write",
840		"Memory Read Multiple", "Dual Address Cycle",
841		"Memory Read Line", "Memory Write and Invalidate"
842	};
843
844	if (cia->cia_err & (CIA_ERR_COR_ERR
845			    | CIA_ERR_UN_COR_ERR
846			    | CIA_ERR_MEM_NEM
847			    | CIA_ERR_PA_PTE_INV)) {
848		static const char * const window_desc[6] = {
849			"No window active", "Window 0 hit", "Window 1 hit",
850			"Window 2 hit", "Window 3 hit", "Monster window hit"
851		};
852
853		const char *window;
854		const char *cmd;
855		unsigned long addr, tmp;
856		int lock, dac;
857
858		cmd = pci_cmd_desc[cia->pci_err0 & 0x7];
859		lock = (cia->pci_err0 >> 4) & 1;
860		dac = (cia->pci_err0 >> 5) & 1;
861
862		tmp = (cia->pci_err0 >> 8) & 0x1F;
863		tmp = ffs(tmp);
864		window = window_desc[tmp];
865
866		addr = cia->pci_err1;
867		if (dac) {
868			tmp = *(vip)CIA_IOC_PCI_W_DAC & 0xFFUL;
869			addr |= tmp << 32;
870		}
871
872		printk(KERN_CRIT "CIA machine check: %s\n", msg);
873		printk(KERN_CRIT "  DMA command: %s\n", cmd);
874		printk(KERN_CRIT "  PCI address: %#010lx\n", addr);
875		printk(KERN_CRIT "  %s, Lock: %d, DAC: %d\n",
876		       window, lock, dac);
877	} else if (cia->cia_err & (CIA_ERR_PERR
878				   | CIA_ERR_PCI_ADDR_PE
879				   | CIA_ERR_RCVD_MAS_ABT
880				   | CIA_ERR_RCVD_TAR_ABT
881				   | CIA_ERR_IOA_TIMEOUT)) {
882		static const char * const master_st_desc[16] = {
883			"Idle", "Drive bus", "Address step cycle",
884			"Address cycle", "Data cycle", "Last read data cycle",
885			"Last write data cycle", "Read stop cycle",
886			"Write stop cycle", "Read turnaround cycle",
887			"Write turnaround cycle", "Reserved 0xB",
888			"Reserved 0xC", "Reserved 0xD", "Reserved 0xE",
889			"Unknown state"
890		};
891		static const char * const target_st_desc[16] = {
892			"Idle", "Busy", "Read data cycle", "Write data cycle",
893			"Read stop cycle", "Write stop cycle",
894			"Read turnaround cycle", "Write turnaround cycle",
895			"Read wait cycle", "Write wait cycle",
896			"Reserved 0xA", "Reserved 0xB", "Reserved 0xC",
897			"Reserved 0xD", "Reserved 0xE", "Unknown state"
898		};
899
900		const char *cmd;
901		const char *master, *target;
902		unsigned long addr, tmp;
903		int dac;
904
905		master = master_st_desc[(cia->pci_err0 >> 16) & 0xF];
906		target = target_st_desc[(cia->pci_err0 >> 20) & 0xF];
907		cmd = pci_cmd_desc[(cia->pci_err0 >> 24) & 0xF];
908		dac = (cia->pci_err0 >> 28) & 1;
909
910		addr = cia->pci_err2;
911		if (dac) {
912			tmp = *(volatile int *)CIA_IOC_PCI_W_DAC & 0xFFUL;
913			addr |= tmp << 32;
914		}
915
916		printk(KERN_CRIT "CIA machine check: %s\n", msg);
917		printk(KERN_CRIT "  PCI command: %s\n", cmd);
918		printk(KERN_CRIT "  Master state: %s, Target state: %s\n",
919		       master, target);
920		printk(KERN_CRIT "  PCI address: %#010lx, DAC: %d\n",
921		       addr, dac);
922	} else {
923		printk(KERN_CRIT "CIA machine check: %s\n", msg);
924		printk(KERN_CRIT "  Unknown PCI error\n");
925		printk(KERN_CRIT "  PCI_ERR0 = %#08lx", cia->pci_err0);
926		printk(KERN_CRIT "  PCI_ERR1 = %#08lx", cia->pci_err1);
927		printk(KERN_CRIT "  PCI_ERR2 = %#08lx", cia->pci_err2);
928	}
929}
930
931static void
932cia_decode_mem_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
933{
934	unsigned long mem_port_addr;
935	unsigned long mem_port_mask;
936	const char *mem_port_cmd;
937	const char *seq_state;
938	const char *set_select;
939	unsigned long tmp;
940
941	/* If this is a DMA command, also decode the PCI bits.  */
942	if ((cia->mem_err1 >> 20) & 1)
943		cia_decode_pci_error(cia, msg);
944	else
945		printk(KERN_CRIT "CIA machine check: %s\n", msg);
946
947	mem_port_addr = cia->mem_err0 & 0xfffffff0;
948	mem_port_addr |= (cia->mem_err1 & 0x83UL) << 32;
949
950	mem_port_mask = (cia->mem_err1 >> 12) & 0xF;
951
952	tmp = (cia->mem_err1 >> 8) & 0xF;
953	tmp |= ((cia->mem_err1 >> 20) & 1) << 4;
954	if ((tmp & 0x1E) == 0x06)
955		mem_port_cmd = "WRITE BLOCK or WRITE BLOCK LOCK";
956	else if ((tmp & 0x1C) == 0x08)
957		mem_port_cmd = "READ MISS or READ MISS MODIFY";
958	else if (tmp == 0x1C)
959		mem_port_cmd = "BC VICTIM";
960	else if ((tmp & 0x1E) == 0x0E)
961		mem_port_cmd = "READ MISS MODIFY";
962	else if ((tmp & 0x1C) == 0x18)
963		mem_port_cmd = "DMA READ or DMA READ MODIFY";
964	else if ((tmp & 0x1E) == 0x12)
965		mem_port_cmd = "DMA WRITE";
966	else
967		mem_port_cmd = "Unknown";
968
969	tmp = (cia->mem_err1 >> 16) & 0xF;
970	switch (tmp) {
971	case 0x0:
972		seq_state = "Idle";
973		break;
974	case 0x1:
975		seq_state = "DMA READ or DMA WRITE";
976		break;
977	case 0x2: case 0x3:
978		seq_state = "READ MISS (or READ MISS MODIFY) with victim";
979		break;
980	case 0x4: case 0x5: case 0x6:
981		seq_state = "READ MISS (or READ MISS MODIFY) with no victim";
982		break;
983	case 0x8: case 0x9: case 0xB:
984		seq_state = "Refresh";
985		break;
986	case 0xC:
987		seq_state = "Idle, waiting for DMA pending read";
988		break;
989	case 0xE: case 0xF:
990		seq_state = "Idle, ras precharge";
991		break;
992	default:
993		seq_state = "Unknown";
994		break;
995	}
996
997	tmp = (cia->mem_err1 >> 24) & 0x1F;
998	switch (tmp) {
999	case 0x00: set_select = "Set 0 selected"; break;
1000	case 0x01: set_select = "Set 1 selected"; break;
1001	case 0x02: set_select = "Set 2 selected"; break;
1002	case 0x03: set_select = "Set 3 selected"; break;
1003	case 0x04: set_select = "Set 4 selected"; break;
1004	case 0x05: set_select = "Set 5 selected"; break;
1005	case 0x06: set_select = "Set 6 selected"; break;
1006	case 0x07: set_select = "Set 7 selected"; break;
1007	case 0x08: set_select = "Set 8 selected"; break;
1008	case 0x09: set_select = "Set 9 selected"; break;
1009	case 0x0A: set_select = "Set A selected"; break;
1010	case 0x0B: set_select = "Set B selected"; break;
1011	case 0x0C: set_select = "Set C selected"; break;
1012	case 0x0D: set_select = "Set D selected"; break;
1013	case 0x0E: set_select = "Set E selected"; break;
1014	case 0x0F: set_select = "Set F selected"; break;
1015	case 0x10: set_select = "No set selected"; break;
1016	case 0x1F: set_select = "Refresh cycle"; break;
1017	default:   set_select = "Unknown"; break;
1018	}
1019
1020	printk(KERN_CRIT "  Memory port command: %s\n", mem_port_cmd);
1021	printk(KERN_CRIT "  Memory port address: %#010lx, mask: %#lx\n",
1022	       mem_port_addr, mem_port_mask);
1023	printk(KERN_CRIT "  Memory sequencer state: %s\n", seq_state);
1024	printk(KERN_CRIT "  Memory set: %s\n", set_select);
1025}
1026
1027static void
1028cia_decode_ecc_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
1029{
1030	long syn;
1031	long i;
1032	const char *fmt;
1033
1034	cia_decode_mem_error(cia, msg);
1035
1036	syn = cia->cia_syn & 0xff;
1037	if (syn == (syn & -syn)) {
1038		fmt = KERN_CRIT "  ECC syndrome %#x -- check bit %d\n";
1039		i = ffs(syn) - 1;
1040	} else {
1041		static unsigned char const data_bit[64] = {
1042			0xCE, 0xCB, 0xD3, 0xD5,
1043			0xD6, 0xD9, 0xDA, 0xDC,
1044			0x23, 0x25, 0x26, 0x29,
1045			0x2A, 0x2C, 0x31, 0x34,
1046			0x0E, 0x0B, 0x13, 0x15,
1047			0x16, 0x19, 0x1A, 0x1C,
1048			0xE3, 0xE5, 0xE6, 0xE9,
1049			0xEA, 0xEC, 0xF1, 0xF4,
1050			0x4F, 0x4A, 0x52, 0x54,
1051			0x57, 0x58, 0x5B, 0x5D,
1052			0xA2, 0xA4, 0xA7, 0xA8,
1053			0xAB, 0xAD, 0xB0, 0xB5,
1054			0x8F, 0x8A, 0x92, 0x94,
1055			0x97, 0x98, 0x9B, 0x9D,
1056			0x62, 0x64, 0x67, 0x68,
1057			0x6B, 0x6D, 0x70, 0x75
1058		};
1059
1060		for (i = 0; i < 64; ++i)
1061			if (data_bit[i] == syn)
1062				break;
1063
1064		if (i < 64)
1065			fmt = KERN_CRIT "  ECC syndrome %#x -- data bit %d\n";
1066		else
1067			fmt = KERN_CRIT "  ECC syndrome %#x -- unknown bit\n";
1068	}
1069
1070	printk (fmt, syn, i);
1071}
1072
1073static void
1074cia_decode_parity_error(struct el_CIA_sysdata_mcheck *cia)
1075{
1076	static const char * const cmd_desc[16] = {
1077		"NOP", "LOCK", "FETCH", "FETCH_M", "MEMORY BARRIER",
1078		"SET DIRTY", "WRITE BLOCK", "WRITE BLOCK LOCK",
1079		"READ MISS0", "READ MISS1", "READ MISS MOD0",
1080		"READ MISS MOD1", "BCACHE VICTIM", "Spare",
1081		"READ MISS MOD STC0", "READ MISS MOD STC1"
1082	};
1083
1084	unsigned long addr;
1085	unsigned long mask;
1086	const char *cmd;
1087	int par;
1088
1089	addr = cia->cpu_err0 & 0xfffffff0;
1090	addr |= (cia->cpu_err1 & 0x83UL) << 32;
1091	cmd = cmd_desc[(cia->cpu_err1 >> 8) & 0xF];
1092	mask = (cia->cpu_err1 >> 12) & 0xF;
1093	par = (cia->cpu_err1 >> 21) & 1;
1094
1095	printk(KERN_CRIT "CIA machine check: System bus parity error\n");
1096	printk(KERN_CRIT "  Command: %s, Parity bit: %d\n", cmd, par);
1097	printk(KERN_CRIT "  Address: %#010lx, Mask: %#lx\n", addr, mask);
1098}
1099#endif /* CONFIG_VERBOSE_MCHECK */
1100
1101
/*
 * Examine the CIA error state saved in the machine-check logout frame
 * at LA_PTR.  Returns 0 if CIA_ERR holds no valid error (the machine
 * check did not originate here), 1 otherwise.  With verbose mchecks
 * enabled, also decodes and logs the first error plus any "lost"
 * (overwritten) errors.
 */
static int
cia_decode_mchk(unsigned long la_ptr)
{
	struct el_common *com;
	struct el_CIA_sysdata_mcheck *cia;

	/* The CIA-specific data follows the common logout header at
	   the offset the PALcode recorded.  */
	com = (void *)la_ptr;
	cia = (void *)(la_ptr + com->sys_offset);

	if ((cia->cia_err & CIA_ERR_VALID) == 0)
		return 0;

#ifdef CONFIG_VERBOSE_MCHECK
	if (!alpha_verbose_mcheck)
		return 1;

	/* The low 12 bits of CIA_ERR are the individual error causes;
	   decode the lowest one set.  The case numbers below are the
	   bit positions of the CIA_ERR_* flags named in the comments.  */
	switch (ffs(cia->cia_err & 0xfff) - 1) {
	case 0: /* CIA_ERR_COR_ERR */
		cia_decode_ecc_error(cia, "Corrected ECC error");
		break;
	case 1: /* CIA_ERR_UN_COR_ERR */
		cia_decode_ecc_error(cia, "Uncorrected ECC error");
		break;
	case 2: /* CIA_ERR_CPU_PE */
		cia_decode_parity_error(cia);
		break;
	case 3: /* CIA_ERR_MEM_NEM */
		cia_decode_mem_error(cia, "Access to nonexistent memory");
		break;
	case 4: /* CIA_ERR_PCI_SERR */
		cia_decode_pci_error(cia, "PCI bus system error");
		break;
	case 5: /* CIA_ERR_PERR */
		cia_decode_pci_error(cia, "PCI data parity error");
		break;
	case 6: /* CIA_ERR_PCI_ADDR_PE */
		cia_decode_pci_error(cia, "PCI address parity error");
		break;
	case 7: /* CIA_ERR_RCVD_MAS_ABT */
		cia_decode_pci_error(cia, "PCI master abort");
		break;
	case 8: /* CIA_ERR_RCVD_TAR_ABT */
		cia_decode_pci_error(cia, "PCI target abort");
		break;
	case 9: /* CIA_ERR_PA_PTE_INV */
		cia_decode_pci_error(cia, "PCI invalid PTE");
		break;
	case 10: /* CIA_ERR_FROM_WRT_ERR */
		cia_decode_mem_error(cia, "Write to flash ROM attempted");
		break;
	case 11: /* CIA_ERR_IOA_TIMEOUT */
		cia_decode_pci_error(cia, "I/O timeout");
		break;
	}

	/* The LOST bits record errors that arrived while an earlier
	   error was still latched; only the fact of the loss is known,
	   so just name each one.  */
	if (cia->cia_err & CIA_ERR_LOST_CORR_ERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Correctable ECC error\n");
	if (cia->cia_err & CIA_ERR_LOST_UN_CORR_ERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Uncorrectable ECC error\n");
	if (cia->cia_err & CIA_ERR_LOST_CPU_PE)
		printk(KERN_CRIT "CIA lost machine check: "
		       "System bus parity error\n");
	if (cia->cia_err & CIA_ERR_LOST_MEM_NEM)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Access to nonexistent memory\n");
	if (cia->cia_err & CIA_ERR_LOST_PERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI data parity error\n");
	if (cia->cia_err & CIA_ERR_LOST_PCI_ADDR_PE)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI address parity error\n");
	if (cia->cia_err & CIA_ERR_LOST_RCVD_MAS_ABT)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI master abort\n");
	if (cia->cia_err & CIA_ERR_LOST_RCVD_TAR_ABT)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI target abort\n");
	if (cia->cia_err & CIA_ERR_LOST_PA_PTE_INV)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI invalid PTE\n");
	if (cia->cia_err & CIA_ERR_LOST_FROM_WRT_ERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Write to flash ROM attempted\n");
	if (cia->cia_err & CIA_ERR_LOST_IOA_TIMEOUT)
		printk(KERN_CRIT "CIA lost machine check: "
		       "I/O timeout\n");
#endif /* CONFIG_VERBOSE_MCHECK */

	return 1;
}
1194
/*
 * Machine-check handler entry point for CIA-based systems.  VECTOR is
 * the SCB vector of the exception; LA_PTR points at the PALcode logout
 * frame.  The error state is cleared before any reporting so a nested
 * machine check during decode cannot wedge the machine.  The statement
 * order here is deliberate -- do not reorder (see the note at the top
 * of the file about back-to-back mb()s).
 */
void
cia_machine_check(unsigned long vector, unsigned long la_ptr)
{
	int expected;

	/* Clear the error before any reporting.  */
	mb();
	mb();  /* magic */
	draina();
	cia_pci_clr_err();
	wrmces(rdmces());	/* reset machine check pending flag.  */
	mb();

	/* Vector 0x660 is a system machine check; if it was not provoked
	   by a probe (mcheck_expected), try to attribute it to CIA.  */
	expected = mcheck_expected(0);
	if (!expected && vector == 0x660)
		expected = cia_decode_mchk(la_ptr);
	process_mcheck_info(vector, la_ptr, "CIA", expected);
}
1213