smtc.c revision 21a151d8ca3aa74ee79f9791a9d4dc370d3e0636
1/* Copyright (C) 2004 Mips Technologies, Inc */
2
3#include <linux/clockchips.h>
4#include <linux/kernel.h>
5#include <linux/sched.h>
6#include <linux/cpumask.h>
7#include <linux/interrupt.h>
8#include <linux/kernel_stat.h>
9#include <linux/module.h>
10
11#include <asm/cpu.h>
12#include <asm/processor.h>
13#include <asm/atomic.h>
14#include <asm/system.h>
15#include <asm/hardirq.h>
16#include <asm/hazards.h>
17#include <asm/irq.h>
18#include <asm/mmu_context.h>
19#include <asm/smp.h>
20#include <asm/mipsregs.h>
21#include <asm/cacheflush.h>
22#include <asm/time.h>
23#include <asm/addrspace.h>
24#include <asm/smtc.h>
25#include <asm/smtc_ipi.h>
26#include <asm/smtc_proc.h>
27
28/*
29 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
30 * in do_IRQ. These are passed in setup_irq_smtc() and stored
31 * in this table.
32 */
33unsigned long irq_hwmask[NR_IRQS];
34
35#define LOCK_MT_PRA() \
36	local_irq_save(flags); \
37	mtflags = dmt()
38
39#define UNLOCK_MT_PRA() \
40	emt(mtflags); \
41	local_irq_restore(flags)
42
43#define LOCK_CORE_PRA() \
44	local_irq_save(flags); \
45	mtflags = dvpe()
46
47#define UNLOCK_CORE_PRA() \
48	evpe(mtflags); \
49	local_irq_restore(flags)
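
/*
 * Note that LOCK_MT_PRA/LOCK_CORE_PRA expect the caller to declare
 * "flags" and "mtflags" locals.  A typical sequence, as used by
 * smtc_boot_secondary() below:
 *
 *	long flags;
 *	int mtflags;
 *
 *	LOCK_MT_PRA();
 *	... manipulate another TC's or VPE's state ...
 *	UNLOCK_MT_PRA();
 */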
50
51/*
52 * Data structures purely associated with SMTC parallelism
53 */
54
55
56/*
57 * Table for tracking ASIDs whose lifetime is prolonged.
58 */
59
60asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
61
62/*
63 * Clock interrupt "latch" buffers, per "CPU"
64 */
65
66static atomic_t ipi_timer_latch[NR_CPUS];
67
68/*
69 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
70 */
71
72#define IPIBUF_PER_CPU 4
73
74static struct smtc_ipi_q IPIQ[NR_CPUS];
75static struct smtc_ipi_q freeIPIq;
76
77
78/* Forward declarations */
79
80void ipi_decode(struct smtc_ipi *);
81static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
82static void setup_cross_vpe_interrupts(unsigned int nvpe);
83void init_smtc_stats(void);
84
85/* Global SMTC Status */
86
87unsigned int smtc_status = 0;
88
89/* Boot command line configuration overrides */
90
91static int ipibuffers = 0;
92static int nostlb = 0;
93static int asidmask = 0;
94unsigned long smtc_asid_mask = 0xff;
95
96static int __init ipibufs(char *str)
97{
98	get_option(&str, &ipibuffers);
99	return 1;
100}
101
102static int __init stlb_disable(char *s)
103{
104	nostlb = 1;
105	return 1;
106}
107
108static int __init asidmask_set(char *str)
109{
110	get_option(&str, &asidmask);
111	switch (asidmask) {
112	case 0x1:
113	case 0x3:
114	case 0x7:
115	case 0xf:
116	case 0x1f:
117	case 0x3f:
118	case 0x7f:
119	case 0xff:
120		smtc_asid_mask = (unsigned long)asidmask;
121		break;
122	default:
123		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
124	}
125	return 1;
126}
127
128__setup("ipibufs=", ipibufs);
129__setup("nostlb", stlb_disable);
130__setup("asidmask=", asidmask_set);
131
132#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
133
134static int hang_trig = 0;
135
136static int __init hangtrig_enable(char *s)
137{
138	hang_trig = 1;
139	return 1;
140}
141
142
143__setup("hangtrig", hangtrig_enable);
144
145#define DEFAULT_BLOCKED_IPI_LIMIT 32
146
147static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
148
149static int __init tintq(char *str)
150{
151	get_option(&str, &timerq_limit);
152	return 1;
153}
154
155__setup("tintq=", tintq);
156
157static int imstuckcount[2][8];
158/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
159static int vpemask[2][8] = {
160	{0, 0, 1, 0, 0, 0, 0, 1},
161	{0, 0, 0, 0, 0, 0, 0, 1}
162};
163int tcnoprog[NR_CPUS];
164static atomic_t idle_hook_initialized = {0};
165static int clock_hang_reported[NR_CPUS];
166
167#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
168
169/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */
170
171void __init sanitize_tlb_entries(void)
172{
173	printk("Deprecated sanitize_tlb_entries() invoked\n");
174}
175
176
177/*
178 * Configure shared TLB - VPC configuration bit must be set by caller
179 */
180
181static void smtc_configure_tlb(void)
182{
183	int i, tlbsiz, vpes;
184	unsigned long mvpconf0;
185	unsigned long config1val;
186
187	/* Set up ASID preservation table */
188	for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
189	    for(i = 0; i < MAX_SMTC_ASIDS; i++) {
190		smtc_live_asid[vpes][i] = 0;
191	    }
192	}
193	mvpconf0 = read_c0_mvpconf0();
194
195	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
196			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
197	    /* If we have multiple VPEs, try to share the TLB */
198	    if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
199		/*
200		 * If TLB sizing is programmable, shared TLB
201		 * size is the total available complement.
202		 * Otherwise, we have to take the sum of all
203		 * static VPE TLB entries.
204		 */
205		if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
206				>> MVPCONF0_PTLBE_SHIFT)) == 0) {
207		    /*
208		     * If there's more than one VPE, there had better
209		     * be more than one TC, because we need one to bind
210		     * to each VPE in turn to be able to read
211		     * its configuration state!
212		     */
213		    settc(1);
214		    /* Stop the TC from doing anything foolish */
215		    write_tc_c0_tchalt(TCHALT_H);
216		    mips_ihb();
217		    /* No need to un-Halt - that happens later anyway */
218		    for (i=0; i < vpes; i++) {
219		    	write_tc_c0_tcbind(i);
220			/*
221			 * To be 100% sure we're really getting the right
222			 * information, we exit the configuration state
223			 * and do an IHB after each rebinding.
224			 */
225			write_c0_mvpcontrol(
226				read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
227			mips_ihb();
228			/*
229			 * Only count if the MMU Type indicated is TLB
230			 */
231			if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
232				config1val = read_vpe_c0_config1();
233				tlbsiz += ((config1val >> 25) & 0x3f) + 1;
234			}
235
236			/* Put core back in configuration state */
237			write_c0_mvpcontrol(
238				read_c0_mvpcontrol() | MVPCONTROL_VPC );
239			mips_ihb();
240		    }
241		}
242		write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
243		ehb();
244
245		/*
246		 * Setup kernel data structures to use software total,
247		 * rather than read the per-VPE Config1 value. The values
248		 * for "CPU 0" get copied to all the other CPUs as part
249		 * of their initialization in smtc_cpu_setup().
250		 */
251
252		/* MIPS32 limits TLB indices to 64 */
253		if (tlbsiz > 64)
254			tlbsiz = 64;
255		cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
256		smtc_status |= SMTC_TLB_SHARED;
257		local_flush_tlb_all();
258
259		printk("TLB of %d entry pairs shared by %d VPEs\n",
260			tlbsiz, vpes);
261	    } else {
262		printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
263	    }
264	}
265}
266
267
268/*
269 * Incrementally build the CPU map out of constituent MIPS MT cores,
270 * using the specified available VPEs and TCs.  Platform code needs
271 * to ensure that each MIPS MT core invokes this routine on reset,
272 * one at a time(!).
273 *
274 * This version of the build_cpu_map and prepare_cpus routines assumes
275 * that *all* TCs of a MIPS MT core will be used for Linux, and that
276 * they will be spread across *all* available VPEs (to minimise the
277 * loss of efficiency due to exception service serialization).
278 * An improved version would pick up configuration information and
279 * possibly leave some TCs/VPEs as "slave" processors.
280 *
281 * Use c0_MVPConf0 to find out how many TCs are available, setting up
282 * phys_cpu_present_map and the logical/physical mappings.
283 */
284
285int __init mipsmt_build_cpu_map(int start_cpu_slot)
286{
287	int i, ntcs;
288
289	/*
290	 * The CPU map isn't actually used for anything at this point,
291	 * so it's not clear what else we should do apart from set
292	 * everything up so that "logical" = "physical".
293	 */
294	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
295	for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
296		cpu_set(i, phys_cpu_present_map);
297		__cpu_number_map[i] = i;
298		__cpu_logical_map[i] = i;
299	}
300#ifdef CONFIG_MIPS_MT_FPAFF
301	/* Initialize map of CPUs with FPUs */
302	cpus_clear(mt_fpu_cpumask);
303#endif
304
305	/* One of those TCs is the one booting, and not a secondary... */
306	printk("%i available secondary CPU TC(s)\n", i - 1);
307
308	return i;
309}
310
311/*
312 * Common setup before any secondaries are started
313 * Make sure all CPUs are in a sensible state before we boot any of the
314 * secondaries.
315 *
316 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
317 * as possible across the available VPEs.
318 */
319
320static void smtc_tc_setup(int vpe, int tc, int cpu)
321{
322	settc(tc);
323	write_tc_c0_tchalt(TCHALT_H);
324	mips_ihb();
325	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
326			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
327			| TCSTATUS_A);
328	write_tc_c0_tccontext(0);
329	/* Bind tc to vpe */
330	write_tc_c0_tcbind(vpe);
331	/* In general, all TCs should have the same cpu_data indications */
332	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
333	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
334	if (cpu_data[0].cputype == CPU_34K)
335		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
336	cpu_data[cpu].vpe_id = vpe;
337	cpu_data[cpu].tc_id = tc;
338}
339
340
341void mipsmt_prepare_cpus(void)
342{
343	int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
344	unsigned long flags;
345	unsigned long val;
346	int nipi;
347	struct smtc_ipi *pipi;
348
349	/* disable interrupts so we can disable MT */
350	local_irq_save(flags);
351	/* disable MT so we can configure */
352	dvpe();
353	dmt();
354
355	spin_lock_init(&freeIPIq.lock);
356
357	/*
358	 * We probably don't have as many VPEs as we do SMP "CPUs",
359	 * but it's possible - and in any case we'll never use more!
360	 */
361	for (i=0; i<NR_CPUS; i++) {
362		IPIQ[i].head = IPIQ[i].tail = NULL;
363		spin_lock_init(&IPIQ[i].lock);
364		IPIQ[i].depth = 0;
365		atomic_set(&ipi_timer_latch[i], 0);
366	}
367
368	/* cpu_data index starts at zero */
369	cpu = 0;
370	cpu_data[cpu].vpe_id = 0;
371	cpu_data[cpu].tc_id = 0;
372	cpu++;
373
374	/* Report on boot-time options */
375	mips_mt_set_cpuoptions();
376	if (vpelimit > 0)
377		printk("Limit of %d VPEs set\n", vpelimit);
378	if (tclimit > 0)
379		printk("Limit of %d TCs set\n", tclimit);
380	if (nostlb) {
381		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
382	}
383	if (asidmask)
384		printk("ASID mask value override to 0x%x\n", asidmask);
385
386	/* Temporary */
387#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
388	if (hang_trig)
389		printk("Logic Analyser Trigger on suspected TC hang\n");
390#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
391
392	/* Put MVPE's into 'configuration state' */
393	write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
394
395	val = read_c0_mvpconf0();
396	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
397	if (vpelimit > 0 && nvpe > vpelimit)
398		nvpe = vpelimit;
399	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
400	if (ntc > NR_CPUS)
401		ntc = NR_CPUS;
402	if (tclimit > 0 && ntc > tclimit)
403		ntc = tclimit;
404	tcpervpe = ntc / nvpe;
405	slop = ntc % nvpe;	/* Residual TCs, < NVPE */
406
407	/* Set up shared TLB */
408	smtc_configure_tlb();
409
410	for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
411		/*
412		 * Set the MVP bits.
413		 */
414		settc(tc);
415		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
416		if (vpe != 0)
417			printk(", ");
418		printk("VPE %d: TC", vpe);
419		for (i = 0; i < tcpervpe; i++) {
420			/*
421			 * TC 0 is bound to VPE 0 at reset,
422			 * and is presumably executing this
423			 * code.  Leave it alone!
424			 */
425			if (tc != 0) {
426				smtc_tc_setup(vpe, tc, cpu);
427				cpu++;
428			}
429			printk(" %d", tc);
430			tc++;
431		}
432		if (slop) {
433			if (tc != 0) {
434				smtc_tc_setup(vpe, tc, cpu);
435				cpu++;
436			}
437			printk(" %d", tc);
438			tc++;
439			slop--;
440		}
441		if (vpe != 0) {
442			/*
443			 * Clear any stale software interrupts from VPE's Cause
444			 */
445			write_vpe_c0_cause(0);
446
447			/*
448			 * Clear ERL/EXL of VPEs other than 0
449			 * and set restricted interrupt enable/mask.
450			 */
451			write_vpe_c0_status((read_vpe_c0_status()
452				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
453				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
454				| ST0_IE));
455			/*
456			 * set config to be the same as vpe0,
457			 *  particularly kseg0 coherency alg
458			 */
459			write_vpe_c0_config(read_c0_config());
460			/* Clear any pending timer interrupt */
461			write_vpe_c0_compare(0);
462			/* Propagate Config7 */
463			write_vpe_c0_config7(read_c0_config7());
464			write_vpe_c0_count(read_c0_count());
465		}
466		/* enable multi-threading within VPE */
467		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
468		/* enable the VPE */
469		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
470	}
471
472	/*
473	 * Pull any physically present but unused TCs out of circulation.
474	 */
475	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
476		cpu_clear(tc, phys_cpu_present_map);
477		cpu_clear(tc, cpu_present_map);
478		tc++;
479	}
480
481	/* release config state */
482	write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
483
484	printk("\n");
485
486	/* Set up coprocessor affinity CPU mask(s) */
487
488#ifdef CONFIG_MIPS_MT_FPAFF
489	for (tc = 0; tc < ntc; tc++) {
490		if (cpu_data[tc].options & MIPS_CPU_FPU)
491			cpu_set(tc, mt_fpu_cpumask);
492	}
493#endif
494
495	/* set up ipi interrupts... */
496
497	/* If we have multiple VPEs running, set up the cross-VPE interrupt */
498
499	setup_cross_vpe_interrupts(nvpe);
500
501	/* Set up queue of free IPI "messages". */
502	nipi = NR_CPUS * IPIBUF_PER_CPU;
503	if (ipibuffers > 0)
504		nipi = ipibuffers;
505
506	pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL);
507	if (pipi == NULL)
508		panic("kmalloc of IPI message buffers failed\n");
509	else
510		printk("IPI buffer pool of %d buffers\n", nipi);
511	for (i = 0; i < nipi; i++) {
512		smtc_ipi_nq(&freeIPIq, pipi);
513		pipi++;
514	}
515
516	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
517	emt(EMT_ENABLE);
518	evpe(EVPE_ENABLE);
519	local_irq_restore(flags);
520	/* Initialize SMTC /proc statistics/diagnostics */
521	init_smtc_stats();
522}
523
524
525/*
526 * Setup the PC, SP, and GP of a secondary processor and start it
527 * running!
528 * smp_bootstrap is the place to resume from
529 * __KSTK_TOS(idle) is apparently the stack pointer
530 * (unsigned long)idle->thread_info is the gp
531 *
532 */
533void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
534{
535	extern u32 kernelsp[NR_CPUS];
536	long flags;
537	int mtflags;
538
539	LOCK_MT_PRA();
540	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
541		dvpe();
542	}
543	settc(cpu_data[cpu].tc_id);
544
545	/* pc */
546	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
547
548	/* stack pointer */
549	kernelsp[cpu] = __KSTK_TOS(idle);
550	write_tc_gpr_sp(__KSTK_TOS(idle));
551
552	/* global pointer */
553	write_tc_gpr_gp((unsigned long)task_thread_info(idle));
554
555	smtc_status |= SMTC_MTC_ACTIVE;
556	write_tc_c0_tchalt(0);
557	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
558		evpe(EVPE_ENABLE);
559	}
560	UNLOCK_MT_PRA();
561}
562
563void smtc_init_secondary(void)
564{
565	/*
566	 * Start timer on secondary VPEs if necessary.
567	 * plat_timer_setup has already been invoked by init/main
568	 * on "boot" TC.  Like the per_cpu_trap_init() hack, this assumes that
569	 * SMTC init code assigns TCs consecutively and in ascending order
570	 * across available VPEs.
571	 */
572	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
573	    ((read_c0_tcbind() & TCBIND_CURVPE)
574	    != cpu_data[smp_processor_id() - 1].vpe_id)){
575		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
576	}
577
578	local_irq_enable();
579}
580
581void smtc_smp_finish(void)
582{
583	printk("TC %d going on-line as CPU %d\n",
584		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
585}
586
587void smtc_cpus_done(void)
588{
589}
590
591/*
592 * Support for SMTC-optimized driver IRQ registration
593 */
594
595/*
596 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
597 * in do_IRQ. These are passed in setup_irq_smtc() and stored
598 * in this table.
599 */
600
601int setup_irq_smtc(unsigned int irq, struct irqaction * new,
602			unsigned long hwmask)
603{
604#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
605	unsigned int vpe = current_cpu_data.vpe_id;
606
607	vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
608#endif
609	irq_hwmask[irq] = hwmask;
610
611	return setup_irq(irq, new);
612}
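
/*
 * The hardware mask is the Status.IM bit pattern for the interrupt
 * source; for instance, the cross-VPE IPI registration later in this
 * file does:
 *
 *	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
 */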
613
614#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
615/*
616 * Support for IRQ affinity to TCs
617 */
618
619void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
620{
621	/*
622	 * If a "fast path" cache of quickly decodable affinity state
623	 * is maintained, this is where it gets done, on a call up
624	 * from the platform affinity code.
625	 */
626}
627
628void smtc_forward_irq(unsigned int irq)
629{
630	int target;
631
632	/*
633	 * OK wise guy, now figure out how to get the IRQ
634	 * to be serviced on an authorized "CPU".
635	 *
636	 * Ideally, to handle the situation where an IRQ has multiple
637	 * eligible CPUS, we would maintain state per IRQ that would
638	 * eligible CPUs, we would maintain state per IRQ that would
639	 * expected use model is any-or-only-one, for simplicity
640	 * and efficiency, we just pick the easiest one to find.
641	 */
642
643	target = first_cpu(irq_desc[irq].affinity);
644
645	/*
646	 * We depend on the platform code to have correctly processed
647	 * IRQ affinity change requests to ensure that the IRQ affinity
648	 * mask has been purged of bits corresponding to nonexistent and
649	 * offline "CPUs", and to TCs bound to VPEs other than the VPE
650	 * connected to the physical interrupt input for the interrupt
651	 * in question.  Otherwise we have a nasty problem with interrupt
652	 * mask management.  This is best handled in non-performance-critical
653	 * platform IRQ affinity setting code,  to minimize interrupt-time
654	 * checks.
655	 */
656
657	/* If no one is eligible, service locally */
658	if (target >= NR_CPUS) {
659		do_IRQ_no_affinity(irq);
660		return;
661	}
662
663	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
664}
665
666#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
667
668/*
669 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
670 * Within a VPE one TC can interrupt another by different approaches.
671 * The easiest to get right would probably be to make all TCs except
672 * the target IXMT and set a software interrupt, but an IXMT-based
673 * scheme requires that a handler must run before a new IPI could
674 * be sent, which would break the "broadcast" loops in MIPS MT.
675 * A more gonzo approach within a VPE is to halt the TC, extract
676 * its Restart, Status, and a couple of GPRs, and program the Restart
677 * address to emulate an interrupt.
678 *
679 * Within a VPE, one can be confident that the target TC isn't in
680 * a critical EXL state when halted, since the write to the Halt
681 * register could not have issued on the writing thread if the
682 * halting thread had EXL set. So k0 and k1 of the target TC
683 * can be used by the injection code.  Across VPEs, one can't
684 * be certain that the target TC isn't in a critical exception
685 * state. So we try a two-step process of sending a software
686 * interrupt to the target VPE, which either handles the event
687 * itself (if it was the target) or injects the event within
688 * the VPE.
689 */
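
/*
 * Both delivery paths are implemented below: smtc_send_ipi() halts a
 * target TC on the same VPE and uses post_direct_ipi() to inject the
 * emulated interrupt, while a target on another VPE gets the message
 * queued and a cross-VPE software interrupt (serviced by
 * ipi_interrupt()) to deliver it.
 */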
690
691static void smtc_ipi_qdump(void)
692{
693	int i;
694
695	for (i = 0; i < NR_CPUS; i++) {
696		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
697			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
698			IPIQ[i].depth);
699	}
700}
701
702/*
703 * The standard atomic.h primitives don't quite do what we want
704 * here: We need an atomic add-and-return-previous-value (which
705 * could be done with atomic_add_return and a decrement) and an
706 * atomic set/zero-and-return-previous-value (which can't really
707 * be done with the atomic.h primitives). And since this is
708 * MIPS MT, we can assume that we have LL/SC.
709 */
710static inline int atomic_postincrement(atomic_t *v)
711{
712	unsigned long result;
713
714	unsigned long temp;
715
716	__asm__ __volatile__(
717	"1:	ll	%0, %2					\n"
718	"	addu	%1, %0, 1				\n"
719	"	sc	%1, %2					\n"
720	"	beqz	%1, 1b					\n"
721	__WEAK_LLSC_MB
722	: "=&r" (result), "=&r" (temp), "=m" (v->counter)
723	: "m" (v->counter)
724	: "memory");
725
726	return result;
727}
728
729void smtc_send_ipi(int cpu, int type, unsigned int action)
730{
731	int tcstatus;
732	struct smtc_ipi *pipi;
733	long flags;
734	int mtflags;
735
736	if (cpu == smp_processor_id()) {
737		printk("Cannot Send IPI to self!\n");
738		return;
739	}
740	/* Set up a descriptor, to be delivered either promptly or queued */
741	pipi = smtc_ipi_dq(&freeIPIq);
742	if (pipi == NULL) {
743		bust_spinlocks(1);
744		mips_mt_regdump(dvpe());
745		panic("IPI Msg. Buffers Depleted\n");
746	}
747	pipi->type = type;
748	pipi->arg = (void *)action;
749	pipi->dest = cpu;
750	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
751		if (type == SMTC_CLOCK_TICK)
752			atomic_inc(&ipi_timer_latch[cpu]);
753		/* If not on same VPE, enqueue and send cross-VPE interrupt */
754		smtc_ipi_nq(&IPIQ[cpu], pipi);
755		LOCK_CORE_PRA();
756		settc(cpu_data[cpu].tc_id);
757		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
758		UNLOCK_CORE_PRA();
759	} else {
760		/*
761		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
762		 * since ASID shootdown on the other VPE may
763		 * collide with this operation.
764		 */
765		LOCK_CORE_PRA();
766		settc(cpu_data[cpu].tc_id);
767		/* Halt the targeted TC */
768		write_tc_c0_tchalt(TCHALT_H);
769		mips_ihb();
770
771		/*
772		 * Inspect TCStatus - if IXMT is set, we have to queue
773		 * a message. Otherwise, we set up the "interrupt"
774		 * of the other TC.
775		 */
776		tcstatus = read_tc_c0_tcstatus();
777
778		if ((tcstatus & TCSTATUS_IXMT) != 0) {
779			/*
780			 * Spin-waiting here can deadlock,
781			 * so we queue the message for the target TC.
782			 */
783			write_tc_c0_tchalt(0);
784			UNLOCK_CORE_PRA();
785			/* Try to reduce redundant timer interrupt messages */
786			if (type == SMTC_CLOCK_TICK) {
787			    if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
788				smtc_ipi_nq(&freeIPIq, pipi);
789				return;
790			    }
791			}
792			smtc_ipi_nq(&IPIQ[cpu], pipi);
793		} else {
794			if (type == SMTC_CLOCK_TICK)
795				atomic_inc(&ipi_timer_latch[cpu]);
796			post_direct_ipi(cpu, pipi);
797			write_tc_c0_tchalt(0);
798			UNLOCK_CORE_PRA();
799		}
800	}
801}
802
803/*
804 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
805 */
806static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
807{
808	struct pt_regs *kstack;
809	unsigned long tcstatus;
810	unsigned long tcrestart;
811	extern u32 kernelsp[NR_CPUS];
812	extern void __smtc_ipi_vector(void);
813//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
814
815	/* Extract Status, EPC from halted TC */
816	tcstatus = read_tc_c0_tcstatus();
817	tcrestart = read_tc_c0_tcrestart();
818	/* If TCRestart indicates a WAIT instruction, advance the PC */
819	if ((tcrestart & 0x80000000)
820	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
821		tcrestart += 4;
822	}
823	/*
824	 * Save on TC's future kernel stack
825	 *
826	 * CU bit of Status is indicator that TC was
827	 * already running on a kernel stack...
828	 */
829	if (tcstatus & ST0_CU0)  {
830		/* Note that this "- 1" is pointer arithmetic */
831		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
832	} else {
833		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
834	}
835
836	kstack->cp0_epc = (long)tcrestart;
837	/* Save TCStatus */
838	kstack->cp0_tcstatus = tcstatus;
839	/* Pass token of operation to be performed in kernel stack pad area */
840	kstack->pad0[4] = (unsigned long)pipi;
841	/* Pass address of function to be called likewise */
842	kstack->pad0[5] = (unsigned long)&ipi_decode;
843	/* Set interrupt exempt and kernel mode */
844	tcstatus |= TCSTATUS_IXMT;
845	tcstatus &= ~TCSTATUS_TKSU;
846	write_tc_c0_tcstatus(tcstatus);
847	ehb();
848	/* Set TC Restart address to be SMTC IPI vector */
849	write_tc_c0_tcrestart(__smtc_ipi_vector);
850}
851
852static void ipi_resched_interrupt(void)
853{
854	/* Return from interrupt should be enough to cause scheduler check */
855}
856
857
858static void ipi_call_interrupt(void)
859{
860	/* Invoke generic function invocation code in smp.c */
861	smp_call_function_interrupt();
862}
863
864DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
865
866void ipi_decode(struct smtc_ipi *pipi)
867{
868	unsigned int cpu = smp_processor_id();
869	struct clock_event_device *cd;
870	void *arg_copy = pipi->arg;
871	int type_copy = pipi->type;
872	int ticks;
873
874	smtc_ipi_nq(&freeIPIq, pipi);
875	switch (type_copy) {
876	case SMTC_CLOCK_TICK:
877		irq_enter();
878		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
879		cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
880		ticks = atomic_read(&ipi_timer_latch[cpu]);
881		atomic_sub(ticks, &ipi_timer_latch[cpu]);
882		while (ticks) {
883			cd->event_handler(cd);
884			ticks--;
885		}
886		irq_exit();
887		break;
888
889	case LINUX_SMP_IPI:
890		switch ((int)arg_copy) {
891		case SMP_RESCHEDULE_YOURSELF:
892			ipi_resched_interrupt();
893			break;
894		case SMP_CALL_FUNCTION:
895			ipi_call_interrupt();
896			break;
897		default:
898			printk("Impossible SMTC IPI Argument 0x%x\n",
899				(int)arg_copy);
900			break;
901		}
902		break;
903#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
904	case IRQ_AFFINITY_IPI:
905		/*
906		 * Accept a "forwarded" interrupt that was initially
907		 * taken by a TC who doesn't have affinity for the IRQ.
908		 */
909		do_IRQ_no_affinity((int)arg_copy);
910		break;
911#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
912	default:
913		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
914		break;
915	}
916}
917
918void deferred_smtc_ipi(void)
919{
920	struct smtc_ipi *pipi;
921	unsigned long flags;
922/* DEBUG */
923	int q = smp_processor_id();
924
925	/*
926	 * Test is not atomic, but much faster than a dequeue,
927	 * and the vast majority of invocations will have a null queue.
928	 */
929	if (IPIQ[q].head != NULL) {
930		while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
931			/* ipi_decode() should be called with interrupts off */
932			local_irq_save(flags);
933			ipi_decode(pipi);
934			local_irq_restore(flags);
935		}
936	}
937}
938
939/*
940 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
941 * set via cross-VPE MTTR manipulation of the Cause register. It would be
942 * in some regards preferable to have external logic for "doorbell" hardware
943 * interrupts.
944 */
945
946static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
947
948static irqreturn_t ipi_interrupt(int irq, void *dev_id)
949{
950	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
951	int my_tc = cpu_data[smp_processor_id()].tc_id;
952	int cpu;
953	struct smtc_ipi *pipi;
954	unsigned long tcstatus;
955	int sent;
956	long flags;
957	unsigned int mtflags;
958	unsigned int vpflags;
959
960	/*
961	 * So long as cross-VPE interrupts are done via
962	 * MFTR/MTTR read-modify-writes of Cause, we need
963	 * to stop other VPEs whenever the local VPE does
964	 * anything similar.
965	 */
966	local_irq_save(flags);
967	vpflags = dvpe();
968	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
969	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
970	irq_enable_hazard();
971	evpe(vpflags);
972	local_irq_restore(flags);
973
974	/*
975	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
976	 * queued for TCs on this VPE other than the current one.
977	 * Return-from-interrupt should cause us to drain the queue
978	 * for the current TC, so we ought not to have to do it explicitly here.
979	 */
980
981	for_each_online_cpu(cpu) {
982		if (cpu_data[cpu].vpe_id != my_vpe)
983			continue;
984
985		pipi = smtc_ipi_dq(&IPIQ[cpu]);
986		if (pipi != NULL) {
987			if (cpu_data[cpu].tc_id != my_tc) {
988				sent = 0;
989				LOCK_MT_PRA();
990				settc(cpu_data[cpu].tc_id);
991				write_tc_c0_tchalt(TCHALT_H);
992				mips_ihb();
993				tcstatus = read_tc_c0_tcstatus();
994				if ((tcstatus & TCSTATUS_IXMT) == 0) {
995					post_direct_ipi(cpu, pipi);
996					sent = 1;
997				}
998				write_tc_c0_tchalt(0);
999				UNLOCK_MT_PRA();
1000				if (!sent) {
1001					smtc_ipi_req(&IPIQ[cpu], pipi);
1002				}
1003			} else {
1004				/*
1005				 * ipi_decode() should be called
1006				 * with interrupts off
1007				 */
1008				local_irq_save(flags);
1009				ipi_decode(pipi);
1010				local_irq_restore(flags);
1011			}
1012		}
1013	}
1014
1015	return IRQ_HANDLED;
1016}
1017
1018static void ipi_irq_dispatch(void)
1019{
1020	do_IRQ(cpu_ipi_irq);
1021}
1022
1023static struct irqaction irq_ipi = {
1024	.handler	= ipi_interrupt,
1025	.flags		= IRQF_DISABLED | IRQF_PERCPU,
1026	.name		= "SMTC_IPI"
1028};
1029
1030static void setup_cross_vpe_interrupts(unsigned int nvpe)
1031{
1032	if (nvpe < 1)
1033		return;
1034
1035	if (!cpu_has_vint)
1036		panic("SMTC Kernel requires Vectored Interrupt support");
1037
1038	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
1039
1040	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
1041
1042	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
1043}
1044
1045/*
1046 * SMTC-specific hacks invoked from elsewhere in the kernel.
1047 *
1048 * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
1049 * called with interrupts disabled.  We do rely on interrupts being disabled
1050 * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
1051 * result in a recursive call to raw_local_irq_restore().
1052 */
1053
1054static void __smtc_ipi_replay(void)
1055{
1056	unsigned int cpu = smp_processor_id();
1057
1058	/*
1059	 * To the extent that we've ever turned interrupts off,
1060	 * we may have accumulated deferred IPIs.  This is subtle.
1061	 * If we use the smtc_ipi_qdepth() macro, we'll get an
1062	 * exact number - but we'll also disable interrupts
1063	 * and create a window of failure where a new IPI gets
1064	 * queued after we test the depth but before we re-enable
1065	 * interrupts. So long as IXMT never gets set, however,
1066	 * we should be OK:  If we pick up something and dispatch
1067	 * it here, that's great. If we see nothing, but concurrent
1068	 * with this operation, another TC sends us an IPI, IXMT
1069	 * is clear, and we'll handle it as a real pseudo-interrupt
1070	 * and not a pseudo-pseudo interrupt.
1071	 */
1072	if (IPIQ[cpu].depth > 0) {
1073		while (1) {
1074			struct smtc_ipi_q *q = &IPIQ[cpu];
1075			struct smtc_ipi *pipi;
1076			extern void self_ipi(struct smtc_ipi *);
1077
1078			spin_lock(&q->lock);
1079			pipi = __smtc_ipi_dq(q);
1080			spin_unlock(&q->lock);
1081			if (!pipi)
1082				break;
1083
1084			self_ipi(pipi);
1085			smtc_cpu_stats[cpu].selfipis++;
1086		}
1087	}
1088}
1089
1090void smtc_ipi_replay(void)
1091{
1092	raw_local_irq_disable();
1093	__smtc_ipi_replay();
1094}
1095
1096EXPORT_SYMBOL(smtc_ipi_replay);
1097
1098void smtc_idle_loop_hook(void)
1099{
1100#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
1101	int im;
1102	int flags;
1103	int mtflags;
1104	int bit;
1105	int vpe;
1106	int tc;
1107	int hook_ntcs;
1108	/*
1109	 * printk within DMT-protected regions can deadlock,
1110	 * so buffer diagnostic messages for later output.
1111	 */
1112	char *pdb_msg;
1113	char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
1114
1115	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
1116		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
1117			int mvpconf0;
1118			/* Tedious stuff to just do once */
1119			mvpconf0 = read_c0_mvpconf0();
1120			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
1121			if (hook_ntcs > NR_CPUS)
1122				hook_ntcs = NR_CPUS;
1123			for (tc = 0; tc < hook_ntcs; tc++) {
1124				tcnoprog[tc] = 0;
1125				clock_hang_reported[tc] = 0;
1126	    		}
1127			for (vpe = 0; vpe < 2; vpe++)
1128				for (im = 0; im < 8; im++)
1129					imstuckcount[vpe][im] = 0;
1130			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
1131			atomic_set(&idle_hook_initialized, 1000);
1132		} else {
1133			/* Someone else is initializing in parallel - let 'em finish */
1134			while (atomic_read(&idle_hook_initialized) < 1000)
1135				;
1136		}
1137	}
1138
1139	/* Have we stupidly left IXMT set somewhere? */
1140	if (read_c0_tcstatus() & 0x400) {
1141		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
1142		ehb();
1143		printk("Dangling IXMT in cpu_idle()\n");
1144	}
1145
1146	/* Have we stupidly left an IM bit turned off? */
1147#define IM_LIMIT 2000
1148	local_irq_save(flags);
1149	mtflags = dmt();
1150	pdb_msg = &id_ho_db_msg[0];
1151	im = read_c0_status();
1152	vpe = current_cpu_data.vpe_id;
1153	for (bit = 0; bit < 8; bit++) {
1154		/*
1155		 * In current prototype, I/O interrupts
1156		 * are masked for VPE > 0
1157		 */
1158		if (vpemask[vpe][bit]) {
1159			if (!(im & (0x100 << bit)))
1160				imstuckcount[vpe][bit]++;
1161			else
1162				imstuckcount[vpe][bit] = 0;
1163			if (imstuckcount[vpe][bit] > IM_LIMIT) {
1164				set_c0_status(0x100 << bit);
1165				ehb();
1166				imstuckcount[vpe][bit] = 0;
1167				pdb_msg += sprintf(pdb_msg,
1168					"Dangling IM %d fixed for VPE %d\n", bit,
1169					vpe);
1170			}
1171		}
1172	}
1173
1174	/*
1175	 * Now that we limit outstanding timer IPIs, check for hung TC
1176	 */
1177	for (tc = 0; tc < NR_CPUS; tc++) {
1178		/* Don't check ourself - we'll dequeue IPIs just below */
1179		if ((tc != smp_processor_id()) &&
1180		    atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
1181		    if (clock_hang_reported[tc] == 0) {
1182			pdb_msg += sprintf(pdb_msg,
1183				"TC %d looks hung with timer latch at %d\n",
1184				tc, atomic_read(&ipi_timer_latch[tc]));
1185			clock_hang_reported[tc]++;
1186			}
1187		}
1188	}
1189	emt(mtflags);
1190	local_irq_restore(flags);
1191	if (pdb_msg != &id_ho_db_msg[0])
1192		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
1193#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
1194
1195	/*
1196	 * Replay any accumulated deferred IPIs. If "Instant Replay"
1197	 * is in use, there should never be any.
1198	 */
1199#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
1200	{
1201		unsigned long flags;
1202
1203		local_irq_save(flags);
1204		__smtc_ipi_replay();
1205		local_irq_restore(flags);
1206	}
1207#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
1208}
1209
1210void smtc_soft_dump(void)
1211{
1212	int i;
1213
1214	printk("Counter Interrupts taken per CPU (TC)\n");
1215	for (i=0; i < NR_CPUS; i++) {
1216		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
1217	}
1218	printk("Self-IPI invocations:\n");
1219	for (i=0; i < NR_CPUS; i++) {
1220		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
1221	}
1222	smtc_ipi_qdump();
1223	printk("Timer IPI Backlogs:\n");
1224	for (i=0; i < NR_CPUS; i++) {
1225		printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
1226	}
1227	printk("%d Recoveries of \"stolen\" FPU\n",
1228	       atomic_read(&smtc_fpu_recoveries));
1229}
1230
1231
1232/*
1233 * TLB management routines special to SMTC
1234 */
1235
1236void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1237{
1238	unsigned long flags, mtflags, tcstat, prevhalt, asid;
1239	int tlb, i;
1240
1241	/*
1242	 * It would be nice to be able to use a spinlock here,
1243	 * but this is invoked from within TLB flush routines
1244	 * that protect themselves with DVPE, so if a lock is
1245	 * held by another TC, it'll never be freed.
1246	 *
1247	 * DVPE/DMT must not be done with interrupts enabled,
1248	 * so even though most callers will already have disabled
1249	 * them, let's be really careful...
1250	 */
1251
1252	local_irq_save(flags);
1253	if (smtc_status & SMTC_TLB_SHARED) {
1254		mtflags = dvpe();
1255		tlb = 0;
1256	} else {
1257		mtflags = dmt();
1258		tlb = cpu_data[cpu].vpe_id;
1259	}
1260	asid = asid_cache(cpu);
1261
1262	do {
1263		if (!((asid += ASID_INC) & ASID_MASK) ) {
1264			if (cpu_has_vtag_icache)
1265				flush_icache_all();
1266			/* Traverse all online CPUs (hack requires contiguous range) */
1267			for (i = 0; i < num_online_cpus(); i++) {
1268				/*
1269				 * We don't need to worry about our own CPU, nor about
1270				 * CPUs that don't share our TLB.
1271				 */
1272				if ((i != smp_processor_id()) &&
1273				    ((smtc_status & SMTC_TLB_SHARED) ||
1274				     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
1275					settc(cpu_data[i].tc_id);
1276					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
1277					if (!prevhalt) {
1278						write_tc_c0_tchalt(TCHALT_H);
1279						mips_ihb();
1280					}
1281					tcstat = read_tc_c0_tcstatus();
1282					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
1283					if (!prevhalt)
1284						write_tc_c0_tchalt(0);
1285				}
1286			}
1287			if (!asid)		/* fix version if needed */
1288				asid = ASID_FIRST_VERSION;
1289			local_flush_tlb_all();	/* start new asid cycle */
1290		}
1291	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
1292
1293	/*
1294	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
1295	 */
1296	for (i = 0; i < num_online_cpus(); i++) {
1297		if ((smtc_status & SMTC_TLB_SHARED) ||
1298		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
1299			cpu_context(i, mm) = asid_cache(i) = asid;
1300	}
1301
1302	if (smtc_status & SMTC_TLB_SHARED)
1303		evpe(mtflags);
1304	else
1305		emt(mtflags);
1306	local_irq_restore(flags);
1307}
1308
1309/*
1310 * Invoked from macros defined in mmu_context.h
1311 * which must already have disabled interrupts
1312 * and done a DVPE or DMT as appropriate.
1313 */
1314
1315void smtc_flush_tlb_asid(unsigned long asid)
1316{
1317	int entry;
1318	unsigned long ehi;
1319
1320	entry = read_c0_wired();
1321
1322	/* Traverse all non-wired entries */
1323	while (entry < current_cpu_data.tlbsize) {
1324		write_c0_index(entry);
1325		ehb();
1326		tlb_read();
1327		ehb();
1328		ehi = read_c0_entryhi();
1329		if ((ehi & ASID_MASK) == asid) {
1330		    /*
1331		     * Invalidate only entries with the specified ASID,
1332		     * making sure all entries differ.
1333		     */
1334		    write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
1335		    write_c0_entrylo0(0);
1336		    write_c0_entrylo1(0);
1337		    mtc0_tlbw_hazard();
1338		    tlb_write_indexed();
1339		}
1340		entry++;
1341	}
1342	write_c0_index(PARKED_INDEX);
1343	tlbw_use_hazard();
1344}
1345
1346/*
1347 * Support for single-threading cache flush operations.
1348 */
1349
1350static int halt_state_save[NR_CPUS];
1351
1352/*
1353 * To really, really be sure that nothing is being done
1354 * by other TCs, halt them all.  This code assumes that
1355 * a DVPE has already been done, so while their Halted
1356 * state is theoretically architecturally unstable, in
1357 * practice, it's not going to change while we're looking
1358 * at it.
1359 */
1360
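/*
 * A sketch of the intended pairing, assuming the caller has already
 * performed the DVPE described above (as the cache flush code is
 * expected to do):
 *
 *	vpflags = dvpe();
 *	smtc_cflush_lockdown();
 *	... issue the CACHE operations ...
 *	smtc_cflush_release();
 *	evpe(vpflags);
 */
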
1361void smtc_cflush_lockdown(void)
1362{
1363	int cpu;
1364
1365	for_each_online_cpu(cpu) {
1366		if (cpu != smp_processor_id()) {
1367			settc(cpu_data[cpu].tc_id);
1368			halt_state_save[cpu] = read_tc_c0_tchalt();
1369			write_tc_c0_tchalt(TCHALT_H);
1370		}
1371	}
1372	mips_ihb();
1373}
1374
1375/* It would be cheating to change the cpu_online states during a flush! */
1376
1377void smtc_cflush_release(void)
1378{
1379	int cpu;
1380
1381	/*
1382	 * Start with a hazard barrier to ensure
1383	 * that all CACHE ops have played through.
1384	 */
1385	mips_ihb();
1386
1387	for_each_online_cpu(cpu) {
1388		if (cpu != smp_processor_id()) {
1389			settc(cpu_data[cpu].tc_id);
1390			write_tc_c0_tchalt(halt_state_save[cpu]);
1391		}
1392	}
1393	mips_ihb();
1394}
1395