tlb-r4k.c revision 2a21c7300b53b744d16903256a172d9cbcfdd03e
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
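/*
 * (Each TLB entry maps an even/odd pair of pages, hence the PAGE_SHIFT + 1
 * stride; CKSEG0 addresses are unmapped, so these dummy VPN2 values can
 * never match a real lookup.)
 */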

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

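/*
 * On SMTC the two macros below bracket a C block: ENTER_CRITICAL() opens
 * it (and declares mvpflags inside it), EXIT_CRITICAL() closes it, so the
 * two must always be used as a matched pair in the same scope.
 */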
#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4-entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not totally transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); }

#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif

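/*
 * Flush the entire TLB of the local CPU: every non-wired entry is
 * overwritten with a unique, unmapped VPN2 and zeroed EntryLo values.
 */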
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/*
 * All entries common to a mm share an asid.  To effectively flush
 * these entries, we just bump the asid.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

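/*
 * Flush a user virtual address range for the vma's mm.  Small ranges are
 * flushed entry by entry; anything larger than half the TLB is handled by
 * simply assigning the mm a new ASID.
 */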
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		ENTER_CRITICAL(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize / 2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		EXIT_CRITICAL(flags);
	}
}

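/*
 * Flush a range of kernel (global) mappings.  No ASID is involved; large
 * ranges fall back to a full TLB flush.
 */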
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

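/*
 * Invalidate the single TLB entry, if any, covering one user page in the
 * given vma.
 */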
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		FLUSH_ITLB_VM(vma);
		EXIT_CRITICAL(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needed work.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
	write_c0_entrylo0(ptep->pte_high);
	ptep++;
	write_c0_entrylo1(ptep->pte_high);
#else
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	FLUSH_ITLB_VM(vma);
	EXIT_CRITICAL(flags);
}

#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
				       unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	ENTER_CRITICAL(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	EXIT_CRITICAL(flags);
}
#endif

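/*
 * Install a permanent wired TLB entry.  Wired entries live below the
 * c0_wired index and are therefore skipped by local_flush_tlb_all().
 */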
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

static int temp_tlb_entry __initdata;

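/*
 * Temporary entries are allocated downwards from the top of the TLB
 * (temp_tlb_entry is preset to the last index in tlb_init()), so they
 * cannot collide with the wired entries at the bottom.
 */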
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	EXIT_CRITICAL(flags);
	return ret;
}

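/*
 * Read the number of TLB entries from the Config1 register on MIPS32 /
 * MIPS64 CPUs; legacy (pre-MIPS32) CPUs keep the size already determined
 * during CPU probing.
 */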
static void __init probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU the Config 1 register
	 * is not supported and we assume R4k style.  CPU probing has already
	 * figured out the number of TLB entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If the TLB is shared in an SMTC system, the total size has
	 * already been calculated and written into cpu_data tlbsize.
	 */
	if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
		return;
#endif /* CONFIG_MIPS_MT_SMTC */

	reg = read_c0_config1();
	if (!((config >> 7) & 3))
		panic("No TLB present");

	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}

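/*
 * The "ntlb=" kernel command line parameter restricts the number of TLB
 * entries available for random replacement; the remaining entries are
 * marked wired in tlb_init() below.
 */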
static int __initdata ntlb = 0;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

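/*
 * Per-CPU TLB initialisation: probe the TLB size, program sane defaults,
 * flush all entries and install the TLB refill handler.
 */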
void __init tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set for 4kb pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	write_c0_framemask(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.  */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}