tlb-r4k.c revision d7a887a73dec6c387b02a966a71aac767bbd9ce6
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
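
/*
 * Invalidated slots are parked on distinct VPN2 values inside CKSEG0
 * (unmapped space), one double page apart per index, so no two entries
 * can ever match the same address; overlapping matches can shut the
 * TLB down (machine check) on MIPS32 cores.
 */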

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
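
/*
 * On SMTC the ENTER/EXIT pair also does dvpe()/evpe(), holding the
 * other hardware thread contexts off the shared TLB mid-sequence; the
 * unbalanced braces of the SMTC variant are closed by EXIT_CRITICAL.
 * Typical use, as in the functions below:
 *
 *	ENTER_CRITICAL(flags);
 *	... poke c0_entryhi/entrylo/index, tlbp/tlbwi ...
 *	EXIT_CRITICAL(flags);
 */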

#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4-entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not totally transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); }

#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif
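
/*
 * FLUSH_ITLB_VM only bothers with the ITLB flush when the mapping is
 * executable (VM_EXEC); data-only mappings never end up in the ITLB.
 */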

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Leave the wired entries alone; only flush what is above them. */
	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
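/*
 * drop_mmu_context() either hands the mm a brand new ASID (when it is
 * the current mm on this CPU) or clears its context, so the stale
 * entries simply stop matching; no per-entry probing is required.
 */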
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

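/*
 * Flush the TLB entries of a user address range: if the range covers
 * at most half the TLB, probe and invalidate each (even,odd) page pair
 * under the mm's ASID; otherwise it is cheaper to drop the whole
 * context.  Huge-page VMAs step in HPAGE_SIZE units instead.
 */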
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;
		int huge = is_vm_hugetlb_page(vma);

		ENTER_CRITICAL(flags);
		if (huge) {
			start = round_down(start, HPAGE_SIZE);
			end = round_up(end, HPAGE_SIZE);
			size = (end - start) >> HPAGE_SHIFT;
		} else {
			start = round_down(start, PAGE_SIZE << 1);
			end = round_up(end, PAGE_SIZE << 1);
			size = (end - start) >> (PAGE_SHIFT + 1);
		}
		if (size <= current_cpu_data.tlbsize / 2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				if (huge)
					start += HPAGE_SIZE;
				else
					start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		EXIT_CRITICAL(flags);
	}
}

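/*
 * Same idea for kernel (global) mappings: the ASID is irrelevant here,
 * so the current EntryHi ASID is simply saved and restored around the
 * probe loop, and anything bigger than half the TLB falls back to a
 * full local_flush_tlb_all().
 */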
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

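/*
 * Flush the single (even,odd) page pair containing 'page' for this
 * vma's mm, using the mm's current ASID.
 */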
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		FLUSH_ITLB_VM(vma);
		EXIT_CRITICAL(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
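/*
 * __update_tlb() is reached via update_mmu_cache() in asm/pgtable.h;
 * roughly (a sketch of the caller, not the verbatim header code):
 *
 *	static inline void update_mmu_cache(struct vm_area_struct *vma,
 *		unsigned long address, pte_t *ptep)
 *	{
 *		pte_t pte = *ptep;
 *		__update_tlb(vma, address, pte);
 *	}
 */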
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle the debugger faulting in pages for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_HUGETLB_PAGE
	/* this could be a huge page */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		/* EntryLo1 maps the second half of the huge page */
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	FLUSH_ITLB_VM(vma);
	EXIT_CRITICAL(flags);
}

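/*
 * add_wired_entry() pins a fixed translation: the entry is written at
 * the current c0_wired index and c0_wired is bumped, so it sits below
 * the range that local_flush_tlb_all() invalidates.  Boards use this
 * for things like permanently mapped I/O windows.
 */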
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}

static int __cpuinitdata ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);
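
/*
 * "ntlb=N" on the kernel command line restricts how many TLB entries
 * the kernel will use; tlb_init() below wires off the rest so random
 * replacement never touches them.
 */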

void __cpuinit tlb_init(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large virtual
		 * address.
		 */
		u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
		pg |= PG_ELPA;
#endif
		write_c0_pagegrain(pg);
	}

	/* From this point on the ARC firmware is dead.  */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}