tlb-r4k.c revision 6e760c8dae7d6c47eff011dd4aad53c94d30494b
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different,
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
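/*
 * With 4kB pages (PAGE_SHIFT == 12) this yields CKSEG0, CKSEG0 + 0x2000,
 * CKSEG0 + 0x4000, ...: one dummy VPN2 per TLB index, each a double page
 * apart and all inside the unmapped CKSEG0 segment, so the dummy entries
 * can never be hit and no two entries share a VPN2.
 */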

/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
				     "nop; nop; nop; nop; nop; nop;\n\t" \
				     ".set reorder\n\t")
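/*
 * The no-ops give a preceding CP0 operation (here mainly tlb_probe()) time
 * to take effect before a dependent read such as read_c0_index(); the
 * mtc0_tlbw_hazard()/tlbw_use_hazard() macros used below cover the
 * write-side hazards in a CPU-specific way.
 */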

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}

/*
 * All entries common to a mm share an asid.  To effectively flush
 * these entries, we just bump the asid.
 */
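/*
 * drop_mmu_context() hands the mm a fresh ASID via get_new_mmu_context()
 * and reloads EntryHi, so entries tagged with the old ASID can no longer
 * match; nothing needs to be probed or rewritten in the TLB itself.  When
 * the (typically 8-bit) ASID space wraps, get_new_mmu_context() falls back
 * to local_flush_tlb_all().
 */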
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		/*
		 * Each TLB entry maps an even/odd page pair, so the number
		 * of entries to flush is half the number of pages.
		 */
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		local_irq_save(flags);
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				BARRIER;
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	local_irq_save(flags);
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			BARRIER;
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		BARRIER;
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
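/*
 * A global entry (G bit set in both EntryLo registers) matches regardless
 * of the ASID in EntryHi, which is why probing with the bare page address
 * is enough here.
 */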
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);

	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
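/*
 * __update_tlb() is called from update_mmu_cache() after the page tables
 * have been updated on a fault; it preloads the even/odd EntryLo pair for
 * the faulting address so the access that follows does not immediately
 * take another TLB exception.
 */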
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);

	/*
	 * The low six bits of a pte hold software-only flags, so shifting
	 * right by six lines the PFN and the C, D, V and G bits up with the
	 * EntryLo register format.
	 */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
	write_c0_entrylo0(ptep->pte_high);
	ptep++;
	write_c0_entrylo1(ptep->pte_high);
#else
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	local_irq_restore(flags);
}

#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
				       unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	local_irq_save(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	local_irq_restore(flags);
}
#endif

void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	BARRIER;
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}
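/*
 * Rough usage sketch (the values are purely illustrative, not from any
 * particular board):
 *
 *	add_wired_entry(lo0, lo1, vaddr, pagemask);
 *
 * where lo0/lo1 encode PFN plus C/D/V/G bits for the even and odd page of
 * the pair, vaddr is the double-page-aligned VPN2 and pagemask is one of
 * the PageMask encodings such as PM_DEFAULT_MASK used in tlb_init() below.
 */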

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

static int temp_tlb_entry __initdata;
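/*
 * Temporary entries are allocated from the top of the TLB downwards:
 * tlb_init() points temp_tlb_entry at the last TLB index and each call to
 * add_temporary_entry() below claims the next lower slot.  The entries are
 * deliberately not wired, so later TLB replacement is free to recycle them.
 */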

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	local_irq_restore(flags);
	return ret;
}

static void __init probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU, the Config1
	 * register is not supported and we assume an R4k-style TLB.
	 * CPU probing has already figured out the number of TLB entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;

	reg = read_c0_config1();
	if (!((config >> 7) & 3))
		panic("No TLB present");

	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}
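/*
 * The size calculation above decodes the MMUSize field (bits 30:25) of the
 * Config1 register, which holds the number of TLB entries minus one: a core
 * with a 16-entry joint TLB reports 15 there, giving tlbsize = 16.
 */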

void __init tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set for 4kb pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;
	local_flush_tlb_all();

	build_tlb_refill_handler();
}