tlb.c revision 3ec704e6660aa58505110a50102e57cdb9daa044
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "as-layout.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"

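/*
 * Queue a host mmap of [virt, virt + len) backed by the physical range
 * starting at phys.  If the new mapping simply extends the previous op
 * in the queue (same protections, same descriptor, contiguous in both
 * address and file offset), the two are merged.  A full ops array is
 * flushed out through do_ops before the new entry is added.
 */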
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    int r, int w, int x, struct host_vm_op *ops, int *index,
		    int last_filled, union mm_context *mmu, void **flush,
		    int (*do_ops)(union mm_context *, struct host_vm_op *,
				  int, int, void **))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if(*index != -1){
		last = &ops[*index];
		if((last->type == MMAP) &&
		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
		   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
		   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
		   (last->u.mmap.offset + last->u.mmap.len == offset)){
			last->u.mmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MMAP,
					       .u = { .mmap = {
						       .addr	= virt,
						       .len	= len,
						       .r	= r,
						       .w	= w,
						       .x	= x,
						       .fd	= fd,
						       .offset	= offset }
					       } });
	return ret;
}

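/*
 * Queue a host munmap of [addr, addr + len), merging it into the
 * previous op when that is an munmap ending exactly at addr.  As in
 * add_mmap, a full ops array is flushed through do_ops first.
 */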
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_op *ops, int *index, int last_filled,
		      union mm_context *mmu, void **flush,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MUNMAP) &&
		   (last->u.munmap.addr + last->u.munmap.len == addr)){
			last->u.munmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MUNMAP,
					       .u = { .munmap = {
							.addr	= addr,
							.len	= len } } });
	return ret;
}

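/*
 * Queue a host mprotect of [addr, addr + len), merging it into an
 * adjacent mprotect op with identical r/w/x bits when possible.
 */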
static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
			int x, struct host_vm_op *ops, int *index,
			int last_filled, union mm_context *mmu, void **flush,
			int (*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MPROTECT) &&
		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
		   (last->u.mprotect.x == x)){
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MPROTECT,
					       .u = { .mprotect = {
						       .addr	= addr,
						       .len	= len,
						       .r	= r,
						       .w	= w,
						       .x	= x } } });
	return ret;
}

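/* Advance n to the next inc-aligned boundary; inc must be a power of two. */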
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

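/*
 * update_pte_range and the pmd/pud walkers below scan the page tables
 * for a range and turn every out-of-date entry into queued host ops:
 * new pages become mmaps or munmaps, protection changes become
 * mprotects.  A pte that is not young is mapped with no access and a
 * pte that is not dirty is mapped read-only, so the resulting faults
 * keep the accessed and dirty bits up to date.
 */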
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pte_t *pte;
	int r, w, x, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte)) {
			w = 0;
		}
		if(force || pte_newpage(*pte)){
			if(pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, r, w, x, ops,
					       op_index, last_op, mmu, flush,
					       do_ops);
			else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
					      last_op, mmu, flush, do_ops);
		}
		else if(pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
					   op_index, last_op, mmu, flush,
					   do_ops);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
	return ret;
}

static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if(!pmd_present(*pmd)){
			if(force || pmd_newpage(*pmd)){
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, ops, last_op,
					    op_index, force, mmu, flush,
					    do_ops);
	} while (pmd++, addr = next, ((addr != end) && !ret));
	return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if(!pud_present(*pud)){
			if(force || pud_newpage(*pud)){
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, ops, last_op,
					    op_index, force, mmu, flush,
					    do_ops);
	} while (pud++, addr = next, ((addr != end) && !ret));
	return ret;
}

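/*
 * Walk the page tables from start_addr to end_addr, queueing host ops
 * for every out-of-date entry (every entry when force is set) and
 * flushing whatever remains queued through do_ops.  There is no way
 * to return failure to the callers of fix_range_common, so a failed
 * flush kills the current process.
 */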
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	pgd_t *pgd;
	union mm_context *mmu = &mm->context;
	struct host_vm_op ops[1];
	unsigned long addr = start_addr, next;
	int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
	void *flush = NULL;

	ops[0].type = NONE;
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if(!pgd_present(*pgd)){
			if (force || pgd_newpage(*pgd)){
				ret = add_munmap(addr, next - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, ops, last_op,
					    &op_index, force, mmu, &flush,
					    do_ops);
	} while (pgd++, addr = next, ((addr != end_addr) && !ret));

	if(!ret)
		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

	/* This is not an else because ret is modified above */
	if(ret) {
		printk("fix_range_common: failed, killing current process\n");
		force_sig(SIGKILL, current);
	}
}

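/*
 * Bring the host mappings for a kernel address range of init_mm up to
 * date: unmap ranges whose page table entries have gone away, and
 * remap or reprotect individual pages as needed.  Returns 1 if
 * anything was changed.
 */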
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		if(!pgd_present(*pgd)){
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if(last > end)
				last = end;
			if(pgd_newpage(*pgd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if(!pud_present(*pud)){
			last = ADD_ROUND(addr, PUD_SIZE);
			if(last > end)
				last = end;
			if(pud_newpage(*pud)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if(!pmd_present(*pmd)){
			last = ADD_ROUND(addr, PMD_SIZE);
			if(last > end)
				last = end;
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if(!pte_present(*pte) || pte_newpage(*pte)){
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if(err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if(pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if(pte_newprot(*pte)){
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return(updated);
}

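/*
 * Out-of-line wrappers around the page table accessors, for callers
 * that can't use the inline versions directly.
 */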
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return(pte_offset_map(pmd, addr));
}

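/*
 * The flush_tlb_* entry points below dispatch to the tt-mode or
 * skas-mode implementation through CHOOSE_MODE.
 */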
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;

	CHOOSE_MODE(flush_tlb_range(vma, address, address + PAGE_SIZE),
		    flush_tlb_page_skas(vma, address));
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
			 flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
	CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
		    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
	CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
			 end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}