/*
 *  linux/arch/cris/arch-v10/mm/tlb.c
 *
 *  Low level TLB handling
 *
 *
 *  Copyright (C) 2000-2007  Axis Communications AB
 *
 *  Authors:   Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <arch/svinto.h>

#define D(x)
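/* D() wraps the debug printouts in this file; since it expands to nothing,
 * all D(printk(...)) calls below are compiled out.
 */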

/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which mm's we have assigned which page_id's, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never running - it is used as an invalid page_id
 * so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush.
 */
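
/* All the flush routines below follow the same pattern: select a TLB entry
 * with R_TLB_SELECT, then rewrite R_TLB_HI (page_id and vpn) and R_TLB_LO
 * (protection bits and pfn) so that the entry can never match again.
 */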

/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
	int i;
	unsigned long flags;

	/* the vpn of i & 0xf is there so that we don't write identical TLB
	 * entries into the same 4-way entry group. details...
	 */

	local_irq_save(flags);
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
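		/* with entry i selected, overwrite it with a mapping that can
		 * never hit: the invalid page_id in the hi word and a cleared,
		 * non-valid lo word.
		 */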
		*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
			      IO_FIELD(R_TLB_HI, vpn,     i & 0xf) );

		*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no) |
			      IO_STATE(R_TLB_LO, valid,  no) |
			      IO_STATE(R_TLB_LO, kernel, no) |
			      IO_STATE(R_TLB_LO, we,     no) |
			      IO_FIELD(R_TLB_LO, pfn,    0) );
	}
	local_irq_restore(flags);
	D(printk("tlb: flushed all\n"));
}

/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int page_id = mm->context.page_id;
	unsigned long flags;

	D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	/* mark the TLB entries that match the page_id as invalid.
	 * here we could also check the _PAGE_GLOBAL bit and NOT flush
	 * global pages. is it worth the extra I/O?
	 */

	local_irq_save(flags);
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
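			/* this entry belongs to the flushed mm - overwrite it
			 * with an entry that can never match again.
			 */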
			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
				      IO_FIELD(R_TLB_HI, vpn,     i & 0xf) );

			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no) |
				      IO_STATE(R_TLB_LO, valid,  no) |
				      IO_STATE(R_TLB_LO, kernel, no) |
				      IO_STATE(R_TLB_LO, we,     no) |
				      IO_FIELD(R_TLB_LO, pfn,    0) );
		}
	}
	local_irq_restore(flags);
}

/* invalidate a single page */

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context.page_id;
	int i;
	unsigned long flags;

	D(printk("tlb: flush page %lx in context %d (%p)\n", addr, page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK; /* perhaps not necessary */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address requested
	 */

	local_irq_save(flags);
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;

		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
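		/* tlb_hi now holds the hi word of the entry selected above:
		 * its page_id and its vpn.
		 */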
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
				addr; /* keeping the same vpn is fine; the
				       * invalid page_id alone prevents a hit. */

			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no) |
				      IO_STATE(R_TLB_LO, valid,  no) |
				      IO_STATE(R_TLB_LO, kernel, no) |
				      IO_STATE(R_TLB_LO, we,     no) |
				      IO_FIELD(R_TLB_LO, pfn,    0) );
		}
	}
	local_irq_restore(flags);
}
136
137/*
138 * Initialize the context related info for a new mm_struct
139 * instance.
140 */
141
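/* The new mm starts without a page_id; a real one is assigned later by
 * get_mmu_context(), called from switch_mm() below.
 */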
int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.page_id = NO_CONTEXT;
	return 0;
}

/* called in schedule() just before actually doing the switch_to */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	if (prev != next) {
		/* make sure we have a context */
		get_mmu_context(next);

		/* remember the pgd for the fault handlers.
		 * this is similar to the pgd register in some other CPUs.
		 * we need our own copy of it because current and active_mm
		 * might be invalid at points where we still need to
		 * dereference the pgd.
		 */

		per_cpu(current_pgd, smp_processor_id()) = next->pgd;

		/* switch context in the MMU */

		D(printk(KERN_DEBUG "switching mmu_context to %d (%p)\n",
			 next->context.page_id, next));

		*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT,
					  page_id, next->context.page_id);
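		/* from now on, TLB lookups are tagged with next's page_id, so
		 * only entries carrying that page_id (or the global bit) hit.
		 */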
	}
}