/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the ColdFire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>

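/*
 * True for addresses in the kernel's vmalloc/kmap window; in kernel
 * mode these are translated through init_mm rather than current->mm.
 */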
#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))

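/*
 * Context (ASID) bookkeeping used by the context allocator (see
 * <asm/mmu_context.h>): the next context number to try, a bitmap of
 * contexts in use, a count of free contexts, and the mm that owns
 * each context.
 */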
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
extern unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	enum zone_type zone;
	int i;

	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

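	/*
	 * Allocate one contiguous block of boot memory big enough to hold
	 * the pte tables that map every physical page.
	 */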
	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) alloc_bootmem_pages(size);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

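	/*
	 * Build the kernel's linear mapping, one pte table per pgd slot,
	 * from PAGE_OFFSET up to high_memory.  Entries beyond the end of
	 * RAM are left invalid.
	 */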
	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;

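	/* Put all of memory into ZONE_DMA; the remaining zones stay empty. */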
	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;
	zones_size[ZONE_DMA] = num_pages;
	free_area_init(zones_size);
}

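/*
 * Software TLB-miss handler.  Walk the page tables for the faulting
 * address and, if a valid translation exists, load it into the ColdFire
 * MMU through the MMUTR/MMUDR/MMUOR registers.  Returns 0 on success
 * and -1 when no entry can be loaded, leaving the fault to the normal
 * fault handling path.
 */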
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

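	/*
	 * Fault address: a DTLB miss latches it in the MMUAR register,
	 * while for an ITLB miss it is computed from the faulting PC plus
	 * the extension-word offset.
	 */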
	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

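	/*
	 * Walk the page tables; any missing level means no TLB entry can
	 * be loaded for this address.
	 */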
	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd))  {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

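	/*
	 * A write fault is only serviced if the page is writable, in which
	 * case the pte is marked dirty before the TLB entry is loaded.
	 */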
	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

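	/*
	 * Load the TLB entry: MMUTR takes the virtual page, ASID and valid
	 * bit, MMUDR the physical page and protection bits, and the write
	 * to MMUOR triggers the update (selecting the ITLB for instruction
	 * misses).
	 */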
	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}
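
/*
 * The allocation fast path that uses the data above lives in
 * <asm/mmu_context.h>.  As a rough, ppc-style sketch (not necessarily
 * the exact implementation, and assuming a NO_CONTEXT sentinel as in
 * the ppc code), handing a context to an mm looks something like:
 *
 *	if (mm->context != NO_CONTEXT)
 *		return;
 *	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
 *		steal_context();
 *	ctx = next_mmu_context;
 *	while (test_and_set_bit(ctx, context_map)) {
 *		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
 *		if (ctx > LAST_CONTEXT)
 *			ctx = 0;
 *	}
 *	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
 *	mm->context = ctx;
 *	context_mm[ctx] = mm;
 */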

/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP.  If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}