/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
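/*
 * ENTER_CRITICAL/EXIT_CRITICAL bracket code that must run with
 * interrupts disabled.  Under SMTC they additionally halt the other
 * VPEs with dvpe()/evpe().  Note that the SMTC variants open and close
 * a C block, so the two macros must always be used as a matched pair
 * within one function.
 */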
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the pages are never written to after initialization we
 * don't have to worry about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because it is used by IP27's special magic
 * initialization code.
 */
unsigned long setup_zero_pages(void)
{
	unsigned int order;
	unsigned long size;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

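	/*
	 * zero_page_mask picks the colour bits of an address within the
	 * zero-page block, so a zero page of matching colour can be
	 * found at empty_zero_page + (addr & zero_page_mask).
	 */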
	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

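/*
 * Map a page at a fixmap virtual address whose cache colour matches
 * the user-space address it is (or will be) mapped at, so that kernel
 * accesses through the mapping cannot create dcache aliases.  The
 * translation is entered straight into the TLB (as a wired entry on
 * non-SMTC kernels) and must be undone with kunmap_coherent().
 * Preemption stays disabled until then.
 */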
void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id() +
		(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
#else
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}

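/*
 * Produce a per-index EntryHi value in CKSEG0.  CKSEG0 is unmapped, so
 * such an entry can never match a translation; it is used to park a
 * TLB entry without risking duplicate-entry machine checks.
 */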
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

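/*
 * Undo kmap_coherent(): on non-SMTC kernels, unwire and invalidate the
 * TLB entry that kmap_coherent() installed, then re-enable preemption.
 */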
void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}

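/*
 * Copy a user page, e.g. on COW faults.  When the source page is
 * mapped and clean on an aliasing dcache, read it through a mapping of
 * the right colour so we see the user's view of the data.  Afterwards
 * flush the destination if the icache cannot fill from the dcache or
 * if the kernel mapping would alias with the user one.
 */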
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

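/*
 * Write kernel data into a user page, e.g. for ptrace.  With an
 * aliasing dcache, write through a mapping of the user's colour when
 * the page is mapped and clean; otherwise write through the plain
 * kernel mapping and remember that the dcache is dirty.  Executable
 * mappings additionally need a cache flush unless the icache fills
 * from the dcache.
 */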
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

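/*
 * Read data from a user page, the counterpart to copy_to_user_page().
 * With an aliasing dcache, read through a mapping of the user's colour
 * when the page is mapped and clean so we observe the user's data;
 * otherwise read through the kernel mapping and mark the dcache dirty.
 */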
void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}

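/*
 * Allocate and wire up the pagetables covering the fixed-address range
 * [start, end) so that fixmap ptes exist when kmap and the coherent
 * kmaps later need them.  Only built for highmem or SMTC kernels.
 */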
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
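/*
 * Return 1 if the page frame number lies within a usable RAM region
 * of the boot memory map, 0 otherwise.
 */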
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

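/*
 * Set up the kernel pagetables, the coherent kmap area and the zone
 * sizes, then hand the page frame ranges to the core VM.  Processors
 * with aliasing dcaches cannot support highmem, so any highmem pages
 * are dropped with a warning.
 */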
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

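/*
 * Late memory initialization: release bootmem to the page allocator,
 * carve out the zero pages, count free and reserved pages and print
 * the traditional memory banner.
 */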
void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages.  */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp) && pfn_valid(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	}
	totalram_pages += totalhigh_pages;
	num_physpages += totalhigh_pages;
#endif

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

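/*
 * Poison and free a range of init pages back to the page allocator.
 * begin and end are physical addresses; only pages lying entirely
 * inside the range are freed.
 */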
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
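/* Free the memory occupied by the initial ramdisk once it is no longer needed. */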
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
			virt_to_phys((void *)start),
			virt_to_phys((void *)end));
}
#endif

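/*
 * Free all memory used only during initialization: firmware-reserved
 * memory first, then the kernel's own __init sections.
 */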
void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif
/*
 * On 64-bit we've got three-level pagetables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until such
 * compilers are officially retired.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);