/*
 *  arch/s390/mm/init.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

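/* Per-CPU state for the generic mmu_gather (TLB flush batching) code. */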
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

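/*
 * swapper_pg_dir is the kernel's top-level page table directory;
 * empty_zero_page backs the shared zero page used for read faults on
 * anonymous mappings.
 */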
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
char  empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

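/*
 * diag10 - advise the hypervisor that a page is unused
 *
 * Diagnose 0x10 releases a guest page so that z/VM can reclaim the
 * backing frame.  The diagnose only takes 31-bit addresses, hence the
 * sam31/sam64 mode switch on 64-bit kernels and the bail-out for
 * addresses at or above 0x7ff00000.  The __ex_table entry turns a
 * faulting diagnose (e.g. when not running under VM) into a no-op.
 */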
void diag10(unsigned long addr)
{
	if (addr >= 0x7ff00000)
		return;
#ifdef CONFIG_64BIT
	asm volatile (
		"   sam31\n"
		"   diag %0,%0,0x10\n"
		"0: sam64\n"
		".section __ex_table,\"a\"\n"
		"   .align 8\n"
		"   .quad 0b, 0b\n"
		".previous\n"
		: : "a" (addr));
#else
	asm volatile (
		"   diag %0,%0,0x10\n"
		"0:\n"
		".section __ex_table,\"a\"\n"
		"   .align 4\n"
		"   .long 0b, 0b\n"
		".previous\n"
		: : "a" (addr));
#endif
}

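/*
 * show_mem - print a summary of memory usage, e.g. for the SysRq 'm'
 * handler and out-of-memory reports.
 */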
void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map + i))
			reserved++;
		else if (PageSwapCache(mem_map + i))
			cached++;
		else if (page_count(mem_map + i))
			shared += page_count(mem_map + i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

extern unsigned long __initdata zholes_size[];

/*
 * paging_init() sets up the page tables
 */

#ifndef CONFIG_64BIT
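/*
 * The 31 bit kernel uses a two-level scheme: the entries of
 * swapper_pg_dir are treated as pmds, and each one is populated with a
 * page table that identity-maps the next PTRS_PER_PTE page frames.
 * PTEs covering the kernel's read-only data get _PAGE_RO; PTEs beyond
 * max_low_pfn are marked empty.
 */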
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	pte_t pte;
	int i;
	unsigned long tmp;
	unsigned long pfn = 0;
	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
	static const int ssm_mask = 0x04000000L;
	unsigned long ro_start_pfn, ro_end_pfn;
	unsigned long zones_size[MAX_NR_ZONES];

	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);

	memset(zones_size, 0, sizeof(zones_size));
	zones_size[ZONE_DMA] = max_low_pfn;
	free_area_init_node(0, &contig_page_data, zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
			    zholes_size);

	/* unmap whole virtual address space */

	pg_dir = swapper_pg_dir;

	for (i = 0; i < PTRS_PER_PGD; i++)
		pmd_clear((pmd_t *) pg_dir++);

	/*
	 * map whole physical memory to virtual memory (identity mapping)
	 */

	pg_dir = swapper_pg_dir;

	while (pfn < max_low_pfn) {
		/*
		 * pg_table is physical at this point
		 */
		pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);

		pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
		pg_dir++;

		for (tmp = 0; tmp < PTRS_PER_PTE; tmp++, pg_table++) {
			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
			else
				pte = pfn_pte(pfn, PAGE_KERNEL);
			if (pfn >= max_low_pfn)
				pte_val(pte) = _PAGE_TYPE_EMPTY;
			set_pte(pg_table, pte);
			pfn++;
		}
	}

	S390_lowcore.kernel_asce = pgdir_k;

	/*
	 * Enable virtual mapping in kernel mode: load the new ASCE into
	 * control registers 1 (primary), 7 (secondary) and 13 (home),
	 * then set the DAT bit in the PSW with ssm.
	 */
	__asm__ __volatile__("    lctl  1,1,%0\n"
			     "    lctl  7,7,%0\n"
			     "    lctl  13,13,%0\n"
			     "    ssm   %1"
			     : : "m" (pgdir_k), "m" (ssm_mask));

	local_flush_tlb();
}

#else /* CONFIG_64BIT */

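/*
 * The 64 bit kernel uses a three-level scheme: swapper_pg_dir serves
 * as the region table, each of its entries points at a four-page
 * segment table, and each segment table entry points at a page table,
 * identity-mapping physical memory up to max_low_pfn as above.
 */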
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int i, j, k;
	unsigned long pfn = 0;
	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
		_KERN_REGION_TABLE;
	static const int ssm_mask = 0x04000000L;
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long dma_pfn, high_pfn;
	unsigned long ro_start_pfn, ro_end_pfn;

	memset(zones_size, 0, sizeof(zones_size));
	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
	high_pfn = max_low_pfn;
	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);

	if (dma_pfn > high_pfn)
		zones_size[ZONE_DMA] = high_pfn;
	else {
		zones_size[ZONE_DMA] = dma_pfn;
		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
	}

	/* Initialize mem_map[].  */
	free_area_init_node(0, &contig_page_data, zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);

	/*
	 * map whole physical memory to virtual memory (identity mapping)
	 */

	pg_dir = swapper_pg_dir;

	for (i = 0; i < PTRS_PER_PGD; i++, pg_dir++) {

		if (pfn >= max_low_pfn) {
			pgd_clear(pg_dir);
			continue;
		}

		pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4);
		pgd_populate(&init_mm, pg_dir, pm_dir);

		for (j = 0; j < PTRS_PER_PMD; j++, pm_dir++) {
			if (pfn >= max_low_pfn) {
				pmd_clear(pm_dir);
				continue;
			}

			pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);

			for (k = 0; k < PTRS_PER_PTE; k++, pt_dir++) {
				if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
					pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
				else
					pte = pfn_pte(pfn, PAGE_KERNEL);
				if (pfn >= max_low_pfn)
					pte_val(pte) = _PAGE_TYPE_EMPTY;
				set_pte(pt_dir, pte);
				pfn++;
			}
		}
	}

	S390_lowcore.kernel_asce = pgdir_k;

	/*
	 * Enable virtual mapping in kernel mode: load the new ASCE into
	 * control registers 1 (primary), 7 (secondary) and 13 (home),
	 * then set the DAT bit in the PSW with ssm.
	 */
	__asm__ __volatile__("lctlg 1,1,%0\n\t"
			     "lctlg 7,7,%0\n\t"
			     "lctlg 13,13,%0\n\t"
			     "ssm   %1"
			     : : "m" (pgdir_k), "m" (ssm_mask));

	local_flush_tlb();
}
#endif /* CONFIG_64BIT */
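/*
 * mem_init - release boot memory and print the memory banner
 *
 * All bootmem pages are handed over to the buddy allocator; the zero
 * page is cleared before it can be mapped.
 */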
void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       max_mapnr << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT - 10),
	       datasize >> 10,
	       initsize >> 10);
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&__start_rodata,
	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
}
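/*
 * free_initmem - release the pages holding __init code and data
 *
 * Each page between __init_begin and __init_end is un-reserved, given
 * a fresh page count of one and handed to the buddy allocator via
 * free_page().
 */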
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10);
}
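/*
 * free_initrd_mem - release the page range that held the initial
 * ramdisk, once its contents are no longer needed.
 */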
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n",
		       (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif