pgtable_32.c revision 8fe3deef013bebdbed1f75ae59ef9707fb6e5cc7
/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
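			/*
			 * Poke the NMI watchdog periodically so that walking a
			 * very large node does not trigger a lockup report.
			 */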
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
					global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
		global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
					global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
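 * The page tables mapping vaddr must already be populated down to the
 * pte level; any missing level is treated as a BUG.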
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

static int fixmaps;
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
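	/* record that a fixmap slot is now live; reserve_top_address() checks this */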
	fixmaps++;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
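	/* the fixmap area moves with __FIXADDR_TOP, so no fixmap may exist yet */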
	BUG_ON(fixmaps > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

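	/*
	 * With CONFIG_HIGHPTE, user pte pages may come from highmem and must
	 * be mapped (e.g. via kmap_atomic) before they can be touched.
	 */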
#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	return pte;
}

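/* Zero a freshly allocated pmd so it starts with no mappings. */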
void pmd_ctor(struct kmem_cache *cache, void *pmd)
{
	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

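/*
 * The pgd list is threaded through the struct pages backing each pgd:
 * page->index holds the next entry, and page_private() holds the address
 * of the pointer that points at this page (either &pgd_list or the
 * previous entry's ->index), so unlinking never has to walk the list.
 */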
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long)pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long)&page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *)page->index;
	pprev = (struct page **)page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long)pprev);
}

#if (PTRS_PER_PMD == 1)
/* Non-PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
	unsigned long flags;

	/* !PAE, no pagetable sharing */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* must happen under lock */
	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			KERNEL_PGD_PTRS);
	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
				__pa(swapper_pg_dir) >> PAGE_SHIFT,
				USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#else  /* PTRS_PER_PMD > 1 */
/* PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
	/* PAE, kernel PMD may be shared */

	if (SHARED_KERNEL_PMD) {
		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
	} else {
		unsigned long flags;

		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		spin_lock_irqsave(&pgd_lock, flags);
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}
#endif	/* PTRS_PER_PMD */

static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

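/*
 * Number of pgd entries that need their own pmds: only the user entries
 * when the kernel pmd is shared, otherwise every entry.
 */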
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(pgd_t *pgdp)
{
	int i;

	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(pmd);
		}
	}
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < USER_PTRS_PER_PGD; i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(pgd);
			return 0;
		}

		pud_populate(mm, pud, pmd);
	}

	return 1;
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(pgd_t *pgd)
{
}
#endif	/* CONFIG_X86_PAE */

/*
 * If we allocate a pmd for part of the kernel address space, then
 * make sure it is initialized with the appropriate kernel mappings.
 * Otherwise use a cached zeroed pmd.
 */
static pmd_t *pmd_cache_alloc(int idx)
{
	pmd_t *pmd;

	if (idx >= USER_PTRS_PER_PGD) {
		pmd = (pmd_t *)__get_free_page(GFP_KERNEL);

		if (pmd)
			memcpy(pmd,
			       (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
			       sizeof(pmd_t) * PTRS_PER_PMD);
	} else
		pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);

	return pmd;
}

static void pmd_cache_free(pmd_t *pmd, int idx)
{
	if (idx >= USER_PTRS_PER_PGD)
		free_page((unsigned long)pmd);
	else
		kmem_cache_free(pmd_cache, pmd);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;

	mm->pgd = pgd;		/* so that alloc_pd can use it */

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
		pmd_t *pmd = pmd_cache_alloc(i);

		if (!pmd)
			goto out_oom;

		paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
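		/* the "+ 1" sets _PAGE_PRESENT in the new pgd entry */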
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}
	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		quicklist_free(0, pgd_dtor, pgd);
		pgd = NULL;
	}

	return pgd;

out_oom:
	for (i--; i >= 0; i--) {
		pgd_t pgdent = pgd[i];
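		/* undo the "+ 1" (_PAGE_PRESENT) to recover the pmd's physical address */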
		void *pmd = (void *)__va(pgd_val(pgdent)-1);
		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
		pmd_cache_free(pmd, i);
	}
	quicklist_free(0, pgd_dtor, pgd);
	return NULL;
}

void pgd_free(pgd_t *pgd)
{
	int i;

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1)
		for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
			pgd_t pgdent = pgd[i];
			void *pmd = (void *)__va(pgd_val(pgdent)-1);
			paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
			pmd_cache_free(pmd, i);
		}
	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	pgd_mop_up_pmds(pgd);
	quicklist_free(0, pgd_dtor, pgd);
}

void check_pgt_cache(void)
{
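	/* free surplus pages cached on the pgd quicklist via pgd_dtor */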
	quicklist_trim(0, pgd_dtor, 25, 16);
}