page_alloc.c revision a226f6c899799fe2c4919daa0767ac579c88f7bd
1/*
2 *  linux/mm/page_alloc.c
3 *
4 *  Manages the free list; the system allocates free pages here.
5 *  Note that kmalloc() lives in slab.c
6 *
7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8 *  Swap reorganised 29.12.95, Stephen Tweedie
9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
17#include <linux/config.h>
18#include <linux/stddef.h>
19#include <linux/mm.h>
20#include <linux/swap.h>
21#include <linux/interrupt.h>
22#include <linux/pagemap.h>
23#include <linux/bootmem.h>
24#include <linux/compiler.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/suspend.h>
28#include <linux/pagevec.h>
29#include <linux/blkdev.h>
30#include <linux/slab.h>
31#include <linux/notifier.h>
32#include <linux/topology.h>
33#include <linux/sysctl.h>
34#include <linux/cpu.h>
35#include <linux/cpuset.h>
36#include <linux/memory_hotplug.h>
37#include <linux/nodemask.h>
38#include <linux/vmalloc.h>
39
40#include <asm/tlbflush.h>
41#include "internal.h"
42
43/*
44 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
45 * initializer cleaner
46 */
47nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
48EXPORT_SYMBOL(node_online_map);
49nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
50EXPORT_SYMBOL(node_possible_map);
51struct pglist_data *pgdat_list __read_mostly;
52unsigned long totalram_pages __read_mostly;
53unsigned long totalhigh_pages __read_mostly;
54long nr_swap_pages;
55
56static void fastcall free_hot_cold_page(struct page *page, int cold);
57
58/*
59 * results with 256, 32 in the lowmem_reserve sysctl:
60 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
61 *	1G machine -> (16M dma, 784M normal, 224M high)
62 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
63 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
64 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
65 *
66 * TBD: should special case ZONE_DMA32 machines here - in those we normally
67 * don't need any ZONE_NORMAL reservation
68 */
69int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
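
/*
 * Worked arithmetic (editorial note, illustrative only): with the default
 * ratios above on the 1G example machine, a ZONE_NORMAL allocation keeps
 * 784M/256 ~= 3M of ZONE_DMA off limits, while a ZONE_HIGHMEM allocation
 * keeps 224M/32 = 7M of ZONE_NORMAL and (224M+784M)/256 ~= 4M of ZONE_DMA
 * off limits, which is where the figures in the comment above come from.
 */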
70
71EXPORT_SYMBOL(totalram_pages);
72
73/*
74 * Used by page_zone() to look up the address of the struct zone whose
75 * id is encoded in the upper bits of page->flags
76 */
77struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
78EXPORT_SYMBOL(zone_table);
79
80static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
81int min_free_kbytes = 1024;
82
83unsigned long __initdata nr_kernel_pages;
84unsigned long __initdata nr_all_pages;
85
86#ifdef CONFIG_DEBUG_VM
87static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
88{
89	int ret = 0;
90	unsigned seq;
91	unsigned long pfn = page_to_pfn(page);
92
93	do {
94		seq = zone_span_seqbegin(zone);
95		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
96			ret = 1;
97		else if (pfn < zone->zone_start_pfn)
98			ret = 1;
99	} while (zone_span_seqretry(zone, seq));
100
101	return ret;
102}
103
104static int page_is_consistent(struct zone *zone, struct page *page)
105{
106#ifdef CONFIG_HOLES_IN_ZONE
107	if (!pfn_valid(page_to_pfn(page)))
108		return 0;
109#endif
110	if (zone != page_zone(page))
111		return 0;
112
113	return 1;
114}
115/*
116 * Temporary debugging check for pages not lying within a given zone.
117 */
118static int bad_range(struct zone *zone, struct page *page)
119{
120	if (page_outside_zone_boundaries(zone, page))
121		return 1;
122	if (!page_is_consistent(zone, page))
123		return 1;
124
125	return 0;
126}
127
128#else
129static inline int bad_range(struct zone *zone, struct page *page)
130{
131	return 0;
132}
133#endif
134
135static void bad_page(const char *function, struct page *page)
136{
137	printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
138		function, current->comm, page);
139	printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
140		(int)(2*sizeof(unsigned long)), (unsigned long)page->flags,
141		page->mapping, page_mapcount(page), page_count(page));
142	printk(KERN_EMERG "Backtrace:\n");
143	dump_stack();
144	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
145	page->flags &= ~(1 << PG_lru	|
146			1 << PG_private |
147			1 << PG_locked	|
148			1 << PG_active	|
149			1 << PG_dirty	|
150			1 << PG_reclaim |
151			1 << PG_slab    |
152			1 << PG_swapcache |
153			1 << PG_writeback );
154	set_page_count(page, 0);
155	reset_page_mapcount(page);
156	page->mapping = NULL;
157	add_taint(TAINT_BAD_PAGE);
158}
159
160/*
161 * Higher-order pages are called "compound pages".  They are structured thusly:
162 *
163 * The first PAGE_SIZE page is called the "head page".
164 *
165 * The remaining PAGE_SIZE pages are called "tail pages".
166 *
167 * All pages have PG_compound set.  All pages have their ->private pointing at
168 * the head page (even the head page has this).
169 *
170 * The first tail page's ->mapping, if non-zero, holds the address of the
171 * compound page's put_page() function.
172 *
173 * The order of the allocation is stored in the first tail page's ->index.
174 * This is only for debug at present.  This usage means that zero-order pages
175 * may not be compound.
176 */
177static void prep_compound_page(struct page *page, unsigned long order)
178{
179	int i;
180	int nr_pages = 1 << order;
181
182	page[1].mapping = NULL;
183	page[1].index = order;
184	for (i = 0; i < nr_pages; i++) {
185		struct page *p = page + i;
186
187		SetPageCompound(p);
188		set_page_private(p, (unsigned long)page);
189	}
190}
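
/*
 * Illustrative layout (editorial note, not part of the original source):
 * after prep_compound_page(page, 2) the four struct pages look like
 *
 *	page[0]: PG_compound set, private = page              (head page)
 *	page[1]: PG_compound set, private = page, mapping = NULL, index = 2
 *	page[2]: PG_compound set, private = page              (tail page)
 *	page[3]: PG_compound set, private = page              (tail page)
 *
 * matching the description of compound pages in the comment above.
 */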
191
192static void destroy_compound_page(struct page *page, unsigned long order)
193{
194	int i;
195	int nr_pages = 1 << order;
196
197	if (!PageCompound(page))
198		return;
199
200	if (page[1].index != order)
201		bad_page(__FUNCTION__, page);
202
203	for (i = 0; i < nr_pages; i++) {
204		struct page *p = page + i;
205
206		if (!PageCompound(p))
207			bad_page(__FUNCTION__, page);
208		if (page_private(p) != (unsigned long)page)
209			bad_page(__FUNCTION__, page);
210		ClearPageCompound(p);
211	}
212}
213
214/*
215 * Functions for dealing with a page's order in the buddy system.
216 * zone->lock is already acquired when we use these.
217 * So, we don't need atomic page->flags operations here.
218 */
219static inline unsigned long page_order(struct page *page) {
220	return page_private(page);
221}
222
223static inline void set_page_order(struct page *page, int order) {
224	set_page_private(page, order);
225	__SetPagePrivate(page);
226}
227
228static inline void rmv_page_order(struct page *page)
229{
230	__ClearPagePrivate(page);
231	set_page_private(page, 0);
232}
233
234/*
235 * Locate the struct page for both the matching buddy in our
236 * pair (buddy1) and the combined order O+1 page they form (page).
237 *
238 * 1) Any buddy B1 will have an order O twin B2 which satisfies
239 * the following equation:
240 *     B2 = B1 ^ (1 << O)
241 * For example, if the starting buddy (buddy1) is #8, its order-1
242 * buddy is #10:
243 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
244 *
245 * 2) Any buddy B will have an order O+1 parent P which
246 * satisfies the following equation:
247 *     P = B & ~(1 << O)
248 *
249 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
250 */
251static inline struct page *
252__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
253{
254	unsigned long buddy_idx = page_idx ^ (1 << order);
255
256	return page + (buddy_idx - page_idx);
257}
258
259static inline unsigned long
260__find_combined_index(unsigned long page_idx, unsigned int order)
261{
262	return (page_idx & ~(1 << order));
263}
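
/*
 * Worked example (editorial note, illustrative only): the buddy and parent
 * arithmetic above, traced for concrete indices.  "idx" stands for page_idx
 * after the MAX_ORDER masking done in __free_pages_bulk():
 *
 *	idx 8,  order 1:  buddy = 8  ^ (1 << 1) = 10, parent = 8  & ~(1 << 1) = 8
 *	idx 10, order 1:  buddy = 10 ^ (1 << 1) = 8,  parent = 10 & ~(1 << 1) = 8
 *	idx 8,  order 2:  buddy = 8  ^ (1 << 2) = 12, parent = 8  & ~(1 << 2) = 8
 *
 * so a buddy pair always shares the same combined (parent) index.
 */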
264
265/*
266 * This function checks whether a page is free && is the buddy
267 * we can coalesce with. A page and its buddy can be coalesced if
268 * (a) the buddy is not in a hole &&
269 * (b) the buddy is free &&
270 * (c) the buddy is on the buddy system &&
271 * (d) a page and its buddy have the same order.
272 * For recording a page's order, we use page_private(page) and PG_private.
273 *
274 */
275static inline int page_is_buddy(struct page *page, int order)
276{
277#ifdef CONFIG_HOLES_IN_ZONE
278	if (!pfn_valid(page_to_pfn(page)))
279		return 0;
280#endif
281
282	if (PagePrivate(page)		&&
283	    (page_order(page) == order) &&
284	    page_count(page) == 0)
285		return 1;
286	return 0;
287}
288
289/*
290 * Freeing function for a buddy system allocator.
291 *
292 * The concept of a buddy system is to maintain a direct-mapped table
293 * (containing bit values) for memory blocks of various "orders".
294 * The bottom level table contains the map for the smallest allocatable
295 * units of memory (here, pages), and each level above it describes
296 * pairs of units from the levels below, hence, "buddies".
297 * At a high level, all that happens here is marking the table entry
298 * at the bottom level available, and propagating the changes upward
299 * as necessary, plus some accounting needed to play nicely with other
300 * parts of the VM system.
301 * At each level, we keep a list of pages, which are heads of contiguous
302 * free pages of length (1 << order) and marked with PG_private. A page's
303 * order is recorded in the page_private(page) field.
304 * So when we are allocating or freeing one, we can derive the state of the
305 * other.  That is, if we allocate a small block, and both were
306 * free, the remainder of the region must be split into blocks.
307 * If a block is freed, and its buddy is also free, then this
308 * triggers coalescing into a block of larger size.
309 *
310 * -- wli
311 */
312
313static inline void __free_pages_bulk (struct page *page,
314		struct zone *zone, unsigned int order)
315{
316	unsigned long page_idx;
317	int order_size = 1 << order;
318
319	if (unlikely(order))
320		destroy_compound_page(page, order);
321
322	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
323
324	BUG_ON(page_idx & (order_size - 1));
325	BUG_ON(bad_range(zone, page));
326
327	zone->free_pages += order_size;
328	while (order < MAX_ORDER-1) {
329		unsigned long combined_idx;
330		struct free_area *area;
331		struct page *buddy;
332
333		buddy = __page_find_buddy(page, page_idx, order);
334		if (!page_is_buddy(buddy, order))
335			break;		/* Move the buddy up one level. */
336
337		list_del(&buddy->lru);
338		area = zone->free_area + order;
339		area->nr_free--;
340		rmv_page_order(buddy);
341		combined_idx = __find_combined_index(page_idx, order);
342		page = page + (combined_idx - page_idx);
343		page_idx = combined_idx;
344		order++;
345	}
346	set_page_order(page, order);
347	list_add(&page->lru, &zone->free_area[order].free_list);
348	zone->free_area[order].nr_free++;
349}
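
/*
 * Worked example (editorial note, illustrative only): suppose an order-0
 * page at idx 5 is freed while idx 4 is a free order-0 block and idx 6-7 a
 * free order-1 block.  The loop above then coalesces as follows:
 *
 *	order 0: buddy of 5 is 4, free and of order 0 -> merge, idx becomes 4
 *	order 1: buddy of 4 is 6, free and of order 1 -> merge, idx stays 4
 *	order 2: buddy of 4 is 0, assumed not free    -> stop
 *
 * leaving one order-2 block at idx 4 on free_area[2].free_list.
 */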
350
351static inline int free_pages_check(const char *function, struct page *page)
352{
353	if (unlikely(page_mapcount(page) |
354		(page->mapping != NULL)  |
355		(page_count(page) != 0)  |
356		(page->flags & (
357			1 << PG_lru	|
358			1 << PG_private |
359			1 << PG_locked	|
360			1 << PG_active	|
361			1 << PG_reclaim	|
362			1 << PG_slab	|
363			1 << PG_swapcache |
364			1 << PG_writeback |
365			1 << PG_reserved ))))
366		bad_page(function, page);
367	if (PageDirty(page))
368		__ClearPageDirty(page);
369	/*
370	 * For now, we report if PG_reserved was found set, but do not
371	 * clear it, and do not free the page.  But we shall soon need
372	 * to do more, for when the ZERO_PAGE count wraps negative.
373	 */
374	return PageReserved(page);
375}
376
377/*
378 * Frees a list of pages.
379 * Assumes all pages on list are in same zone, and of same order.
380 * count is the number of pages to free.
381 *
382 * If the zone was previously in an "all pages pinned" state then look to
383 * see if this freeing clears that state.
384 *
385 * And clear the zone's pages_scanned counter, to hold off the "all pages are
386 * pinned" detection logic.
387 */
388static int
389free_pages_bulk(struct zone *zone, int count,
390		struct list_head *list, unsigned int order)
391{
392	struct page *page = NULL;
393	int ret = 0;
394
395	spin_lock(&zone->lock);
396	zone->all_unreclaimable = 0;
397	zone->pages_scanned = 0;
398	while (!list_empty(list) && count--) {
399		page = list_entry(list->prev, struct page, lru);
400		/* have to delete it as __free_pages_bulk manipulates the lru list */
401		list_del(&page->lru);
402		__free_pages_bulk(page, zone, order);
403		ret++;
404	}
405	spin_unlock(&zone->lock);
406	return ret;
407}
408
409void __free_pages_ok(struct page *page, unsigned int order)
410{
411	unsigned long flags;
412	LIST_HEAD(list);
413	int i;
414	int reserved = 0;
415
416	arch_free_page(page, order);
417
418#ifndef CONFIG_MMU
419	if (order > 0)
420		for (i = 1 ; i < (1 << order) ; ++i)
421			__put_page(page + i);
422#endif
423
424	for (i = 0 ; i < (1 << order) ; ++i)
425		reserved += free_pages_check(__FUNCTION__, page + i);
426	if (reserved)
427		return;
428
429	list_add(&page->lru, &list);
430	mod_page_state(pgfree, 1 << order);
431	kernel_map_pages(page, 1<<order, 0);
432	local_irq_save(flags);
433	free_pages_bulk(page_zone(page), 1, &list, order);
434	local_irq_restore(flags);
435}
436
437/*
438 * permit the bootmem allocator to evade page validation on high-order frees
439 */
440void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
441{
442	if (order == 0) {
443		__ClearPageReserved(page);
444		set_page_count(page, 0);
445
446		free_hot_cold_page(page, 0);
447	} else {
448		LIST_HEAD(list);
449		int loop;
450
451		for (loop = 0; loop < BITS_PER_LONG; loop++) {
452			struct page *p = &page[loop];
453
454			if (loop + 16 < BITS_PER_LONG)
455				prefetchw(p + 16);
456			__ClearPageReserved(p);
457			set_page_count(p, 0);
458		}
459
460		arch_free_page(page, order);
461
462		mod_page_state(pgfree, 1 << order);
463
464		list_add(&page->lru, &list);
465		kernel_map_pages(page, 1 << order, 0);
466		free_pages_bulk(page_zone(page), 1, &list, order);
467	}
468}
469
470
471/*
472 * The order of subdivision here is critical for the IO subsystem.
473 * Please do not alter this order without good reasons and regression
474 * testing. Specifically, as large blocks of memory are subdivided,
475 * the order in which smaller blocks are delivered depends on the order
476 * they're subdivided in this function. This is the primary factor
477 * influencing the order in which pages are delivered to the IO
478 * subsystem according to empirical testing, and this is also justified
479 * by considering the behavior of a buddy system containing a single
480 * large block of memory acted on by a series of small allocations.
481 * This behavior is a critical factor in sglist merging's success.
482 *
483 * -- wli
484 */
485static inline void expand(struct zone *zone, struct page *page,
486 	int low, int high, struct free_area *area)
487{
488	unsigned long size = 1 << high;
489
490	while (high > low) {
491		area--;
492		high--;
493		size >>= 1;
494		BUG_ON(bad_range(zone, &page[size]));
495		list_add(&page[size].lru, &area->free_list);
496		area->nr_free++;
497		set_page_order(&page[size], high);
498	}
499}
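
/*
 * Worked example (editorial note, illustrative only): if __rmqueue() has to
 * satisfy an order-0 request from an order-3 block, expand() runs with
 * low = 0, high = 3 and hands the unused halves back top-down:
 *
 *	high 2, size 4: pages [4..7] go onto free_area[2]
 *	high 1, size 2: pages [2..3] go onto free_area[1]
 *	high 0, size 1: page  [1]    goes onto free_area[0]
 *
 * leaving page [0] for the caller - the subdivision order the comment above
 * says the IO subsystem depends on.
 */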
500
501/*
502 * This page is about to be returned from the page allocator
503 */
504static int prep_new_page(struct page *page, int order)
505{
506	if (unlikely(page_mapcount(page) |
507		(page->mapping != NULL)  |
508		(page_count(page) != 0)  |
509		(page->flags & (
510			1 << PG_lru	|
511			1 << PG_private	|
512			1 << PG_locked	|
513			1 << PG_active	|
514			1 << PG_dirty	|
515			1 << PG_reclaim	|
516			1 << PG_slab    |
517			1 << PG_swapcache |
518			1 << PG_writeback |
519			1 << PG_reserved ))))
520		bad_page(__FUNCTION__, page);
521
522	/*
523	 * For now, we report if PG_reserved was found set, but do not
524	 * clear it, and do not allocate the page: as a safety net.
525	 */
526	if (PageReserved(page))
527		return 1;
528
529	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
530			1 << PG_referenced | 1 << PG_arch_1 |
531			1 << PG_checked | 1 << PG_mappedtodisk);
532	set_page_private(page, 0);
533	set_page_refs(page, order);
534	kernel_map_pages(page, 1 << order, 1);
535	return 0;
536}
537
538/*
539 * Do the hard work of removing an element from the buddy allocator.
540 * Call me with the zone->lock already held.
541 */
542static struct page *__rmqueue(struct zone *zone, unsigned int order)
543{
544	struct free_area * area;
545	unsigned int current_order;
546	struct page *page;
547
548	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
549		area = zone->free_area + current_order;
550		if (list_empty(&area->free_list))
551			continue;
552
553		page = list_entry(area->free_list.next, struct page, lru);
554		list_del(&page->lru);
555		rmv_page_order(page);
556		area->nr_free--;
557		zone->free_pages -= 1UL << order;
558		expand(zone, page, order, current_order, area);
559		return page;
560	}
561
562	return NULL;
563}
564
565/*
566 * Obtain a specified number of elements from the buddy allocator, all under
567 * a single hold of the lock, for efficiency.  Add them to the supplied list.
568 * Returns the number of new pages which were placed at *list.
569 */
570static int rmqueue_bulk(struct zone *zone, unsigned int order,
571			unsigned long count, struct list_head *list)
572{
573	int i;
574
575	spin_lock(&zone->lock);
576	for (i = 0; i < count; ++i) {
577		struct page *page = __rmqueue(zone, order);
578		if (unlikely(page == NULL))
579			break;
580		list_add_tail(&page->lru, list);
581	}
582	spin_unlock(&zone->lock);
583	return i;
584}
585
586#ifdef CONFIG_NUMA
587/* Called from the slab reaper to drain remote pagesets */
588void drain_remote_pages(void)
589{
590	struct zone *zone;
591	int i;
592	unsigned long flags;
593
594	local_irq_save(flags);
595	for_each_zone(zone) {
596		struct per_cpu_pageset *pset;
597
598		/* Do not drain local pagesets */
599		if (zone->zone_pgdat->node_id == numa_node_id())
600			continue;
601
602		pset = zone->pageset[smp_processor_id()];
603		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
604			struct per_cpu_pages *pcp;
605
606			pcp = &pset->pcp[i];
607			if (pcp->count)
608				pcp->count -= free_pages_bulk(zone, pcp->count,
609						&pcp->list, 0);
610		}
611	}
612	local_irq_restore(flags);
613}
614#endif
615
616#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
617static void __drain_pages(unsigned int cpu)
618{
619	unsigned long flags;
620	struct zone *zone;
621	int i;
622
623	for_each_zone(zone) {
624		struct per_cpu_pageset *pset;
625
626		pset = zone_pcp(zone, cpu);
627		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
628			struct per_cpu_pages *pcp;
629
630			pcp = &pset->pcp[i];
631			local_irq_save(flags);
632			pcp->count -= free_pages_bulk(zone, pcp->count,
633						&pcp->list, 0);
634			local_irq_restore(flags);
635		}
636	}
637}
638#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
639
640#ifdef CONFIG_PM
641
642void mark_free_pages(struct zone *zone)
643{
644	unsigned long zone_pfn, flags;
645	int order;
646	struct list_head *curr;
647
648	if (!zone->spanned_pages)
649		return;
650
651	spin_lock_irqsave(&zone->lock, flags);
652	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
653		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));
654
655	for (order = MAX_ORDER - 1; order >= 0; --order)
656		list_for_each(curr, &zone->free_area[order].free_list) {
657			unsigned long start_pfn, i;
658
659			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));
660
661			for (i=0; i < (1<<order); i++)
662				SetPageNosaveFree(pfn_to_page(start_pfn+i));
663	}
664	spin_unlock_irqrestore(&zone->lock, flags);
665}
666
667/*
668 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
669 */
670void drain_local_pages(void)
671{
672	unsigned long flags;
673
674	local_irq_save(flags);
675	__drain_pages(smp_processor_id());
676	local_irq_restore(flags);
677}
678#endif /* CONFIG_PM */
679
680static void zone_statistics(struct zonelist *zonelist, struct zone *z)
681{
682#ifdef CONFIG_NUMA
683	unsigned long flags;
684	int cpu;
685	pg_data_t *pg = z->zone_pgdat;
686	pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
687	struct per_cpu_pageset *p;
688
689	local_irq_save(flags);
690	cpu = smp_processor_id();
691	p = zone_pcp(z,cpu);
692	if (pg == orig) {
693		p->numa_hit++;
694	} else {
695		p->numa_miss++;
696		zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
697	}
698	if (pg == NODE_DATA(numa_node_id()))
699		p->local_node++;
700	else
701		p->other_node++;
702	local_irq_restore(flags);
703#endif
704}
705
706/*
707 * Free a 0-order page
708 */
709static void fastcall free_hot_cold_page(struct page *page, int cold)
710{
711	struct zone *zone = page_zone(page);
712	struct per_cpu_pages *pcp;
713	unsigned long flags;
714
715	arch_free_page(page, 0);
716
717	if (PageAnon(page))
718		page->mapping = NULL;
719	if (free_pages_check(__FUNCTION__, page))
720		return;
721
722	inc_page_state(pgfree);
723	kernel_map_pages(page, 1, 0);
724
725	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
726	local_irq_save(flags);
727	list_add(&page->lru, &pcp->list);
728	pcp->count++;
729	if (pcp->count >= pcp->high)
730		pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
731	local_irq_restore(flags);
732	put_cpu();
733}
734
735void fastcall free_hot_page(struct page *page)
736{
737	free_hot_cold_page(page, 0);
738}
739
740void fastcall free_cold_page(struct page *page)
741{
742	free_hot_cold_page(page, 1);
743}
744
745static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
746{
747	int i;
748
749	BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
750	for(i = 0; i < (1 << order); i++)
751		clear_highpage(page + i);
752}
753
754/*
755 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
756 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
757 * or two.
758 */
759static struct page *
760buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
761{
762	unsigned long flags;
763	struct page *page;
764	int cold = !!(gfp_flags & __GFP_COLD);
765
766again:
767	if (order == 0) {
768		struct per_cpu_pages *pcp;
769
770		page = NULL;
771		pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
772		local_irq_save(flags);
773		if (!pcp->count)
774			pcp->count += rmqueue_bulk(zone, 0,
775						pcp->batch, &pcp->list);
776		if (likely(pcp->count)) {
777			page = list_entry(pcp->list.next, struct page, lru);
778			list_del(&page->lru);
779			pcp->count--;
780		}
781		local_irq_restore(flags);
782		put_cpu();
783	} else {
784		spin_lock_irqsave(&zone->lock, flags);
785		page = __rmqueue(zone, order);
786		spin_unlock_irqrestore(&zone->lock, flags);
787	}
788
789	if (page != NULL) {
790		BUG_ON(bad_range(zone, page));
791		mod_page_state_zone(zone, pgalloc, 1 << order);
792		if (prep_new_page(page, order))
793			goto again;
794
795		if (gfp_flags & __GFP_ZERO)
796			prep_zero_page(page, order, gfp_flags);
797
798		if (order && (gfp_flags & __GFP_COMP))
799			prep_compound_page(page, order);
800	}
801	return page;
802}
803
804#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
805#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
806#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
807#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
808#define ALLOC_HARDER		0x10 /* try to alloc harder */
809#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
810#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
811
812/*
813 * Return 1 if free pages are above 'mark'. This takes into account the order
814 * of the allocation.
815 */
816int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
817		      int classzone_idx, int alloc_flags)
818{
819	/* free_pages may go negative - that's OK */
820	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
821	int o;
822
823	if (alloc_flags & ALLOC_HIGH)
824		min -= min / 2;
825	if (alloc_flags & ALLOC_HARDER)
826		min -= min / 4;
827
828	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
829		return 0;
830	for (o = 0; o < order; o++) {
831		/* At the next order, this order's pages become unavailable */
832		free_pages -= z->free_area[o].nr_free << o;
833
834		/* Require fewer higher order pages to be free */
835		min >>= 1;
836
837		if (free_pages <= min)
838			return 0;
839	}
840	return 1;
841}
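
/*
 * Worked example (editorial note, illustrative only; assumes
 * lowmem_reserve[classzone_idx] == 0): an order-2 request with ALLOC_HIGH
 * against a zone with pages_min = 1024, 900 free order-0 pages and 50 free
 * order-1 blocks (1000 free pages total):
 *
 *	free_pages = 1000 - (1 << 2) + 1 = 997,  min = 1024 - 512 = 512
 *	o = 0: free_pages -= 900 << 0 -> 97,  min >>= 1 -> 256,  97 <= 256
 *
 * so the watermark check fails: plenty of order-0 pages does not help an
 * order-2 allocation once the higher orders are depleted.
 */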
842
843/*
844 * get_page_from_freelist goes through the zonelist trying to allocate
845 * a page.
846 */
847static struct page *
848get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
849		struct zonelist *zonelist, int alloc_flags)
850{
851	struct zone **z = zonelist->zones;
852	struct page *page = NULL;
853	int classzone_idx = zone_idx(*z);
854
855	/*
856	 * Go through the zonelist once, looking for a zone with enough free.
857	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
858	 */
859	do {
860		if ((alloc_flags & ALLOC_CPUSET) &&
861				!cpuset_zone_allowed(*z, gfp_mask))
862			continue;
863
864		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
865			unsigned long mark;
866			if (alloc_flags & ALLOC_WMARK_MIN)
867				mark = (*z)->pages_min;
868			else if (alloc_flags & ALLOC_WMARK_LOW)
869				mark = (*z)->pages_low;
870			else
871				mark = (*z)->pages_high;
872			if (!zone_watermark_ok(*z, order, mark,
873				    classzone_idx, alloc_flags))
874				continue;
875		}
876
877		page = buffered_rmqueue(*z, order, gfp_mask);
878		if (page) {
879			zone_statistics(zonelist, *z);
880			break;
881		}
882	} while (*(++z) != NULL);
883	return page;
884}
885
886/*
887 * This is the 'heart' of the zoned buddy allocator.
888 */
889struct page * fastcall
890__alloc_pages(gfp_t gfp_mask, unsigned int order,
891		struct zonelist *zonelist)
892{
893	const gfp_t wait = gfp_mask & __GFP_WAIT;
894	struct zone **z;
895	struct page *page;
896	struct reclaim_state reclaim_state;
897	struct task_struct *p = current;
898	int do_retry;
899	int alloc_flags;
900	int did_some_progress;
901
902	might_sleep_if(wait);
903
904restart:
905	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */
906
907	if (unlikely(*z == NULL)) {
908		/* Should this ever happen?? */
909		return NULL;
910	}
911
912	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
913				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
914	if (page)
915		goto got_pg;
916
917	do {
918		wakeup_kswapd(*z, order);
919	} while (*(++z));
920
921	/*
922	 * OK, we're below the kswapd watermark and have kicked background
923	 * reclaim. Now things get more complex, so set up alloc_flags according
924	 * to how we want to proceed.
925	 *
926	 * The caller may dip into page reserves a bit more if the caller
927	 * cannot run direct reclaim, or if the caller has realtime scheduling
928	 * policy.
929	 */
930	alloc_flags = ALLOC_WMARK_MIN;
931	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
932		alloc_flags |= ALLOC_HARDER;
933	if (gfp_mask & __GFP_HIGH)
934		alloc_flags |= ALLOC_HIGH;
935	alloc_flags |= ALLOC_CPUSET;
936
937	/*
938	 * Go through the zonelist again. Let __GFP_HIGH and allocations
939	 * coming from realtime tasks go deeper into reserves.
940	 *
941	 * This is the last chance, in general, before the goto nopage.
942	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
943	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
944	 */
945	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
946	if (page)
947		goto got_pg;
948
949	/* This allocation should allow future memory freeing. */
950
951	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
952			&& !in_interrupt()) {
953		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
954nofail_alloc:
955			/* go through the zonelist yet again, ignoring mins */
956			page = get_page_from_freelist(gfp_mask, order,
957				zonelist, ALLOC_NO_WATERMARKS);
958			if (page)
959				goto got_pg;
960			if (gfp_mask & __GFP_NOFAIL) {
961				blk_congestion_wait(WRITE, HZ/50);
962				goto nofail_alloc;
963			}
964		}
965		goto nopage;
966	}
967
968	/* Atomic allocations - we can't balance anything */
969	if (!wait)
970		goto nopage;
971
972rebalance:
973	cond_resched();
974
975	/* We now go into synchronous reclaim */
976	p->flags |= PF_MEMALLOC;
977	reclaim_state.reclaimed_slab = 0;
978	p->reclaim_state = &reclaim_state;
979
980	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);
981
982	p->reclaim_state = NULL;
983	p->flags &= ~PF_MEMALLOC;
984
985	cond_resched();
986
987	if (likely(did_some_progress)) {
988		page = get_page_from_freelist(gfp_mask, order,
989						zonelist, alloc_flags);
990		if (page)
991			goto got_pg;
992	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
993		/*
994		 * Go through the zonelist yet one more time, keep
995		 * very high watermark here, this is only to catch
996		 * a parallel oom killing, we must fail if we're still
997		 * under heavy pressure.
998		 */
999		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
1000				zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
1001		if (page)
1002			goto got_pg;
1003
1004		out_of_memory(gfp_mask, order);
1005		goto restart;
1006	}
1007
1008	/*
1009	 * Don't let big-order allocations loop unless the caller explicitly
1010	 * requests that.  Wait for some write requests to complete then retry.
1011	 *
1012	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
1013	 * <= 3, but that may not be true in other implementations.
1014	 */
1015	do_retry = 0;
1016	if (!(gfp_mask & __GFP_NORETRY)) {
1017		if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
1018			do_retry = 1;
1019		if (gfp_mask & __GFP_NOFAIL)
1020			do_retry = 1;
1021	}
1022	if (do_retry) {
1023		blk_congestion_wait(WRITE, HZ/50);
1024		goto rebalance;
1025	}
1026
1027nopage:
1028	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1029		printk(KERN_WARNING "%s: page allocation failure."
1030			" order:%d, mode:0x%x\n",
1031			p->comm, order, gfp_mask);
1032		dump_stack();
1033		show_mem();
1034	}
1035got_pg:
1036	return page;
1037}
1038
1039EXPORT_SYMBOL(__alloc_pages);
1040
1041/*
1042 * Common helper functions.
1043 */
1044fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1045{
1046	struct page * page;
1047	page = alloc_pages(gfp_mask, order);
1048	if (!page)
1049		return 0;
1050	return (unsigned long) page_address(page);
1051}
1052
1053EXPORT_SYMBOL(__get_free_pages);
1054
1055fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
1056{
1057	struct page * page;
1058
1059	/*
1060	 * get_zeroed_page() returns the page's direct-mapped kernel virtual
1061	 * address, which cannot represent a highmem page
1062	 */
1063	BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1064
1065	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1066	if (page)
1067		return (unsigned long) page_address(page);
1068	return 0;
1069}
1070
1071EXPORT_SYMBOL(get_zeroed_page);
1072
1073void __pagevec_free(struct pagevec *pvec)
1074{
1075	int i = pagevec_count(pvec);
1076
1077	while (--i >= 0)
1078		free_hot_cold_page(pvec->pages[i], pvec->cold);
1079}
1080
1081fastcall void __free_pages(struct page *page, unsigned int order)
1082{
1083	if (put_page_testzero(page)) {
1084		if (order == 0)
1085			free_hot_page(page);
1086		else
1087			__free_pages_ok(page, order);
1088	}
1089}
1090
1091EXPORT_SYMBOL(__free_pages);
1092
1093fastcall void free_pages(unsigned long addr, unsigned int order)
1094{
1095	if (addr != 0) {
1096		BUG_ON(!virt_addr_valid((void *)addr));
1097		__free_pages(virt_to_page((void *)addr), order);
1098	}
1099}
1100
1101EXPORT_SYMBOL(free_pages);
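
/*
 * Usage sketch (editorial note, illustrative only): callers normally pair
 * the helpers above rather than calling __alloc_pages() directly, e.g.
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);
 *	if (buf) {
 *		... use the four pages at buf ...
 *		free_pages(buf, 2);
 *	}
 *
 * GFP_KERNEL here is just the usual sleeping-allocation example; any gfp
 * mask accepted by __alloc_pages() works the same way.
 */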
1102
1103/*
1104 * Total amount of free (allocatable) RAM:
1105 */
1106unsigned int nr_free_pages(void)
1107{
1108	unsigned int sum = 0;
1109	struct zone *zone;
1110
1111	for_each_zone(zone)
1112		sum += zone->free_pages;
1113
1114	return sum;
1115}
1116
1117EXPORT_SYMBOL(nr_free_pages);
1118
1119#ifdef CONFIG_NUMA
1120unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
1121{
1122	unsigned int i, sum = 0;
1123
1124	for (i = 0; i < MAX_NR_ZONES; i++)
1125		sum += pgdat->node_zones[i].free_pages;
1126
1127	return sum;
1128}
1129#endif
1130
1131static unsigned int nr_free_zone_pages(int offset)
1132{
1133	/* Just pick one node, since fallback list is circular */
1134	pg_data_t *pgdat = NODE_DATA(numa_node_id());
1135	unsigned int sum = 0;
1136
1137	struct zonelist *zonelist = pgdat->node_zonelists + offset;
1138	struct zone **zonep = zonelist->zones;
1139	struct zone *zone;
1140
1141	for (zone = *zonep++; zone; zone = *zonep++) {
1142		unsigned long size = zone->present_pages;
1143		unsigned long high = zone->pages_high;
1144		if (size > high)
1145			sum += size - high;
1146	}
1147
1148	return sum;
1149}
1150
1151/*
1152 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1153 */
1154unsigned int nr_free_buffer_pages(void)
1155{
1156	return nr_free_zone_pages(gfp_zone(GFP_USER));
1157}
1158
1159/*
1160 * Amount of free RAM allocatable within all zones
1161 */
1162unsigned int nr_free_pagecache_pages(void)
1163{
1164	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
1165}
1166
1167#ifdef CONFIG_HIGHMEM
1168unsigned int nr_free_highpages (void)
1169{
1170	pg_data_t *pgdat;
1171	unsigned int pages = 0;
1172
1173	for_each_pgdat(pgdat)
1174		pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
1175
1176	return pages;
1177}
1178#endif
1179
1180#ifdef CONFIG_NUMA
1181static void show_node(struct zone *zone)
1182{
1183	printk("Node %d ", zone->zone_pgdat->node_id);
1184}
1185#else
1186#define show_node(zone)	do { } while (0)
1187#endif
1188
1189/*
1190 * Accumulate the page_state information across all CPUs.
1191 * The result is unavoidably approximate - it can change
1192 * during and after execution of this function.
1193 */
1194static DEFINE_PER_CPU(struct page_state, page_states) = {0};
1195
1196atomic_t nr_pagecache = ATOMIC_INIT(0);
1197EXPORT_SYMBOL(nr_pagecache);
1198#ifdef CONFIG_SMP
1199DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
1200#endif
1201
1202static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
1203{
1204	int cpu = 0;
1205
1206	memset(ret, 0, sizeof(*ret));
1207
1208	cpu = first_cpu(*cpumask);
1209	while (cpu < NR_CPUS) {
1210		unsigned long *in, *out, off;
1211
1212		in = (unsigned long *)&per_cpu(page_states, cpu);
1213
1214		cpu = next_cpu(cpu, *cpumask);
1215
1216		if (cpu < NR_CPUS)
1217			prefetch(&per_cpu(page_states, cpu));
1218
1219		out = (unsigned long *)ret;
1220		for (off = 0; off < nr; off++)
1221			*out++ += *in++;
1222	}
1223}
1224
1225void get_page_state_node(struct page_state *ret, int node)
1226{
1227	int nr;
1228	cpumask_t mask = node_to_cpumask(node);
1229
1230	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
1231	nr /= sizeof(unsigned long);
1232
1233	__get_page_state(ret, nr+1, &mask);
1234}
1235
1236void get_page_state(struct page_state *ret)
1237{
1238	int nr;
1239	cpumask_t mask = CPU_MASK_ALL;
1240
1241	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
1242	nr /= sizeof(unsigned long);
1243
1244	__get_page_state(ret, nr + 1, &mask);
1245}
1246
1247void get_full_page_state(struct page_state *ret)
1248{
1249	cpumask_t mask = CPU_MASK_ALL;
1250
1251	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
1252}
1253
1254unsigned long __read_page_state(unsigned long offset)
1255{
1256	unsigned long ret = 0;
1257	int cpu;
1258
1259	for_each_cpu(cpu) {
1260		unsigned long in;
1261
1262		in = (unsigned long)&per_cpu(page_states, cpu) + offset;
1263		ret += *((unsigned long *)in);
1264	}
1265	return ret;
1266}
1267
1268void __mod_page_state(unsigned long offset, unsigned long delta)
1269{
1270	unsigned long flags;
1271	void* ptr;
1272
1273	local_irq_save(flags);
1274	ptr = &__get_cpu_var(page_states);
1275	*(unsigned long*)(ptr + offset) += delta;
1276	local_irq_restore(flags);
1277}
1278
1279EXPORT_SYMBOL(__mod_page_state);
1280
1281void __get_zone_counts(unsigned long *active, unsigned long *inactive,
1282			unsigned long *free, struct pglist_data *pgdat)
1283{
1284	struct zone *zones = pgdat->node_zones;
1285	int i;
1286
1287	*active = 0;
1288	*inactive = 0;
1289	*free = 0;
1290	for (i = 0; i < MAX_NR_ZONES; i++) {
1291		*active += zones[i].nr_active;
1292		*inactive += zones[i].nr_inactive;
1293		*free += zones[i].free_pages;
1294	}
1295}
1296
1297void get_zone_counts(unsigned long *active,
1298		unsigned long *inactive, unsigned long *free)
1299{
1300	struct pglist_data *pgdat;
1301
1302	*active = 0;
1303	*inactive = 0;
1304	*free = 0;
1305	for_each_pgdat(pgdat) {
1306		unsigned long l, m, n;
1307		__get_zone_counts(&l, &m, &n, pgdat);
1308		*active += l;
1309		*inactive += m;
1310		*free += n;
1311	}
1312}
1313
1314void si_meminfo(struct sysinfo *val)
1315{
1316	val->totalram = totalram_pages;
1317	val->sharedram = 0;
1318	val->freeram = nr_free_pages();
1319	val->bufferram = nr_blockdev_pages();
1320#ifdef CONFIG_HIGHMEM
1321	val->totalhigh = totalhigh_pages;
1322	val->freehigh = nr_free_highpages();
1323#else
1324	val->totalhigh = 0;
1325	val->freehigh = 0;
1326#endif
1327	val->mem_unit = PAGE_SIZE;
1328}
1329
1330EXPORT_SYMBOL(si_meminfo);
1331
1332#ifdef CONFIG_NUMA
1333void si_meminfo_node(struct sysinfo *val, int nid)
1334{
1335	pg_data_t *pgdat = NODE_DATA(nid);
1336
1337	val->totalram = pgdat->node_present_pages;
1338	val->freeram = nr_free_pages_pgdat(pgdat);
1339	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
1340	val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
1341	val->mem_unit = PAGE_SIZE;
1342}
1343#endif
1344
1345#define K(x) ((x) << (PAGE_SHIFT-10))
1346
1347/*
1348 * Show free area list (used inside shift_scroll-lock stuff)
1349 * We also calculate the percentage fragmentation. We do this by counting the
1350 * memory on each free list with the exception of the first item on the list.
1351 */
1352void show_free_areas(void)
1353{
1354	struct page_state ps;
1355	int cpu, temperature;
1356	unsigned long active;
1357	unsigned long inactive;
1358	unsigned long free;
1359	struct zone *zone;
1360
1361	for_each_zone(zone) {
1362		show_node(zone);
1363		printk("%s per-cpu:", zone->name);
1364
1365		if (!zone->present_pages) {
1366			printk(" empty\n");
1367			continue;
1368		} else
1369			printk("\n");
1370
1371		for_each_online_cpu(cpu) {
1372			struct per_cpu_pageset *pageset;
1373
1374			pageset = zone_pcp(zone, cpu);
1375
1376			for (temperature = 0; temperature < 2; temperature++)
1377				printk("cpu %d %s: high %d, batch %d used:%d\n",
1378					cpu,
1379					temperature ? "cold" : "hot",
1380					pageset->pcp[temperature].high,
1381					pageset->pcp[temperature].batch,
1382					pageset->pcp[temperature].count);
1383		}
1384	}
1385
1386	get_page_state(&ps);
1387	get_zone_counts(&active, &inactive, &free);
1388
1389	printk("Free pages: %11ukB (%ukB HighMem)\n",
1390		K(nr_free_pages()),
1391		K(nr_free_highpages()));
1392
1393	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
1394		"unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
1395		active,
1396		inactive,
1397		ps.nr_dirty,
1398		ps.nr_writeback,
1399		ps.nr_unstable,
1400		nr_free_pages(),
1401		ps.nr_slab,
1402		ps.nr_mapped,
1403		ps.nr_page_table_pages);
1404
1405	for_each_zone(zone) {
1406		int i;
1407
1408		show_node(zone);
1409		printk("%s"
1410			" free:%lukB"
1411			" min:%lukB"
1412			" low:%lukB"
1413			" high:%lukB"
1414			" active:%lukB"
1415			" inactive:%lukB"
1416			" present:%lukB"
1417			" pages_scanned:%lu"
1418			" all_unreclaimable? %s"
1419			"\n",
1420			zone->name,
1421			K(zone->free_pages),
1422			K(zone->pages_min),
1423			K(zone->pages_low),
1424			K(zone->pages_high),
1425			K(zone->nr_active),
1426			K(zone->nr_inactive),
1427			K(zone->present_pages),
1428			zone->pages_scanned,
1429			(zone->all_unreclaimable ? "yes" : "no")
1430			);
1431		printk("lowmem_reserve[]:");
1432		for (i = 0; i < MAX_NR_ZONES; i++)
1433			printk(" %lu", zone->lowmem_reserve[i]);
1434		printk("\n");
1435	}
1436
1437	for_each_zone(zone) {
1438 		unsigned long nr, flags, order, total = 0;
1439
1440		show_node(zone);
1441		printk("%s: ", zone->name);
1442		if (!zone->present_pages) {
1443			printk("empty\n");
1444			continue;
1445		}
1446
1447		spin_lock_irqsave(&zone->lock, flags);
1448		for (order = 0; order < MAX_ORDER; order++) {
1449			nr = zone->free_area[order].nr_free;
1450			total += nr << order;
1451			printk("%lu*%lukB ", nr, K(1UL) << order);
1452		}
1453		spin_unlock_irqrestore(&zone->lock, flags);
1454		printk("= %lukB\n", K(total));
1455	}
1456
1457	show_swap_cache_info();
1458}
1459
1460/*
1461 * Builds allocation fallback zone lists.
1462 */
1463static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k)
1464{
1465	switch (k) {
1466		struct zone *zone;
1467	default:
1468		BUG();
1469	case ZONE_HIGHMEM:
1470		zone = pgdat->node_zones + ZONE_HIGHMEM;
1471		if (zone->present_pages) {
1472#ifndef CONFIG_HIGHMEM
1473			BUG();
1474#endif
1475			zonelist->zones[j++] = zone;
1476		}
1477	case ZONE_NORMAL:
1478		zone = pgdat->node_zones + ZONE_NORMAL;
1479		if (zone->present_pages)
1480			zonelist->zones[j++] = zone;
1481	case ZONE_DMA32:
1482		zone = pgdat->node_zones + ZONE_DMA32;
1483		if (zone->present_pages)
1484			zonelist->zones[j++] = zone;
1485	case ZONE_DMA:
1486		zone = pgdat->node_zones + ZONE_DMA;
1487		if (zone->present_pages)
1488			zonelist->zones[j++] = zone;
1489	}
1490
1491	return j;
1492}
1493
1494static inline int highest_zone(int zone_bits)
1495{
1496	int res = ZONE_NORMAL;
1497	if (zone_bits & (__force int)__GFP_HIGHMEM)
1498		res = ZONE_HIGHMEM;
1499	if (zone_bits & (__force int)__GFP_DMA32)
1500		res = ZONE_DMA32;
1501	if (zone_bits & (__force int)__GFP_DMA)
1502		res = ZONE_DMA;
1503	return res;
1504}
1505
1506#ifdef CONFIG_NUMA
1507#define MAX_NODE_LOAD (num_online_nodes())
1508static int __initdata node_load[MAX_NUMNODES];
1509/**
1510 * find_next_best_node - find the next node that should appear in a given node's fallback list
1511 * @node: node whose fallback list we're appending
1512 * @used_node_mask: nodemask_t of already used nodes
1513 *
1514 * We use a number of factors to determine which is the next node that should
1515 * appear on a given node's fallback list.  The node should not have appeared
1516 * already in @node's fallback list, and it should be the next closest node
1517 * according to the distance array (which contains arbitrary distance values
1518 * from each node to each node in the system); we also prefer nodes
1519 * with no CPUs, since presumably they'll have very little allocation pressure
1520 * on them otherwise.
1521 * It returns -1 if no node is found.
1522 */
1523static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
1524{
1525	int i, n, val;
1526	int min_val = INT_MAX;
1527	int best_node = -1;
1528
1529	for_each_online_node(i) {
1530		cpumask_t tmp;
1531
1532		/* Start from local node */
1533		n = (node+i) % num_online_nodes();
1534
1535		/* Don't want a node to appear more than once */
1536		if (node_isset(n, *used_node_mask))
1537			continue;
1538
1539		/* Use the local node if we haven't already */
1540		if (!node_isset(node, *used_node_mask)) {
1541			best_node = node;
1542			break;
1543		}
1544
1545		/* Use the distance array to find the distance */
1546		val = node_distance(node, n);
1547
1548		/* Give preference to headless and unused nodes */
1549		tmp = node_to_cpumask(n);
1550		if (!cpus_empty(tmp))
1551			val += PENALTY_FOR_NODE_WITH_CPUS;
1552
1553		/* Slight preference for less loaded node */
1554		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
1555		val += node_load[n];
1556
1557		if (val < min_val) {
1558			min_val = val;
1559			best_node = n;
1560		}
1561	}
1562
1563	if (best_node >= 0)
1564		node_set(best_node, *used_node_mask);
1565
1566	return best_node;
1567}
1568
1569static void __init build_zonelists(pg_data_t *pgdat)
1570{
1571	int i, j, k, node, local_node;
1572	int prev_node, load;
1573	struct zonelist *zonelist;
1574	nodemask_t used_mask;
1575
1576	/* initialize zonelists */
1577	for (i = 0; i < GFP_ZONETYPES; i++) {
1578		zonelist = pgdat->node_zonelists + i;
1579		zonelist->zones[0] = NULL;
1580	}
1581
1582	/* NUMA-aware ordering of nodes */
1583	local_node = pgdat->node_id;
1584	load = num_online_nodes();
1585	prev_node = local_node;
1586	nodes_clear(used_mask);
1587	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
1588		/*
1589		 * We don't want to pressure a particular node.
1590		 * So we add a penalty to the first node in the same
1591		 * distance group to make it round-robin.
1592		 */
1593		if (node_distance(local_node, node) !=
1594				node_distance(local_node, prev_node))
1595			node_load[node] += load;
1596		prev_node = node;
1597		load--;
1598		for (i = 0; i < GFP_ZONETYPES; i++) {
1599			zonelist = pgdat->node_zonelists + i;
1600			for (j = 0; zonelist->zones[j] != NULL; j++);
1601
1602			k = highest_zone(i);
1603
1604	 		j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
1605			zonelist->zones[j] = NULL;
1606		}
1607	}
1608}
1609
1610#else	/* CONFIG_NUMA */
1611
1612static void __init build_zonelists(pg_data_t *pgdat)
1613{
1614	int i, j, k, node, local_node;
1615
1616	local_node = pgdat->node_id;
1617	for (i = 0; i < GFP_ZONETYPES; i++) {
1618		struct zonelist *zonelist;
1619
1620		zonelist = pgdat->node_zonelists + i;
1621
1622		j = 0;
1623		k = highest_zone(i);
1624 		j = build_zonelists_node(pgdat, zonelist, j, k);
1625 		/*
1626 		 * Now we build the zonelist so that it contains the zones
1627 		 * of all the other nodes.
1628 		 * We don't want to pressure a particular node, so when
1629 		 * building the zones for node N, we make sure that the
1630 		 * zones coming right after the local ones are those from
1631 		 * node N+1 (modulo N)
1632 		 */
1633		for (node = local_node + 1; node < MAX_NUMNODES; node++) {
1634			if (!node_online(node))
1635				continue;
1636			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
1637		}
1638		for (node = 0; node < local_node; node++) {
1639			if (!node_online(node))
1640				continue;
1641			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
1642		}
1643
1644		zonelist->zones[j] = NULL;
1645	}
1646}
1647
1648#endif	/* CONFIG_NUMA */
1649
1650void __init build_all_zonelists(void)
1651{
1652	int i;
1653
1654	for_each_online_node(i)
1655		build_zonelists(NODE_DATA(i));
1656	printk("Built %i zonelists\n", num_online_nodes());
1657	cpuset_init_current_mems_allowed();
1658}
1659
1660/*
1661 * Helper functions to size the waitqueue hash table.
1662 * Essentially these want to choose hash table sizes sufficiently
1663 * large so that collisions trying to wait on pages are rare.
1664 * But in fact, the number of active page waitqueues on typical
1665 * systems is ridiculously low, less than 200. So this is even
1666 * conservative, even though it seems large.
1667 *
1668 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
1669 * waitqueues, i.e. the size of the waitq table given the number of pages.
1670 */
1671#define PAGES_PER_WAITQUEUE	256
1672
1673static inline unsigned long wait_table_size(unsigned long pages)
1674{
1675	unsigned long size = 1;
1676
1677	pages /= PAGES_PER_WAITQUEUE;
1678
1679	while (size < pages)
1680		size <<= 1;
1681
1682	/*
1683	 * Once we have dozens or even hundreds of threads sleeping
1684	 * on IO we've got bigger problems than wait queue collision.
1685	 * Limit the size of the wait table to a reasonable size.
1686	 */
1687	size = min(size, 4096UL);
1688
1689	return max(size, 4UL);
1690}
1691
1692/*
1693 * This is an integer logarithm so that shifts can be used later
1694 * to extract the more random high bits from the multiplicative
1695 * hash function before the remainder is taken.
1696 */
1697static inline unsigned long wait_table_bits(unsigned long size)
1698{
1699	return ffz(~size);
1700}
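
/*
 * Worked example (editorial note, illustrative only): a zone spanning 1GB
 * of 4K pages has 262144 pages, so wait_table_size() computes
 * 262144 / PAGES_PER_WAITQUEUE = 1024, rounds up to the next power of two
 * (already 1024, below the 4096 cap), and wait_table_bits(1024) = 10.
 * That zone's hashed page waitqueues then occupy 1024 wait_queue_head_t's.
 */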
1701
1702#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
1703
1704static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
1705		unsigned long *zones_size, unsigned long *zholes_size)
1706{
1707	unsigned long realtotalpages, totalpages = 0;
1708	int i;
1709
1710	for (i = 0; i < MAX_NR_ZONES; i++)
1711		totalpages += zones_size[i];
1712	pgdat->node_spanned_pages = totalpages;
1713
1714	realtotalpages = totalpages;
1715	if (zholes_size)
1716		for (i = 0; i < MAX_NR_ZONES; i++)
1717			realtotalpages -= zholes_size[i];
1718	pgdat->node_present_pages = realtotalpages;
1719	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
1720}
1721
1722
1723/*
1724 * Initially all pages are reserved - free ones are freed
1725 * up by free_all_bootmem() once the early boot process is
1726 * done. Non-atomic initialization, single-pass.
1727 */
1728void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
1729		unsigned long start_pfn)
1730{
1731	struct page *page;
1732	unsigned long end_pfn = start_pfn + size;
1733	unsigned long pfn;
1734
1735	for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) {
1736		if (!early_pfn_valid(pfn))
1737			continue;
1738		page = pfn_to_page(pfn);
1739		set_page_links(page, zone, nid, pfn);
1740		set_page_count(page, 1);
1741		reset_page_mapcount(page);
1742		SetPageReserved(page);
1743		INIT_LIST_HEAD(&page->lru);
1744#ifdef WANT_PAGE_VIRTUAL
1745		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
1746		if (!is_highmem_idx(zone))
1747			set_page_address(page, __va(pfn << PAGE_SHIFT));
1748#endif
1749	}
1750}
1751
1752void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
1753				unsigned long size)
1754{
1755	int order;
1756	for (order = 0; order < MAX_ORDER ; order++) {
1757		INIT_LIST_HEAD(&zone->free_area[order].free_list);
1758		zone->free_area[order].nr_free = 0;
1759	}
1760}
1761
1762#define ZONETABLE_INDEX(x, zone_nr)	((x << ZONES_SHIFT) | zone_nr)
1763void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
1764		unsigned long size)
1765{
1766	unsigned long snum = pfn_to_section_nr(pfn);
1767	unsigned long end = pfn_to_section_nr(pfn + size);
1768
1769	if (FLAGS_HAS_NODE)
1770		zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
1771	else
1772		for (; snum <= end; snum++)
1773			zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
1774}
1775
1776#ifndef __HAVE_ARCH_MEMMAP_INIT
1777#define memmap_init(size, nid, zone, start_pfn) \
1778	memmap_init_zone((size), (nid), (zone), (start_pfn))
1779#endif
1780
1781static int __devinit zone_batchsize(struct zone *zone)
1782{
1783	int batch;
1784
1785	/*
1786	 * The per-cpu-pages pools are set to around 1/1000th of the
1787	 * size of the zone.  But no more than 1/2 of a meg.
1788	 *
1789	 * OK, so we don't know how big the cache is.  So guess.
1790	 */
1791	batch = zone->present_pages / 1024;
1792	if (batch * PAGE_SIZE > 512 * 1024)
1793		batch = (512 * 1024) / PAGE_SIZE;
1794	batch /= 4;		/* We effectively *= 4 below */
1795	if (batch < 1)
1796		batch = 1;
1797
1798	/*
1799	 * Clamp the batch to a 2^n - 1 value. Having a power
1800	 * of 2 value was found to be more likely to have
1801	 * suboptimal cache aliasing properties in some cases.
1802	 *
1803	 * For example if 2 tasks are alternately allocating
1804	 * batches of pages, one task can end up with a lot
1805	 * of pages of one half of the possible page colors
1806	 * and the other with pages of the other colors.
1807	 */
1808	batch = (1 << (fls(batch + batch/2)-1)) - 1;
1809
1810	return batch;
1811}
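
/*
 * Worked example (editorial note, illustrative only): a 512MB zone with 4K
 * pages has present_pages = 131072, so
 *
 *	batch = 131072 / 1024 = 128   (128 * 4K = 512K, so the cap is not hit)
 *	batch /= 4                ->  32
 *	batch = (1 << (fls(32 + 16) - 1)) - 1 = 31
 *
 * giving a hot per-cpu pool of at most 6 * 31 = 186 pages refilled in
 * bursts of 31 - the "2^n - 1" value the comment above argues for.
 */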
1812
1813inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
1814{
1815	struct per_cpu_pages *pcp;
1816
1817	memset(p, 0, sizeof(*p));
1818
1819	pcp = &p->pcp[0];		/* hot */
1820	pcp->count = 0;
1821	pcp->high = 6 * batch;
1822	pcp->batch = max(1UL, 1 * batch);
1823	INIT_LIST_HEAD(&pcp->list);
1824
1825	pcp = &p->pcp[1];		/* cold */
1826	pcp->count = 0;
1827	pcp->high = 2 * batch;
1828	pcp->batch = max(1UL, batch/2);
1829	INIT_LIST_HEAD(&pcp->list);
1830}
1831
1832#ifdef CONFIG_NUMA
1833/*
1834 * Boot pageset table. One per cpu which is going to be used for all
1835 * zones and all nodes. The parameters will be set in such a way
1836 * that an item put on a list will immediately be handed over to
1837 * the buddy list. This is safe since pageset manipulation is done
1838 * with interrupts disabled.
1839 *
1840 * Some NUMA counter updates may also be caught by the boot pagesets.
1841 *
1842 * The boot_pagesets must be kept even after bootup is complete for
1843 * unused processors and/or zones. They do play a role for bootstrapping
1844 * hotplugged processors.
1845 *
1846 * zoneinfo_show() and maybe other functions do
1847 * not check if the processor is online before following the pageset pointer.
1848 * Other parts of the kernel may not check if the zone is available.
1849 */
1850static struct per_cpu_pageset
1851	boot_pageset[NR_CPUS];
1852
1853/*
1854 * Dynamically allocate memory for the
1855 * per cpu pageset array in struct zone.
1856 */
1857static int __devinit process_zones(int cpu)
1858{
1859	struct zone *zone, *dzone;
1860
1861	for_each_zone(zone) {
1862
1863		zone->pageset[cpu] = kmalloc_node(sizeof(struct per_cpu_pageset),
1864					 GFP_KERNEL, cpu_to_node(cpu));
1865		if (!zone->pageset[cpu])
1866			goto bad;
1867
1868		setup_pageset(zone->pageset[cpu], zone_batchsize(zone));
1869	}
1870
1871	return 0;
1872bad:
1873	for_each_zone(dzone) {
1874		if (dzone == zone)
1875			break;
1876		kfree(dzone->pageset[cpu]);
1877		dzone->pageset[cpu] = NULL;
1878	}
1879	return -ENOMEM;
1880}
1881
1882static inline void free_zone_pagesets(int cpu)
1883{
1884#ifdef CONFIG_NUMA
1885	struct zone *zone;
1886
1887	for_each_zone(zone) {
1888		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
1889
1890		zone_pcp(zone, cpu) = NULL;
1891		kfree(pset);
1892	}
1893#endif
1894}
1895
1896static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
1897		unsigned long action,
1898		void *hcpu)
1899{
1900	int cpu = (long)hcpu;
1901	int ret = NOTIFY_OK;
1902
1903	switch (action) {
1904		case CPU_UP_PREPARE:
1905			if (process_zones(cpu))
1906				ret = NOTIFY_BAD;
1907			break;
1908		case CPU_UP_CANCELED:
1909		case CPU_DEAD:
1910			free_zone_pagesets(cpu);
1911			break;
1912		default:
1913			break;
1914	}
1915	return ret;
1916}
1917
1918static struct notifier_block pageset_notifier =
1919	{ &pageset_cpuup_callback, NULL, 0 };
1920
1921void __init setup_per_cpu_pageset(void)
1922{
1923	int err;
1924
1925	/* Initialize per_cpu_pageset for cpu 0.
1926	 * A cpuup callback will do this for every cpu
1927	 * as it comes online
1928	 */
1929	err = process_zones(smp_processor_id());
1930	BUG_ON(err);
1931	register_cpu_notifier(&pageset_notifier);
1932}
1933
1934#endif
1935
1936static __devinit
1937void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
1938{
1939	int i;
1940	struct pglist_data *pgdat = zone->zone_pgdat;
1941
1942	/*
1943	 * The per-page waitqueue mechanism uses hashed waitqueues
1944	 * per zone.
1945	 */
1946	zone->wait_table_size = wait_table_size(zone_size_pages);
1947	zone->wait_table_bits =	wait_table_bits(zone->wait_table_size);
1948	zone->wait_table = (wait_queue_head_t *)
1949		alloc_bootmem_node(pgdat, zone->wait_table_size
1950					* sizeof(wait_queue_head_t));
1951
1952	for(i = 0; i < zone->wait_table_size; ++i)
1953		init_waitqueue_head(zone->wait_table + i);
1954}
1955
1956static __devinit void zone_pcp_init(struct zone *zone)
1957{
1958	int cpu;
1959	unsigned long batch = zone_batchsize(zone);
1960
1961	for (cpu = 0; cpu < NR_CPUS; cpu++) {
1962#ifdef CONFIG_NUMA
1963		/* Early boot. Slab allocator not functional yet */
1964		zone->pageset[cpu] = &boot_pageset[cpu];
1965		setup_pageset(&boot_pageset[cpu],0);
1966#else
1967		setup_pageset(zone_pcp(zone,cpu), batch);
1968#endif
1969	}
1970	printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
1971		zone->name, zone->present_pages, batch);
1972}
1973
1974static __devinit void init_currently_empty_zone(struct zone *zone,
1975		unsigned long zone_start_pfn, unsigned long size)
1976{
1977	struct pglist_data *pgdat = zone->zone_pgdat;
1978
1979	zone_wait_table_init(zone, size);
1980	pgdat->nr_zones = zone_idx(zone) + 1;
1981
1982	zone->zone_mem_map = pfn_to_page(zone_start_pfn);
1983	zone->zone_start_pfn = zone_start_pfn;
1984
1985	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
1986
1987	zone_init_free_lists(pgdat, zone, zone->spanned_pages);
1988}
1989
1990/*
1991 * Set up the zone data structures:
1992 *   - mark all pages reserved
1993 *   - mark all memory queues empty
1994 *   - clear the memory bitmaps
1995 */
1996static void __init free_area_init_core(struct pglist_data *pgdat,
1997		unsigned long *zones_size, unsigned long *zholes_size)
1998{
1999	unsigned long j;
2000	int nid = pgdat->node_id;
2001	unsigned long zone_start_pfn = pgdat->node_start_pfn;
2002
2003	pgdat_resize_init(pgdat);
2004	pgdat->nr_zones = 0;
2005	init_waitqueue_head(&pgdat->kswapd_wait);
2006	pgdat->kswapd_max_order = 0;
2007
2008	for (j = 0; j < MAX_NR_ZONES; j++) {
2009		struct zone *zone = pgdat->node_zones + j;
2010		unsigned long size, realsize;
2011
2012		realsize = size = zones_size[j];
2013		if (zholes_size)
2014			realsize -= zholes_size[j];
2015
2016		if (j < ZONE_HIGHMEM)
2017			nr_kernel_pages += realsize;
2018		nr_all_pages += realsize;
2019
2020		zone->spanned_pages = size;
2021		zone->present_pages = realsize;
2022		zone->name = zone_names[j];
2023		spin_lock_init(&zone->lock);
2024		spin_lock_init(&zone->lru_lock);
2025		zone_seqlock_init(zone);
2026		zone->zone_pgdat = pgdat;
2027		zone->free_pages = 0;
2028
2029		zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
2030
2031		zone_pcp_init(zone);
2032		INIT_LIST_HEAD(&zone->active_list);
2033		INIT_LIST_HEAD(&zone->inactive_list);
2034		zone->nr_scan_active = 0;
2035		zone->nr_scan_inactive = 0;
2036		zone->nr_active = 0;
2037		zone->nr_inactive = 0;
2038		atomic_set(&zone->reclaim_in_progress, 0);
2039		if (!size)
2040			continue;
2041
2042		zonetable_add(zone, nid, j, zone_start_pfn, size);
2043		init_currently_empty_zone(zone, zone_start_pfn, size);
2044		zone_start_pfn += size;
2045	}
2046}
2047
2048static void __init alloc_node_mem_map(struct pglist_data *pgdat)
2049{
2050	/* Skip empty nodes */
2051	if (!pgdat->node_spanned_pages)
2052		return;
2053
2054#ifdef CONFIG_FLAT_NODE_MEM_MAP
2055	/* ia64 gets its own node_mem_map, before this, without bootmem */
2056	if (!pgdat->node_mem_map) {
2057		unsigned long size;
2058		struct page *map;
2059
2060		size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
2061		map = alloc_remap(pgdat->node_id, size);
2062		if (!map)
2063			map = alloc_bootmem_node(pgdat, size);
2064		pgdat->node_mem_map = map;
2065	}
2066#ifdef CONFIG_FLATMEM
2067	/*
2068	 * With no DISCONTIG, the global mem_map is just set as node 0's
2069	 */
2070	if (pgdat == NODE_DATA(0))
2071		mem_map = NODE_DATA(0)->node_mem_map;
2072#endif
2073#endif /* CONFIG_FLAT_NODE_MEM_MAP */
2074}
2075
2076void __init free_area_init_node(int nid, struct pglist_data *pgdat,
2077		unsigned long *zones_size, unsigned long node_start_pfn,
2078		unsigned long *zholes_size)
2079{
2080	pgdat->node_id = nid;
2081	pgdat->node_start_pfn = node_start_pfn;
2082	calculate_zone_totalpages(pgdat, zones_size, zholes_size);
2083
2084	alloc_node_mem_map(pgdat);
2085
2086	free_area_init_core(pgdat, zones_size, zholes_size);
2087}
2088
2089#ifndef CONFIG_NEED_MULTIPLE_NODES
2090static bootmem_data_t contig_bootmem_data;
2091struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
2092
2093EXPORT_SYMBOL(contig_page_data);
2094#endif
2095
2096void __init free_area_init(unsigned long *zones_size)
2097{
2098	free_area_init_node(0, NODE_DATA(0), zones_size,
2099			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
2100}
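/*
 * Illustrative example (hypothetical machine, not taken from any arch
 * code): a flat 128MB box with 4K pages and no highmem might describe
 * itself to the allocator as, in pages and in zone order,
 *
 *	unsigned long zones_size[MAX_NR_ZONES] = {
 *		4096,		-- ZONE_DMA:     16MB
 *		0,		-- ZONE_DMA32:   unused
 *		28672,		-- ZONE_NORMAL: 112MB
 *		0,		-- ZONE_HIGHMEM: none
 *	};
 *	free_area_init(zones_size);
 *
 * free_area_init() forwards this to free_area_init_node() for node 0,
 * taking node_start_pfn from __pa(PAGE_OFFSET) as above.
 */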
2101
2102#ifdef CONFIG_PROC_FS
2103
2104#include <linux/seq_file.h>
2105
2106static void *frag_start(struct seq_file *m, loff_t *pos)
2107{
2108	pg_data_t *pgdat;
2109	loff_t node = *pos;
2110
2111	for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next)
2112		--node;
2113
2114	return pgdat;
2115}
2116
2117static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
2118{
2119	pg_data_t *pgdat = (pg_data_t *)arg;
2120
2121	(*pos)++;
2122	return pgdat->pgdat_next;
2123}
2124
2125static void frag_stop(struct seq_file *m, void *arg)
2126{
2127}
2128
2129/*
2130 * This walks the free areas for each zone.
2131 */
2132static int frag_show(struct seq_file *m, void *arg)
2133{
2134	pg_data_t *pgdat = (pg_data_t *)arg;
2135	struct zone *zone;
2136	struct zone *node_zones = pgdat->node_zones;
2137	unsigned long flags;
2138	int order;
2139
2140	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
2141		if (!zone->present_pages)
2142			continue;
2143
2144		spin_lock_irqsave(&zone->lock, flags);
2145		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
2146		for (order = 0; order < MAX_ORDER; ++order)
2147			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
2148		spin_unlock_irqrestore(&zone->lock, flags);
2149		seq_putc(m, '\n');
2150	}
2151	return 0;
2152}
2153
2154struct seq_operations fragmentation_op = {
2155	.start	= frag_start,
2156	.next	= frag_next,
2157	.stop	= frag_stop,
2158	.show	= frag_show,
2159};
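/*
 * Illustrative output (values made up): frag_show() prints one line per
 * populated zone with one free-block count per order, which on the file
 * typically exposed as /proc/buddyinfo looks like
 *
 *	Node 0, zone      DMA      3      2      4      3      1 ...
 *	Node 0, zone   Normal    120     45     11      3      1 ...
 */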
2160
2161/*
2162 * Output information about zones in @pgdat.
2163 */
2164static int zoneinfo_show(struct seq_file *m, void *arg)
2165{
2166	pg_data_t *pgdat = arg;
2167	struct zone *zone;
2168	struct zone *node_zones = pgdat->node_zones;
2169	unsigned long flags;
2170
2171	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
2172		int i;
2173
2174		if (!zone->present_pages)
2175			continue;
2176
2177		spin_lock_irqsave(&zone->lock, flags);
2178		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
2179		seq_printf(m,
2180			   "\n  pages free     %lu"
2181			   "\n        min      %lu"
2182			   "\n        low      %lu"
2183			   "\n        high     %lu"
2184			   "\n        active   %lu"
2185			   "\n        inactive %lu"
2186			   "\n        scanned  %lu (a: %lu i: %lu)"
2187			   "\n        spanned  %lu"
2188			   "\n        present  %lu",
2189			   zone->free_pages,
2190			   zone->pages_min,
2191			   zone->pages_low,
2192			   zone->pages_high,
2193			   zone->nr_active,
2194			   zone->nr_inactive,
2195			   zone->pages_scanned,
2196			   zone->nr_scan_active, zone->nr_scan_inactive,
2197			   zone->spanned_pages,
2198			   zone->present_pages);
2199		seq_printf(m,
2200			   "\n        protection: (%lu",
2201			   zone->lowmem_reserve[0]);
2202		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
2203			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
2204		seq_printf(m,
2205			   ")"
2206			   "\n  pagesets");
2207		for (i = 0; i < ARRAY_SIZE(zone->pageset); i++) {
2208			struct per_cpu_pageset *pageset;
2209			int j;
2210
2211			pageset = zone_pcp(zone, i);
2212			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
2213				if (pageset->pcp[j].count)
2214					break;
2215			}
2216			if (j == ARRAY_SIZE(pageset->pcp))
2217				continue;
2218			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
2219				seq_printf(m,
2220					   "\n    cpu: %i pcp: %i"
2221					   "\n              count: %i"
2222					   "\n              high:  %i"
2223					   "\n              batch: %i",
2224					   i, j,
2225					   pageset->pcp[j].count,
2226					   pageset->pcp[j].high,
2227					   pageset->pcp[j].batch);
2228			}
2229#ifdef CONFIG_NUMA
2230			seq_printf(m,
2231				   "\n            numa_hit:       %lu"
2232				   "\n            numa_miss:      %lu"
2233				   "\n            numa_foreign:   %lu"
2234				   "\n            interleave_hit: %lu"
2235				   "\n            local_node:     %lu"
2236				   "\n            other_node:     %lu",
2237				   pageset->numa_hit,
2238				   pageset->numa_miss,
2239				   pageset->numa_foreign,
2240				   pageset->interleave_hit,
2241				   pageset->local_node,
2242				   pageset->other_node);
2243#endif
2244		}
2245		seq_printf(m,
2246			   "\n  all_unreclaimable: %u"
2247			   "\n  prev_priority:     %i"
2248			   "\n  temp_priority:     %i"
2249			   "\n  start_pfn:         %lu",
2250			   zone->all_unreclaimable,
2251			   zone->prev_priority,
2252			   zone->temp_priority,
2253			   zone->zone_start_pfn);
2254		spin_unlock_irqrestore(&zone->lock, flags);
2255		seq_putc(m, '\n');
2256	}
2257	return 0;
2258}
2259
2260struct seq_operations zoneinfo_op = {
2261	.start	= frag_start, /* iterate over all zones; the same iterator
2262			       * as the fragmentation file uses. */
2263	.next	= frag_next,
2264	.stop	= frag_stop,
2265	.show	= zoneinfo_show,
2266};
2267
2268static char *vmstat_text[] = {
2269	"nr_dirty",
2270	"nr_writeback",
2271	"nr_unstable",
2272	"nr_page_table_pages",
2273	"nr_mapped",
2274	"nr_slab",
2275
2276	"pgpgin",
2277	"pgpgout",
2278	"pswpin",
2279	"pswpout",
2280	"pgalloc_high",
2281
2282	"pgalloc_normal",
2283	"pgalloc_dma",
2284	"pgfree",
2285	"pgactivate",
2286	"pgdeactivate",
2287
2288	"pgfault",
2289	"pgmajfault",
2290	"pgrefill_high",
2291	"pgrefill_normal",
2292	"pgrefill_dma",
2293
2294	"pgsteal_high",
2295	"pgsteal_normal",
2296	"pgsteal_dma",
2297	"pgscan_kswapd_high",
2298	"pgscan_kswapd_normal",
2299
2300	"pgscan_kswapd_dma",
2301	"pgscan_direct_high",
2302	"pgscan_direct_normal",
2303	"pgscan_direct_dma",
2304	"pginodesteal",
2305
2306	"slabs_scanned",
2307	"kswapd_steal",
2308	"kswapd_inodesteal",
2309	"pageoutrun",
2310	"allocstall",
2311
2312	"pgrotated",
2313	"nr_bounce",
2314};
2315
2316static void *vmstat_start(struct seq_file *m, loff_t *pos)
2317{
2318	struct page_state *ps;
2319
2320	if (*pos >= ARRAY_SIZE(vmstat_text))
2321		return NULL;
2322
2323	ps = kmalloc(sizeof(*ps), GFP_KERNEL);
2324	m->private = ps;
2325	if (!ps)
2326		return ERR_PTR(-ENOMEM);
2327	get_full_page_state(ps);
2328	ps->pgpgin /= 2;		/* sectors -> kbytes */
2329	ps->pgpgout /= 2;
2330	return (unsigned long *)ps + *pos;
2331}
2332
2333static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
2334{
2335	(*pos)++;
2336	if (*pos >= ARRAY_SIZE(vmstat_text))
2337		return NULL;
2338	return (unsigned long *)m->private + *pos;
2339}
2340
2341static int vmstat_show(struct seq_file *m, void *arg)
2342{
2343	unsigned long *l = arg;
2344	unsigned long off = l - (unsigned long *)m->private;
2345
2346	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
2347	return 0;
2348}
2349
2350static void vmstat_stop(struct seq_file *m, void *arg)
2351{
2352	kfree(m->private);
2353	m->private = NULL;
2354}
2355
2356struct seq_operations vmstat_op = {
2357	.start	= vmstat_start,
2358	.next	= vmstat_next,
2359	.stop	= vmstat_stop,
2360	.show	= vmstat_show,
2361};
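/*
 * Illustrative output (values made up): vmstat_show() emits one
 * "name value" pair per vmstat_text[] entry, typically exposed as
 * /proc/vmstat:
 *
 *	nr_dirty 15
 *	nr_writeback 0
 *	pgpgin 482308
 *	...
 *
 * Note that vmstat_start() halves pgpgin/pgpgout so the figures are
 * reported in kilobytes rather than sectors.
 */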
2362
2363#endif /* CONFIG_PROC_FS */
2364
2365#ifdef CONFIG_HOTPLUG_CPU
2366static int page_alloc_cpu_notify(struct notifier_block *self,
2367				 unsigned long action, void *hcpu)
2368{
2369	int cpu = (unsigned long)hcpu;
2370	long *count;
2371	unsigned long *src, *dest;
2372
2373	if (action == CPU_DEAD) {
2374		int i;
2375
2376		/* Drain local pagecache count. */
2377		count = &per_cpu(nr_pagecache_local, cpu);
2378		atomic_add(*count, &nr_pagecache);
2379		*count = 0;
2380		local_irq_disable();
2381		__drain_pages(cpu);
2382
2383		/* Add dead cpu's page_states to our own. */
2384		dest = (unsigned long *)&__get_cpu_var(page_states);
2385		src = (unsigned long *)&per_cpu(page_states, cpu);
2386
2387		for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
2388				i++) {
2389			dest[i] += src[i];
2390			src[i] = 0;
2391		}
2392
2393		local_irq_enable();
2394	}
2395	return NOTIFY_OK;
2396}
2397#endif /* CONFIG_HOTPLUG_CPU */
2398
2399void __init page_alloc_init(void)
2400{
2401	hotcpu_notifier(page_alloc_cpu_notify, 0);
2402}
2403
2404/*
2405 * setup_per_zone_lowmem_reserve - called whenever
2406 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
2407 *	has a correct pages reserved value, so an adequate number of
2408 *	pages are left in the zone after a successful __alloc_pages().
2409 */
2410static void setup_per_zone_lowmem_reserve(void)
2411{
2412	struct pglist_data *pgdat;
2413	int j, idx;
2414
2415	for_each_pgdat(pgdat) {
2416		for (j = 0; j < MAX_NR_ZONES; j++) {
2417			struct zone *zone = pgdat->node_zones + j;
2418			unsigned long present_pages = zone->present_pages;
2419
2420			zone->lowmem_reserve[j] = 0;
2421
2422			for (idx = j-1; idx >= 0; idx--) {
2423				struct zone *lower_zone;
2424
2425				if (sysctl_lowmem_reserve_ratio[idx] < 1)
2426					sysctl_lowmem_reserve_ratio[idx] = 1;
2427
2428				lower_zone = pgdat->node_zones + idx;
2429				lower_zone->lowmem_reserve[j] = present_pages /
2430					sysctl_lowmem_reserve_ratio[idx];
2431				present_pages += lower_zone->present_pages;
2432			}
2433		}
2434	}
2435}
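/*
 * Worked example (hypothetical sizes and ratio, for illustration only):
 * with a 4096-page ZONE_DMA, a 200704-page ZONE_NORMAL and
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] == 256, the inner loop for
 * j == ZONE_NORMAL computes
 *
 *	DMA->lowmem_reserve[ZONE_NORMAL] = 200704 / 256 = 784 pages
 *
 * i.e. roughly 3MB of ZONE_DMA is kept back from allocations that could
 * just as well have been satisfied from ZONE_NORMAL.
 */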
2436
2437/*
2438 * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures
2439 *	that the pages_{min,low,high} values for each zone are set correctly
2440 *	with respect to min_free_kbytes.
2441 */
2442void setup_per_zone_pages_min(void)
2443{
2444	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
2445	unsigned long lowmem_pages = 0;
2446	struct zone *zone;
2447	unsigned long flags;
2448
2449	/* Calculate total number of !ZONE_HIGHMEM pages */
2450	for_each_zone(zone) {
2451		if (!is_highmem(zone))
2452			lowmem_pages += zone->present_pages;
2453	}
2454
2455	for_each_zone(zone) {
2456		unsigned long tmp;
2457		spin_lock_irqsave(&zone->lru_lock, flags);
2458		tmp = (pages_min * zone->present_pages) / lowmem_pages;
2459		if (is_highmem(zone)) {
2460			/*
2461			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
2462			 * need highmem pages, so cap pages_min to a small
2463			 * value here.
2464			 *
2465			 * The (pages_high-pages_low) and (pages_low-pages_min)
2466			 * deltas control asynchronous page reclaim, and so should
2467			 * not be capped for highmem.
2468			 */
2469			int min_pages;
2470
2471			min_pages = zone->present_pages / 1024;
2472			if (min_pages < SWAP_CLUSTER_MAX)
2473				min_pages = SWAP_CLUSTER_MAX;
2474			if (min_pages > 128)
2475				min_pages = 128;
2476			zone->pages_min = min_pages;
2477		} else {
2478			/*
2479			 * If it's a lowmem zone, reserve a number of pages
2480			 * proportionate to the zone's size.
2481			 */
2482			zone->pages_min = tmp;
2483		}
2484
2485		zone->pages_low   = zone->pages_min + tmp / 4;
2486		zone->pages_high  = zone->pages_min + tmp / 2;
2487		spin_unlock_irqrestore(&zone->lru_lock, flags);
2488	}
2489}
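/*
 * Worked example (hypothetical numbers): with 4K pages and
 * min_free_kbytes == 4096, pages_min is 4096 >> 2 == 1024 pages.  For a
 * lowmem zone holding all of lowmem, tmp is also 1024, so
 *
 *	pages_min  = 1024
 *	pages_low  = 1024 + 1024/4 = 1280
 *	pages_high = 1024 + 1024/2 = 1536
 *
 * A highmem zone instead gets pages_min clamped between SWAP_CLUSTER_MAX
 * and 128 pages, while pages_low/pages_high keep the proportional deltas.
 */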
2490
2491/*
2492 * Initialise min_free_kbytes.
2493 *
2494 * For small machines we want it small (128k min).  For large machines
2495 * we want it large (64MB max).  But it is not linear, because network
2496 * bandwidth does not increase linearly with machine size.  We use
2497 *
2498 * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
2499 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
2500 *
2501 * which yields
2502 *
2503 * 16MB:	512k
2504 * 32MB:	724k
2505 * 64MB:	1024k
2506 * 128MB:	1448k
2507 * 256MB:	2048k
2508 * 512MB:	2896k
2509 * 1024MB:	4096k
2510 * 2048MB:	5792k
2511 * 4096MB:	8192k
2512 * 8192MB:	11584k
2513 * 16384MB:	16384k
2514 */
2515static int __init init_per_zone_pages_min(void)
2516{
2517	unsigned long lowmem_kbytes;
2518
2519	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
2520
2521	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
2522	if (min_free_kbytes < 128)
2523		min_free_kbytes = 128;
2524	if (min_free_kbytes > 65536)
2525		min_free_kbytes = 65536;
2526	setup_per_zone_pages_min();
2527	setup_per_zone_lowmem_reserve();
2528	return 0;
2529}
2530module_init(init_per_zone_pages_min)
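/*
 * Worked example for the formula above: with 1GB of lowmem,
 * lowmem_kbytes == 1048576, so
 *
 *	min_free_kbytes = int_sqrt(1048576 * 16) = int_sqrt(16777216) = 4096
 *
 * which matches the 1024MB row in the table and sits comfortably inside
 * the [128, 65536] clamp applied afterwards.
 */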
2531
2532/*
2533 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
2534 *	that we can rerun setup_per_zone_pages_min() whenever min_free_kbytes
2535 *	changes.
2536 */
2537int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
2538	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2539{
2540	proc_dointvec(table, write, file, buffer, length, ppos);
2541	setup_per_zone_pages_min();
2542	return 0;
2543}
2544
2545/*
2546 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
2547 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
2548 *	whenever sysctl_lowmem_reserve_ratio changes.
2549 *
2550 * The reserve ratio has no relation to the pages_min watermarks; it
2551 * is only meaningful relative to the zone sizes established at boot
2552 * time.
2553 */
2554int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
2555	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2556{
2557	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
2558	setup_per_zone_lowmem_reserve();
2559	return 0;
2560}
2561
2562__initdata int hashdist = HASHDIST_DEFAULT;
2563
2564#ifdef CONFIG_NUMA
2565static int __init set_hashdist(char *str)
2566{
2567	if (!str)
2568		return 0;
2569	hashdist = simple_strtoul(str, &str, 0);
2570	return 1;
2571}
2572__setup("hashdist=", set_hashdist);
2573#endif
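/*
 * Usage note: on NUMA kernels the placement of the large hashes can be
 * chosen from the kernel command line, e.g. booting with
 *
 *	hashdist=1
 *
 * makes alloc_large_system_hash() below take the __vmalloc() path, so
 * the table is not forced into one node's contiguous memory.
 */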
2574
2575/*
2576 * allocate a large system hash table (bootmem, vmalloc or page allocator)
2577 * - it is assumed that the hash table must contain an exact power-of-2
2578 *   quantity of entries
2579 * - limit is the number of hash buckets, not the total allocation size
2580 */
2581void *__init alloc_large_system_hash(const char *tablename,
2582				     unsigned long bucketsize,
2583				     unsigned long numentries,
2584				     int scale,
2585				     int flags,
2586				     unsigned int *_hash_shift,
2587				     unsigned int *_hash_mask,
2588				     unsigned long limit)
2589{
2590	unsigned long long max = limit;
2591	unsigned long log2qty, size;
2592	void *table = NULL;
2593
2594	/* allow the kernel cmdline to have a say */
2595	if (!numentries) {
2596		/* round applicable memory size up to nearest megabyte */
2597		numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
2598		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
2599		numentries >>= 20 - PAGE_SHIFT;
2600		numentries <<= 20 - PAGE_SHIFT;
2601
2602		/* limit to 1 bucket per 2^scale bytes of low memory */
2603		if (scale > PAGE_SHIFT)
2604			numentries >>= (scale - PAGE_SHIFT);
2605		else
2606			numentries <<= (PAGE_SHIFT - scale);
2607	}
2608	/* rounded up to nearest power of 2 in size */
2609	numentries = 1UL << (long_log2(numentries) + 1);
2610
2611	/* limit allocation size to 1/16 total memory by default */
2612	if (max == 0) {
2613		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
2614		do_div(max, bucketsize);
2615	}
2616
2617	if (numentries > max)
2618		numentries = max;
2619
2620	log2qty = long_log2(numentries);
2621
2622	do {
2623		size = bucketsize << log2qty;
2624		if (flags & HASH_EARLY)
2625			table = alloc_bootmem(size);
2626		else if (hashdist)
2627			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
2628		else {
2629			unsigned long order;
2630			for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
2631				;
2632			table = (void*) __get_free_pages(GFP_ATOMIC, order);
2633		}
2634	} while (!table && size > PAGE_SIZE && --log2qty);
2635
2636	if (!table)
2637		panic("Failed to allocate %s hash table\n", tablename);
2638
2639	printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
2640	       tablename,
2641	       (1U << log2qty),
2642	       long_log2(size) - PAGE_SHIFT,
2643	       size);
2644
2645	if (_hash_shift)
2646		*_hash_shift = log2qty;
2647	if (_hash_mask)
2648		*_hash_mask = (1 << log2qty) - 1;
2649
2650	return table;
2651}
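/*
 * Illustrative caller (hypothetical subsystem; the names are made up and
 * not taken from this kernel): a hash of hlist heads sized from available
 * memory at one bucket per 16KB of lowmem could be set up as
 *
 *	static struct hlist_head *ex_hash;
 *	static unsigned int ex_hash_shift, ex_hash_mask;
 *
 *	ex_hash = alloc_large_system_hash("Example-cache",
 *					  sizeof(struct hlist_head),
 *					  0,		-- size from memory
 *					  14,		-- 2^14 bytes per bucket
 *					  HASH_EARLY,	-- allocate from bootmem
 *					  &ex_hash_shift,
 *					  &ex_hash_mask,
 *					  0);		-- default 1/16 limit
 *
 * with buckets then indexed as ex_hash[hash & ex_hash_mask].
 */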
2652