page_alloc.c revision 8d22ba1b74aa9420b6032d856446564fb21f8090
1/*
2 *  linux/mm/page_alloc.c
3 *
4 *  Manages the free list, the system allocates free pages here.
5 *  Note that kmalloc() lives in slab.c
6 *
7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8 *  Swap reorganised 29.12.95, Stephen Tweedie
9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
22#include <linux/jiffies.h>
23#include <linux/bootmem.h>
24#include <linux/compiler.h>
25#include <linux/kernel.h>
26#include <linux/kmemcheck.h>
27#include <linux/module.h>
28#include <linux/suspend.h>
29#include <linux/pagevec.h>
30#include <linux/blkdev.h>
31#include <linux/slab.h>
32#include <linux/oom.h>
33#include <linux/notifier.h>
34#include <linux/topology.h>
35#include <linux/sysctl.h>
36#include <linux/cpu.h>
37#include <linux/cpuset.h>
38#include <linux/memory_hotplug.h>
39#include <linux/nodemask.h>
40#include <linux/vmalloc.h>
41#include <linux/mempolicy.h>
42#include <linux/stop_machine.h>
43#include <linux/sort.h>
44#include <linux/pfn.h>
45#include <linux/backing-dev.h>
46#include <linux/fault-inject.h>
47#include <linux/page-isolation.h>
48#include <linux/page_cgroup.h>
49#include <linux/debugobjects.h>
50#include <linux/kmemleak.h>
51#include <trace/events/kmem.h>
52
53#include <asm/tlbflush.h>
54#include <asm/div64.h>
55#include "internal.h"
56
57/*
58 * Array of node states.
59 */
60nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
61	[N_POSSIBLE] = NODE_MASK_ALL,
62	[N_ONLINE] = { { [0] = 1UL } },
63#ifndef CONFIG_NUMA
64	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
65#ifdef CONFIG_HIGHMEM
66	[N_HIGH_MEMORY] = { { [0] = 1UL } },
67#endif
68	[N_CPU] = { { [0] = 1UL } },
69#endif	/* NUMA */
70};
71EXPORT_SYMBOL(node_states);
72
73unsigned long totalram_pages __read_mostly;
74unsigned long totalreserve_pages __read_mostly;
75int percpu_pagelist_fraction;
76gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
77
78#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
79int pageblock_order __read_mostly;
80#endif
81
82static void __free_pages_ok(struct page *page, unsigned int order);
83
84/*
85 * results with 256, 32 in the lowmem_reserve sysctl:
86 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
87 *	1G machine -> (16M dma, 784M normal, 224M high)
88 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
89 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
90 *	HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
91 *
92 * TBD: should special case ZONE_DMA32 machines here - in those we normally
93 * don't need any ZONE_NORMAL reservation
94 */
95int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
96#ifdef CONFIG_ZONE_DMA
97	 256,
98#endif
99#ifdef CONFIG_ZONE_DMA32
100	 256,
101#endif
102#ifdef CONFIG_HIGHMEM
103	 32,
104#endif
105	 32,
106};
107
108EXPORT_SYMBOL(totalram_pages);
109
110static char * const zone_names[MAX_NR_ZONES] = {
111#ifdef CONFIG_ZONE_DMA
112	 "DMA",
113#endif
114#ifdef CONFIG_ZONE_DMA32
115	 "DMA32",
116#endif
117	 "Normal",
118#ifdef CONFIG_HIGHMEM
119	 "HighMem",
120#endif
121	 "Movable",
122};
123
124int min_free_kbytes = 1024;
125
126static unsigned long __meminitdata nr_kernel_pages;
127static unsigned long __meminitdata nr_all_pages;
128static unsigned long __meminitdata dma_reserve;
129
130#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
131  /*
132   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
133   * ranges of memory (RAM) that may be registered with add_active_range().
134   * Ranges passed to add_active_range() will be merged if possible
135   * so the number of times add_active_range() can be called is
136   * related to the number of nodes and the number of holes
137   */
138  #ifdef CONFIG_MAX_ACTIVE_REGIONS
139    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
140    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
141  #else
142    #if MAX_NUMNODES >= 32
143      /* If there can be many nodes, allow up to 50 holes per node */
144      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
145    #else
146      /* By default, allow up to 256 distinct regions */
147      #define MAX_ACTIVE_REGIONS 256
148    #endif
149  #endif
150
151  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
152  static int __meminitdata nr_nodemap_entries;
153  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
154  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
155  static unsigned long __initdata required_kernelcore;
156  static unsigned long __initdata required_movablecore;
157  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
158
159  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
160  int movable_zone;
161  EXPORT_SYMBOL(movable_zone);
162#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
163
164#if MAX_NUMNODES > 1
165int nr_node_ids __read_mostly = MAX_NUMNODES;
166int nr_online_nodes __read_mostly = 1;
167EXPORT_SYMBOL(nr_node_ids);
168EXPORT_SYMBOL(nr_online_nodes);
169#endif
170
171int page_group_by_mobility_disabled __read_mostly;
172
173static void set_pageblock_migratetype(struct page *page, int migratetype)
174{
175
176	if (unlikely(page_group_by_mobility_disabled))
177		migratetype = MIGRATE_UNMOVABLE;
178
179	set_pageblock_flags_group(page, (unsigned long)migratetype,
180					PB_migrate, PB_migrate_end);
181}
182
183bool oom_killer_disabled __read_mostly;
184
185#ifdef CONFIG_DEBUG_VM
186static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
187{
188	int ret = 0;
189	unsigned seq;
190	unsigned long pfn = page_to_pfn(page);
191
192	do {
193		seq = zone_span_seqbegin(zone);
194		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
195			ret = 1;
196		else if (pfn < zone->zone_start_pfn)
197			ret = 1;
198	} while (zone_span_seqretry(zone, seq));
199
200	return ret;
201}
202
203static int page_is_consistent(struct zone *zone, struct page *page)
204{
205	if (!pfn_valid_within(page_to_pfn(page)))
206		return 0;
207	if (zone != page_zone(page))
208		return 0;
209
210	return 1;
211}
212/*
213 * Temporary debugging check for pages not lying within a given zone.
214 */
215static int bad_range(struct zone *zone, struct page *page)
216{
217	if (page_outside_zone_boundaries(zone, page))
218		return 1;
219	if (!page_is_consistent(zone, page))
220		return 1;
221
222	return 0;
223}
224#else
225static inline int bad_range(struct zone *zone, struct page *page)
226{
227	return 0;
228}
229#endif
230
231static void bad_page(struct page *page)
232{
233	static unsigned long resume;
234	static unsigned long nr_shown;
235	static unsigned long nr_unshown;
236
237	/* Don't complain about poisoned pages */
238	if (PageHWPoison(page)) {
239		__ClearPageBuddy(page);
240		return;
241	}
242
243	/*
244	 * Allow a burst of 60 reports, then keep quiet for that minute;
245	 * or allow a steady drip of one report per second.
246	 */
247	if (nr_shown == 60) {
248		if (time_before(jiffies, resume)) {
249			nr_unshown++;
250			goto out;
251		}
252		if (nr_unshown) {
253			printk(KERN_ALERT
254			      "BUG: Bad page state: %lu messages suppressed\n",
255				nr_unshown);
256			nr_unshown = 0;
257		}
258		nr_shown = 0;
259	}
260	if (nr_shown++ == 0)
261		resume = jiffies + 60 * HZ;
262
263	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
264		current->comm, page_to_pfn(page));
265	printk(KERN_ALERT
266		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
267		page, (void *)page->flags, page_count(page),
268		page_mapcount(page), page->mapping, page->index);
269
270	dump_stack();
271out:
272	/* Leave bad fields for debug, except PageBuddy could make trouble */
273	__ClearPageBuddy(page);
274	add_taint(TAINT_BAD_PAGE);
275}
276
277/*
278 * Higher-order pages are called "compound pages".  They are structured thusly:
279 *
280 * The first PAGE_SIZE page is called the "head page".
281 *
282 * The remaining PAGE_SIZE pages are called "tail pages".
283 *
284 * All pages have PG_compound set.  All pages have their ->private pointing at
285 * the head page (even the head page has this).
286 *
287 * The first tail page's ->lru.next holds the address of the compound page's
288 * put_page() function.  Its ->lru.prev holds the order of allocation.
289 * This usage means that zero-order pages may not be compound.
290 */
291
292static void free_compound_page(struct page *page)
293{
294	__free_pages_ok(page, compound_order(page));
295}
296
297void prep_compound_page(struct page *page, unsigned long order)
298{
299	int i;
300	int nr_pages = 1 << order;
301
302	set_compound_page_dtor(page, free_compound_page);
303	set_compound_order(page, order);
304	__SetPageHead(page);
305	for (i = 1; i < nr_pages; i++) {
306		struct page *p = page + i;
307
308		__SetPageTail(p);
309		p->first_page = page;
310	}
311}
312
313static int destroy_compound_page(struct page *page, unsigned long order)
314{
315	int i;
316	int nr_pages = 1 << order;
317	int bad = 0;
318
319	if (unlikely(compound_order(page) != order) ||
320	    unlikely(!PageHead(page))) {
321		bad_page(page);
322		bad++;
323	}
324
325	__ClearPageHead(page);
326
327	for (i = 1; i < nr_pages; i++) {
328		struct page *p = page + i;
329
330		if (unlikely(!PageTail(p) || (p->first_page != page))) {
331			bad_page(page);
332			bad++;
333		}
334		__ClearPageTail(p);
335	}
336
337	return bad;
338}
339
340static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
341{
342	int i;
343
344	/*
345	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
346	 * and __GFP_HIGHMEM from hard or soft interrupt context.
347	 */
348	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
349	for (i = 0; i < (1 << order); i++)
350		clear_highpage(page + i);
351}
352
353static inline void set_page_order(struct page *page, int order)
354{
355	set_page_private(page, order);
356	__SetPageBuddy(page);
357}
358
359static inline void rmv_page_order(struct page *page)
360{
361	__ClearPageBuddy(page);
362	set_page_private(page, 0);
363}
364
365/*
366 * Locate the struct page for both the matching buddy in our
367 * pair (buddy1) and the combined O(n+1) page they form (page).
368 *
369 * 1) Any buddy B1 will have an order O twin B2 which satisfies
370 * the following equation:
371 *     B2 = B1 ^ (1 << O)
372 * For example, if the starting buddy (buddy2) is #8 its order
373 * 1 buddy is #10:
374 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
375 *
376 * 2) Any buddy B will have an order O+1 parent P which
377 * satisfies the following equation:
378 *     P = B & ~(1 << O)
379 *
380 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
381 */
382static inline struct page *
383__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
384{
385	unsigned long buddy_idx = page_idx ^ (1 << order);
386
387	return page + (buddy_idx - page_idx);
388}
389
390static inline unsigned long
391__find_combined_index(unsigned long page_idx, unsigned int order)
392{
393	return (page_idx & ~(1 << order));
394}
395
396/*
397 * This function checks whether a page is free && is the buddy
398 * we can do coalesce a page and its buddy if
399 * (a) the buddy is not in a hole &&
400 * (b) the buddy is in the buddy system &&
401 * (c) a page and its buddy have the same order &&
402 * (d) a page and its buddy are in the same zone.
403 *
404 * For recording whether a page is in the buddy system, we use PG_buddy.
405 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
406 *
407 * For recording page's order, we use page_private(page).
408 */
409static inline int page_is_buddy(struct page *page, struct page *buddy,
410								int order)
411{
412	if (!pfn_valid_within(page_to_pfn(buddy)))
413		return 0;
414
415	if (page_zone_id(page) != page_zone_id(buddy))
416		return 0;
417
418	if (PageBuddy(buddy) && page_order(buddy) == order) {
419		VM_BUG_ON(page_count(buddy) != 0);
420		return 1;
421	}
422	return 0;
423}
424
425/*
426 * Freeing function for a buddy system allocator.
427 *
428 * The concept of a buddy system is to maintain direct-mapped table
429 * (containing bit values) for memory blocks of various "orders".
430 * The bottom level table contains the map for the smallest allocatable
431 * units of memory (here, pages), and each level above it describes
432 * pairs of units from the levels below, hence, "buddies".
433 * At a high level, all that happens here is marking the table entry
434 * at the bottom level available, and propagating the changes upward
435 * as necessary, plus some accounting needed to play nicely with other
436 * parts of the VM system.
437 * At each level, we keep a list of pages, which are heads of continuous
438 * free pages of length of (1 << order) and marked with PG_buddy. Page's
439 * order is recorded in page_private(page) field.
440 * So when we are allocating or freeing one, we can derive the state of the
441 * other.  That is, if we allocate a small block, and both were
442 * free, the remainder of the region must be split into blocks.
443 * If a block is freed, and its buddy is also free, then this
444 * triggers coalescing into a block of larger size.
445 *
446 * -- wli
447 */
448
449static inline void __free_one_page(struct page *page,
450		struct zone *zone, unsigned int order,
451		int migratetype)
452{
453	unsigned long page_idx;
454
455	if (unlikely(PageCompound(page)))
456		if (unlikely(destroy_compound_page(page, order)))
457			return;
458
459	VM_BUG_ON(migratetype == -1);
460
461	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
462
463	VM_BUG_ON(page_idx & ((1 << order) - 1));
464	VM_BUG_ON(bad_range(zone, page));
465
466	while (order < MAX_ORDER-1) {
467		unsigned long combined_idx;
468		struct page *buddy;
469
470		buddy = __page_find_buddy(page, page_idx, order);
471		if (!page_is_buddy(page, buddy, order))
472			break;
473
474		/* Our buddy is free, merge with it and move up one order. */
475		list_del(&buddy->lru);
476		zone->free_area[order].nr_free--;
477		rmv_page_order(buddy);
478		combined_idx = __find_combined_index(page_idx, order);
479		page = page + (combined_idx - page_idx);
480		page_idx = combined_idx;
481		order++;
482	}
483	set_page_order(page, order);
484	list_add(&page->lru,
485		&zone->free_area[order].free_list[migratetype]);
486	zone->free_area[order].nr_free++;
487}
488
489/*
490 * free_page_mlock() -- clean up attempts to free and mlocked() page.
491 * Page should not be on lru, so no need to fix that up.
492 * free_pages_check() will verify...
493 */
494static inline void free_page_mlock(struct page *page)
495{
496	__dec_zone_page_state(page, NR_MLOCK);
497	__count_vm_event(UNEVICTABLE_MLOCKFREED);
498}
499
500static inline int free_pages_check(struct page *page)
501{
502	if (unlikely(page_mapcount(page) |
503		(page->mapping != NULL)  |
504		(atomic_read(&page->_count) != 0) |
505		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
506		bad_page(page);
507		return 1;
508	}
509	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
510		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
511	return 0;
512}
513
514/*
515 * Frees a number of pages from the PCP lists
516 * Assumes all pages on list are in same zone, and of same order.
517 * count is the number of pages to free.
518 *
519 * If the zone was previously in an "all pages pinned" state then look to
520 * see if this freeing clears that state.
521 *
522 * And clear the zone's pages_scanned counter, to hold off the "all pages are
523 * pinned" detection logic.
524 */
525static void free_pcppages_bulk(struct zone *zone, int count,
526					struct per_cpu_pages *pcp)
527{
528	int migratetype = 0;
529	int batch_free = 0;
530
531	spin_lock(&zone->lock);
532	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
533	zone->pages_scanned = 0;
534
535	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
536	while (count) {
537		struct page *page;
538		struct list_head *list;
539
540		/*
541		 * Remove pages from lists in a round-robin fashion. A
542		 * batch_free count is maintained that is incremented when an
543		 * empty list is encountered.  This is so more pages are freed
544		 * off fuller lists instead of spinning excessively around empty
545		 * lists
546		 */
547		do {
548			batch_free++;
549			if (++migratetype == MIGRATE_PCPTYPES)
550				migratetype = 0;
551			list = &pcp->lists[migratetype];
552		} while (list_empty(list));
553
554		do {
555			page = list_entry(list->prev, struct page, lru);
556			/* must delete as __free_one_page list manipulates */
557			list_del(&page->lru);
558			__free_one_page(page, zone, 0, migratetype);
559			trace_mm_page_pcpu_drain(page, 0, migratetype);
560		} while (--count && --batch_free && !list_empty(list));
561	}
562	spin_unlock(&zone->lock);
563}
564
565static void free_one_page(struct zone *zone, struct page *page, int order,
566				int migratetype)
567{
568	spin_lock(&zone->lock);
569	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
570	zone->pages_scanned = 0;
571
572	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
573	__free_one_page(page, zone, order, migratetype);
574	spin_unlock(&zone->lock);
575}
576
577static void __free_pages_ok(struct page *page, unsigned int order)
578{
579	unsigned long flags;
580	int i;
581	int bad = 0;
582	int wasMlocked = __TestClearPageMlocked(page);
583
584	kmemcheck_free_shadow(page, order);
585
586	for (i = 0 ; i < (1 << order) ; ++i)
587		bad += free_pages_check(page + i);
588	if (bad)
589		return;
590
591	if (!PageHighMem(page)) {
592		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
593		debug_check_no_obj_freed(page_address(page),
594					   PAGE_SIZE << order);
595	}
596	arch_free_page(page, order);
597	kernel_map_pages(page, 1 << order, 0);
598
599	local_irq_save(flags);
600	if (unlikely(wasMlocked))
601		free_page_mlock(page);
602	__count_vm_events(PGFREE, 1 << order);
603	free_one_page(page_zone(page), page, order,
604					get_pageblock_migratetype(page));
605	local_irq_restore(flags);
606}
607
608/*
609 * permit the bootmem allocator to evade page validation on high-order frees
610 */
611void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
612{
613	if (order == 0) {
614		__ClearPageReserved(page);
615		set_page_count(page, 0);
616		set_page_refcounted(page);
617		__free_page(page);
618	} else {
619		int loop;
620
621		prefetchw(page);
622		for (loop = 0; loop < BITS_PER_LONG; loop++) {
623			struct page *p = &page[loop];
624
625			if (loop + 1 < BITS_PER_LONG)
626				prefetchw(p + 1);
627			__ClearPageReserved(p);
628			set_page_count(p, 0);
629		}
630
631		set_page_refcounted(page);
632		__free_pages(page, order);
633	}
634}
635
636
637/*
638 * The order of subdivision here is critical for the IO subsystem.
639 * Please do not alter this order without good reasons and regression
640 * testing. Specifically, as large blocks of memory are subdivided,
641 * the order in which smaller blocks are delivered depends on the order
642 * they're subdivided in this function. This is the primary factor
643 * influencing the order in which pages are delivered to the IO
644 * subsystem according to empirical testing, and this is also justified
645 * by considering the behavior of a buddy system containing a single
646 * large block of memory acted on by a series of small allocations.
647 * This behavior is a critical factor in sglist merging's success.
648 *
649 * -- wli
650 */
651static inline void expand(struct zone *zone, struct page *page,
652	int low, int high, struct free_area *area,
653	int migratetype)
654{
655	unsigned long size = 1 << high;
656
657	while (high > low) {
658		area--;
659		high--;
660		size >>= 1;
661		VM_BUG_ON(bad_range(zone, &page[size]));
662		list_add(&page[size].lru, &area->free_list[migratetype]);
663		area->nr_free++;
664		set_page_order(&page[size], high);
665	}
666}
667
668/*
669 * This page is about to be returned from the page allocator
670 */
671static inline int check_new_page(struct page *page)
672{
673	if (unlikely(page_mapcount(page) |
674		(page->mapping != NULL)  |
675		(atomic_read(&page->_count) != 0)  |
676		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
677		bad_page(page);
678		return 1;
679	}
680	return 0;
681}
682
683static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
684{
685	int i;
686
687	for (i = 0; i < (1 << order); i++) {
688		struct page *p = page + i;
689		if (unlikely(check_new_page(p)))
690			return 1;
691	}
692
693	set_page_private(page, 0);
694	set_page_refcounted(page);
695
696	arch_alloc_page(page, order);
697	kernel_map_pages(page, 1 << order, 1);
698
699	if (gfp_flags & __GFP_ZERO)
700		prep_zero_page(page, order, gfp_flags);
701
702	if (order && (gfp_flags & __GFP_COMP))
703		prep_compound_page(page, order);
704
705	return 0;
706}
707
708/*
709 * Go through the free lists for the given migratetype and remove
710 * the smallest available page from the freelists
711 */
712static inline
713struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
714						int migratetype)
715{
716	unsigned int current_order;
717	struct free_area * area;
718	struct page *page;
719
720	/* Find a page of the appropriate size in the preferred list */
721	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
722		area = &(zone->free_area[current_order]);
723		if (list_empty(&area->free_list[migratetype]))
724			continue;
725
726		page = list_entry(area->free_list[migratetype].next,
727							struct page, lru);
728		list_del(&page->lru);
729		rmv_page_order(page);
730		area->nr_free--;
731		expand(zone, page, order, current_order, area, migratetype);
732		return page;
733	}
734
735	return NULL;
736}
737
738
739/*
740 * This array describes the order lists are fallen back to when
741 * the free lists for the desirable migrate type are depleted
742 */
743static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
744	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
745	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
746	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
747	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
748};
749
750/*
751 * Move the free pages in a range to the free lists of the requested type.
752 * Note that start_page and end_pages are not aligned on a pageblock
753 * boundary. If alignment is required, use move_freepages_block()
754 */
755static int move_freepages(struct zone *zone,
756			  struct page *start_page, struct page *end_page,
757			  int migratetype)
758{
759	struct page *page;
760	unsigned long order;
761	int pages_moved = 0;
762
763#ifndef CONFIG_HOLES_IN_ZONE
764	/*
765	 * page_zone is not safe to call in this context when
766	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
767	 * anyway as we check zone boundaries in move_freepages_block().
768	 * Remove at a later date when no bug reports exist related to
769	 * grouping pages by mobility
770	 */
771	BUG_ON(page_zone(start_page) != page_zone(end_page));
772#endif
773
774	for (page = start_page; page <= end_page;) {
775		/* Make sure we are not inadvertently changing nodes */
776		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
777
778		if (!pfn_valid_within(page_to_pfn(page))) {
779			page++;
780			continue;
781		}
782
783		if (!PageBuddy(page)) {
784			page++;
785			continue;
786		}
787
788		order = page_order(page);
789		list_del(&page->lru);
790		list_add(&page->lru,
791			&zone->free_area[order].free_list[migratetype]);
792		page += 1 << order;
793		pages_moved += 1 << order;
794	}
795
796	return pages_moved;
797}
798
799static int move_freepages_block(struct zone *zone, struct page *page,
800				int migratetype)
801{
802	unsigned long start_pfn, end_pfn;
803	struct page *start_page, *end_page;
804
805	start_pfn = page_to_pfn(page);
806	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
807	start_page = pfn_to_page(start_pfn);
808	end_page = start_page + pageblock_nr_pages - 1;
809	end_pfn = start_pfn + pageblock_nr_pages - 1;
810
811	/* Do not cross zone boundaries */
812	if (start_pfn < zone->zone_start_pfn)
813		start_page = page;
814	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
815		return 0;
816
817	return move_freepages(zone, start_page, end_page, migratetype);
818}
819
820static void change_pageblock_range(struct page *pageblock_page,
821					int start_order, int migratetype)
822{
823	int nr_pageblocks = 1 << (start_order - pageblock_order);
824
825	while (nr_pageblocks--) {
826		set_pageblock_migratetype(pageblock_page, migratetype);
827		pageblock_page += pageblock_nr_pages;
828	}
829}
830
831/* Remove an element from the buddy allocator from the fallback list */
832static inline struct page *
833__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
834{
835	struct free_area * area;
836	int current_order;
837	struct page *page;
838	int migratetype, i;
839
840	/* Find the largest possible block of pages in the other list */
841	for (current_order = MAX_ORDER-1; current_order >= order;
842						--current_order) {
843		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
844			migratetype = fallbacks[start_migratetype][i];
845
846			/* MIGRATE_RESERVE handled later if necessary */
847			if (migratetype == MIGRATE_RESERVE)
848				continue;
849
850			area = &(zone->free_area[current_order]);
851			if (list_empty(&area->free_list[migratetype]))
852				continue;
853
854			page = list_entry(area->free_list[migratetype].next,
855					struct page, lru);
856			area->nr_free--;
857
858			/*
859			 * If breaking a large block of pages, move all free
860			 * pages to the preferred allocation list. If falling
861			 * back for a reclaimable kernel allocation, be more
862			 * agressive about taking ownership of free pages
863			 */
864			if (unlikely(current_order >= (pageblock_order >> 1)) ||
865					start_migratetype == MIGRATE_RECLAIMABLE ||
866					page_group_by_mobility_disabled) {
867				unsigned long pages;
868				pages = move_freepages_block(zone, page,
869								start_migratetype);
870
871				/* Claim the whole block if over half of it is free */
872				if (pages >= (1 << (pageblock_order-1)) ||
873						page_group_by_mobility_disabled)
874					set_pageblock_migratetype(page,
875								start_migratetype);
876
877				migratetype = start_migratetype;
878			}
879
880			/* Remove the page from the freelists */
881			list_del(&page->lru);
882			rmv_page_order(page);
883
884			/* Take ownership for orders >= pageblock_order */
885			if (current_order >= pageblock_order)
886				change_pageblock_range(page, current_order,
887							start_migratetype);
888
889			expand(zone, page, order, current_order, area, migratetype);
890
891			trace_mm_page_alloc_extfrag(page, order, current_order,
892				start_migratetype, migratetype);
893
894			return page;
895		}
896	}
897
898	return NULL;
899}
900
901/*
902 * Do the hard work of removing an element from the buddy allocator.
903 * Call me with the zone->lock already held.
904 */
905static struct page *__rmqueue(struct zone *zone, unsigned int order,
906						int migratetype)
907{
908	struct page *page;
909
910retry_reserve:
911	page = __rmqueue_smallest(zone, order, migratetype);
912
913	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
914		page = __rmqueue_fallback(zone, order, migratetype);
915
916		/*
917		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
918		 * is used because __rmqueue_smallest is an inline function
919		 * and we want just one call site
920		 */
921		if (!page) {
922			migratetype = MIGRATE_RESERVE;
923			goto retry_reserve;
924		}
925	}
926
927	trace_mm_page_alloc_zone_locked(page, order, migratetype);
928	return page;
929}
930
931/*
932 * Obtain a specified number of elements from the buddy allocator, all under
933 * a single hold of the lock, for efficiency.  Add them to the supplied list.
934 * Returns the number of new pages which were placed at *list.
935 */
936static int rmqueue_bulk(struct zone *zone, unsigned int order,
937			unsigned long count, struct list_head *list,
938			int migratetype, int cold)
939{
940	int i;
941
942	spin_lock(&zone->lock);
943	for (i = 0; i < count; ++i) {
944		struct page *page = __rmqueue(zone, order, migratetype);
945		if (unlikely(page == NULL))
946			break;
947
948		/*
949		 * Split buddy pages returned by expand() are received here
950		 * in physical page order. The page is added to the callers and
951		 * list and the list head then moves forward. From the callers
952		 * perspective, the linked list is ordered by page number in
953		 * some conditions. This is useful for IO devices that can
954		 * merge IO requests if the physical pages are ordered
955		 * properly.
956		 */
957		if (likely(cold == 0))
958			list_add(&page->lru, list);
959		else
960			list_add_tail(&page->lru, list);
961		set_page_private(page, migratetype);
962		list = &page->lru;
963	}
964	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
965	spin_unlock(&zone->lock);
966	return i;
967}
968
969#ifdef CONFIG_NUMA
970/*
971 * Called from the vmstat counter updater to drain pagesets of this
972 * currently executing processor on remote nodes after they have
973 * expired.
974 *
975 * Note that this function must be called with the thread pinned to
976 * a single processor.
977 */
978void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
979{
980	unsigned long flags;
981	int to_drain;
982
983	local_irq_save(flags);
984	if (pcp->count >= pcp->batch)
985		to_drain = pcp->batch;
986	else
987		to_drain = pcp->count;
988	free_pcppages_bulk(zone, to_drain, pcp);
989	pcp->count -= to_drain;
990	local_irq_restore(flags);
991}
992#endif
993
994/*
995 * Drain pages of the indicated processor.
996 *
997 * The processor must either be the current processor and the
998 * thread pinned to the current processor or a processor that
999 * is not online.
1000 */
1001static void drain_pages(unsigned int cpu)
1002{
1003	unsigned long flags;
1004	struct zone *zone;
1005
1006	for_each_populated_zone(zone) {
1007		struct per_cpu_pageset *pset;
1008		struct per_cpu_pages *pcp;
1009
1010		pset = zone_pcp(zone, cpu);
1011
1012		pcp = &pset->pcp;
1013		local_irq_save(flags);
1014		free_pcppages_bulk(zone, pcp->count, pcp);
1015		pcp->count = 0;
1016		local_irq_restore(flags);
1017	}
1018}
1019
1020/*
1021 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1022 */
1023void drain_local_pages(void *arg)
1024{
1025	drain_pages(smp_processor_id());
1026}
1027
1028/*
1029 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
1030 */
1031void drain_all_pages(void)
1032{
1033	on_each_cpu(drain_local_pages, NULL, 1);
1034}
1035
1036#ifdef CONFIG_HIBERNATION
1037
1038void mark_free_pages(struct zone *zone)
1039{
1040	unsigned long pfn, max_zone_pfn;
1041	unsigned long flags;
1042	int order, t;
1043	struct list_head *curr;
1044
1045	if (!zone->spanned_pages)
1046		return;
1047
1048	spin_lock_irqsave(&zone->lock, flags);
1049
1050	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1051	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1052		if (pfn_valid(pfn)) {
1053			struct page *page = pfn_to_page(pfn);
1054
1055			if (!swsusp_page_is_forbidden(page))
1056				swsusp_unset_page_free(page);
1057		}
1058
1059	for_each_migratetype_order(order, t) {
1060		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1061			unsigned long i;
1062
1063			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1064			for (i = 0; i < (1UL << order); i++)
1065				swsusp_set_page_free(pfn_to_page(pfn + i));
1066		}
1067	}
1068	spin_unlock_irqrestore(&zone->lock, flags);
1069}
1070#endif /* CONFIG_PM */
1071
1072/*
1073 * Free a 0-order page
1074 */
1075static void free_hot_cold_page(struct page *page, int cold)
1076{
1077	struct zone *zone = page_zone(page);
1078	struct per_cpu_pages *pcp;
1079	unsigned long flags;
1080	int migratetype;
1081	int wasMlocked = __TestClearPageMlocked(page);
1082
1083	kmemcheck_free_shadow(page, 0);
1084
1085	if (PageAnon(page))
1086		page->mapping = NULL;
1087	if (free_pages_check(page))
1088		return;
1089
1090	if (!PageHighMem(page)) {
1091		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
1092		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
1093	}
1094	arch_free_page(page, 0);
1095	kernel_map_pages(page, 1, 0);
1096
1097	pcp = &zone_pcp(zone, get_cpu())->pcp;
1098	migratetype = get_pageblock_migratetype(page);
1099	set_page_private(page, migratetype);
1100	local_irq_save(flags);
1101	if (unlikely(wasMlocked))
1102		free_page_mlock(page);
1103	__count_vm_event(PGFREE);
1104
1105	/*
1106	 * We only track unmovable, reclaimable and movable on pcp lists.
1107	 * Free ISOLATE pages back to the allocator because they are being
1108	 * offlined but treat RESERVE as movable pages so we can get those
1109	 * areas back if necessary. Otherwise, we may have to free
1110	 * excessively into the page allocator
1111	 */
1112	if (migratetype >= MIGRATE_PCPTYPES) {
1113		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1114			free_one_page(zone, page, 0, migratetype);
1115			goto out;
1116		}
1117		migratetype = MIGRATE_MOVABLE;
1118	}
1119
1120	if (cold)
1121		list_add_tail(&page->lru, &pcp->lists[migratetype]);
1122	else
1123		list_add(&page->lru, &pcp->lists[migratetype]);
1124	pcp->count++;
1125	if (pcp->count >= pcp->high) {
1126		free_pcppages_bulk(zone, pcp->batch, pcp);
1127		pcp->count -= pcp->batch;
1128	}
1129
1130out:
1131	local_irq_restore(flags);
1132	put_cpu();
1133}
1134
1135void free_hot_page(struct page *page)
1136{
1137	trace_mm_page_free_direct(page, 0);
1138	free_hot_cold_page(page, 0);
1139}
1140
1141/*
1142 * split_page takes a non-compound higher-order page, and splits it into
1143 * n (1<<order) sub-pages: page[0..n]
1144 * Each sub-page must be freed individually.
1145 *
1146 * Note: this is probably too low level an operation for use in drivers.
1147 * Please consult with lkml before using this in your driver.
1148 */
1149void split_page(struct page *page, unsigned int order)
1150{
1151	int i;
1152
1153	VM_BUG_ON(PageCompound(page));
1154	VM_BUG_ON(!page_count(page));
1155
1156#ifdef CONFIG_KMEMCHECK
1157	/*
1158	 * Split shadow pages too, because free(page[0]) would
1159	 * otherwise free the whole shadow.
1160	 */
1161	if (kmemcheck_page_is_tracked(page))
1162		split_page(virt_to_page(page[0].shadow), order);
1163#endif
1164
1165	for (i = 1; i < (1 << order); i++)
1166		set_page_refcounted(page + i);
1167}
1168
1169/*
1170 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1171 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1172 * or two.
1173 */
1174static inline
1175struct page *buffered_rmqueue(struct zone *preferred_zone,
1176			struct zone *zone, int order, gfp_t gfp_flags,
1177			int migratetype)
1178{
1179	unsigned long flags;
1180	struct page *page;
1181	int cold = !!(gfp_flags & __GFP_COLD);
1182	int cpu;
1183
1184again:
1185	cpu  = get_cpu();
1186	if (likely(order == 0)) {
1187		struct per_cpu_pages *pcp;
1188		struct list_head *list;
1189
1190		pcp = &zone_pcp(zone, cpu)->pcp;
1191		list = &pcp->lists[migratetype];
1192		local_irq_save(flags);
1193		if (list_empty(list)) {
1194			pcp->count += rmqueue_bulk(zone, 0,
1195					pcp->batch, list,
1196					migratetype, cold);
1197			if (unlikely(list_empty(list)))
1198				goto failed;
1199		}
1200
1201		if (cold)
1202			page = list_entry(list->prev, struct page, lru);
1203		else
1204			page = list_entry(list->next, struct page, lru);
1205
1206		list_del(&page->lru);
1207		pcp->count--;
1208	} else {
1209		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1210			/*
1211			 * __GFP_NOFAIL is not to be used in new code.
1212			 *
1213			 * All __GFP_NOFAIL callers should be fixed so that they
1214			 * properly detect and handle allocation failures.
1215			 *
1216			 * We most definitely don't want callers attempting to
1217			 * allocate greater than order-1 page units with
1218			 * __GFP_NOFAIL.
1219			 */
1220			WARN_ON_ONCE(order > 1);
1221		}
1222		spin_lock_irqsave(&zone->lock, flags);
1223		page = __rmqueue(zone, order, migratetype);
1224		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1225		spin_unlock(&zone->lock);
1226		if (!page)
1227			goto failed;
1228	}
1229
1230	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1231	zone_statistics(preferred_zone, zone);
1232	local_irq_restore(flags);
1233	put_cpu();
1234
1235	VM_BUG_ON(bad_range(zone, page));
1236	if (prep_new_page(page, order, gfp_flags))
1237		goto again;
1238	return page;
1239
1240failed:
1241	local_irq_restore(flags);
1242	put_cpu();
1243	return NULL;
1244}
1245
1246/* The ALLOC_WMARK bits are used as an index to zone->watermark */
1247#define ALLOC_WMARK_MIN		WMARK_MIN
1248#define ALLOC_WMARK_LOW		WMARK_LOW
1249#define ALLOC_WMARK_HIGH	WMARK_HIGH
1250#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1251
1252/* Mask to get the watermark bits */
1253#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1254
1255#define ALLOC_HARDER		0x10 /* try to alloc harder */
1256#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1257#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1258
1259#ifdef CONFIG_FAIL_PAGE_ALLOC
1260
1261static struct fail_page_alloc_attr {
1262	struct fault_attr attr;
1263
1264	u32 ignore_gfp_highmem;
1265	u32 ignore_gfp_wait;
1266	u32 min_order;
1267
1268#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1269
1270	struct dentry *ignore_gfp_highmem_file;
1271	struct dentry *ignore_gfp_wait_file;
1272	struct dentry *min_order_file;
1273
1274#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1275
1276} fail_page_alloc = {
1277	.attr = FAULT_ATTR_INITIALIZER,
1278	.ignore_gfp_wait = 1,
1279	.ignore_gfp_highmem = 1,
1280	.min_order = 1,
1281};
1282
1283static int __init setup_fail_page_alloc(char *str)
1284{
1285	return setup_fault_attr(&fail_page_alloc.attr, str);
1286}
1287__setup("fail_page_alloc=", setup_fail_page_alloc);
1288
1289static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1290{
1291	if (order < fail_page_alloc.min_order)
1292		return 0;
1293	if (gfp_mask & __GFP_NOFAIL)
1294		return 0;
1295	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1296		return 0;
1297	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1298		return 0;
1299
1300	return should_fail(&fail_page_alloc.attr, 1 << order);
1301}
1302
1303#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1304
1305static int __init fail_page_alloc_debugfs(void)
1306{
1307	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1308	struct dentry *dir;
1309	int err;
1310
1311	err = init_fault_attr_dentries(&fail_page_alloc.attr,
1312				       "fail_page_alloc");
1313	if (err)
1314		return err;
1315	dir = fail_page_alloc.attr.dentries.dir;
1316
1317	fail_page_alloc.ignore_gfp_wait_file =
1318		debugfs_create_bool("ignore-gfp-wait", mode, dir,
1319				      &fail_page_alloc.ignore_gfp_wait);
1320
1321	fail_page_alloc.ignore_gfp_highmem_file =
1322		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1323				      &fail_page_alloc.ignore_gfp_highmem);
1324	fail_page_alloc.min_order_file =
1325		debugfs_create_u32("min-order", mode, dir,
1326				   &fail_page_alloc.min_order);
1327
1328	if (!fail_page_alloc.ignore_gfp_wait_file ||
1329            !fail_page_alloc.ignore_gfp_highmem_file ||
1330            !fail_page_alloc.min_order_file) {
1331		err = -ENOMEM;
1332		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1333		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1334		debugfs_remove(fail_page_alloc.min_order_file);
1335		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1336	}
1337
1338	return err;
1339}
1340
1341late_initcall(fail_page_alloc_debugfs);
1342
1343#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1344
1345#else /* CONFIG_FAIL_PAGE_ALLOC */
1346
1347static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1348{
1349	return 0;
1350}
1351
1352#endif /* CONFIG_FAIL_PAGE_ALLOC */
1353
1354/*
1355 * Return 1 if free pages are above 'mark'. This takes into account the order
1356 * of the allocation.
1357 */
1358int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1359		      int classzone_idx, int alloc_flags)
1360{
1361	/* free_pages my go negative - that's OK */
1362	long min = mark;
1363	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1364	int o;
1365
1366	if (alloc_flags & ALLOC_HIGH)
1367		min -= min / 2;
1368	if (alloc_flags & ALLOC_HARDER)
1369		min -= min / 4;
1370
1371	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1372		return 0;
1373	for (o = 0; o < order; o++) {
1374		/* At the next order, this order's pages become unavailable */
1375		free_pages -= z->free_area[o].nr_free << o;
1376
1377		/* Require fewer higher order pages to be free */
1378		min >>= 1;
1379
1380		if (free_pages <= min)
1381			return 0;
1382	}
1383	return 1;
1384}
1385
1386#ifdef CONFIG_NUMA
1387/*
1388 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1389 * skip over zones that are not allowed by the cpuset, or that have
1390 * been recently (in last second) found to be nearly full.  See further
1391 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1392 * that have to skip over a lot of full or unallowed zones.
1393 *
1394 * If the zonelist cache is present in the passed in zonelist, then
1395 * returns a pointer to the allowed node mask (either the current
1396 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
1397 *
1398 * If the zonelist cache is not available for this zonelist, does
1399 * nothing and returns NULL.
1400 *
1401 * If the fullzones BITMAP in the zonelist cache is stale (more than
1402 * a second since last zap'd) then we zap it out (clear its bits.)
1403 *
1404 * We hold off even calling zlc_setup, until after we've checked the
1405 * first zone in the zonelist, on the theory that most allocations will
1406 * be satisfied from that first zone, so best to examine that zone as
1407 * quickly as we can.
1408 */
1409static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1410{
1411	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1412	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1413
1414	zlc = zonelist->zlcache_ptr;
1415	if (!zlc)
1416		return NULL;
1417
1418	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1419		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1420		zlc->last_full_zap = jiffies;
1421	}
1422
1423	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1424					&cpuset_current_mems_allowed :
1425					&node_states[N_HIGH_MEMORY];
1426	return allowednodes;
1427}
1428
1429/*
1430 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1431 * if it is worth looking at further for free memory:
1432 *  1) Check that the zone isn't thought to be full (doesn't have its
1433 *     bit set in the zonelist_cache fullzones BITMAP).
1434 *  2) Check that the zones node (obtained from the zonelist_cache
1435 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1436 * Return true (non-zero) if zone is worth looking at further, or
1437 * else return false (zero) if it is not.
1438 *
1439 * This check -ignores- the distinction between various watermarks,
1440 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1441 * found to be full for any variation of these watermarks, it will
1442 * be considered full for up to one second by all requests, unless
1443 * we are so low on memory on all allowed nodes that we are forced
1444 * into the second scan of the zonelist.
1445 *
1446 * In the second scan we ignore this zonelist cache and exactly
1447 * apply the watermarks to all zones, even it is slower to do so.
1448 * We are low on memory in the second scan, and should leave no stone
1449 * unturned looking for a free page.
1450 */
1451static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1452						nodemask_t *allowednodes)
1453{
1454	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1455	int i;				/* index of *z in zonelist zones */
1456	int n;				/* node that zone *z is on */
1457
1458	zlc = zonelist->zlcache_ptr;
1459	if (!zlc)
1460		return 1;
1461
1462	i = z - zonelist->_zonerefs;
1463	n = zlc->z_to_n[i];
1464
1465	/* This zone is worth trying if it is allowed but not full */
1466	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1467}
1468
1469/*
1470 * Given 'z' scanning a zonelist, set the corresponding bit in
1471 * zlc->fullzones, so that subsequent attempts to allocate a page
1472 * from that zone don't waste time re-examining it.
1473 */
1474static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1475{
1476	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1477	int i;				/* index of *z in zonelist zones */
1478
1479	zlc = zonelist->zlcache_ptr;
1480	if (!zlc)
1481		return;
1482
1483	i = z - zonelist->_zonerefs;
1484
1485	set_bit(i, zlc->fullzones);
1486}
1487
1488#else	/* CONFIG_NUMA */
1489
1490static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1491{
1492	return NULL;
1493}
1494
1495static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1496				nodemask_t *allowednodes)
1497{
1498	return 1;
1499}
1500
1501static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1502{
1503}
1504#endif	/* CONFIG_NUMA */
1505
1506/*
1507 * get_page_from_freelist goes through the zonelist trying to allocate
1508 * a page.
1509 */
1510static struct page *
1511get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1512		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1513		struct zone *preferred_zone, int migratetype)
1514{
1515	struct zoneref *z;
1516	struct page *page = NULL;
1517	int classzone_idx;
1518	struct zone *zone;
1519	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1520	int zlc_active = 0;		/* set if using zonelist_cache */
1521	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1522
1523	classzone_idx = zone_idx(preferred_zone);
1524zonelist_scan:
1525	/*
1526	 * Scan zonelist, looking for a zone with enough free.
1527	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1528	 */
1529	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1530						high_zoneidx, nodemask) {
1531		if (NUMA_BUILD && zlc_active &&
1532			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1533				continue;
1534		if ((alloc_flags & ALLOC_CPUSET) &&
1535			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1536				goto try_next_zone;
1537
1538		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1539		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1540			unsigned long mark;
1541			int ret;
1542
1543			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1544			if (zone_watermark_ok(zone, order, mark,
1545				    classzone_idx, alloc_flags))
1546				goto try_this_zone;
1547
1548			if (zone_reclaim_mode == 0)
1549				goto this_zone_full;
1550
1551			ret = zone_reclaim(zone, gfp_mask, order);
1552			switch (ret) {
1553			case ZONE_RECLAIM_NOSCAN:
1554				/* did not scan */
1555				goto try_next_zone;
1556			case ZONE_RECLAIM_FULL:
1557				/* scanned but unreclaimable */
1558				goto this_zone_full;
1559			default:
1560				/* did we reclaim enough */
1561				if (!zone_watermark_ok(zone, order, mark,
1562						classzone_idx, alloc_flags))
1563					goto this_zone_full;
1564			}
1565		}
1566
1567try_this_zone:
1568		page = buffered_rmqueue(preferred_zone, zone, order,
1569						gfp_mask, migratetype);
1570		if (page)
1571			break;
1572this_zone_full:
1573		if (NUMA_BUILD)
1574			zlc_mark_zone_full(zonelist, z);
1575try_next_zone:
1576		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1577			/*
1578			 * we do zlc_setup after the first zone is tried but only
1579			 * if there are multiple nodes make it worthwhile
1580			 */
1581			allowednodes = zlc_setup(zonelist, alloc_flags);
1582			zlc_active = 1;
1583			did_zlc_setup = 1;
1584		}
1585	}
1586
1587	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1588		/* Disable zlc cache for second zonelist scan */
1589		zlc_active = 0;
1590		goto zonelist_scan;
1591	}
1592	return page;
1593}
1594
1595static inline int
1596should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1597				unsigned long pages_reclaimed)
1598{
1599	/* Do not loop if specifically requested */
1600	if (gfp_mask & __GFP_NORETRY)
1601		return 0;
1602
1603	/*
1604	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1605	 * means __GFP_NOFAIL, but that may not be true in other
1606	 * implementations.
1607	 */
1608	if (order <= PAGE_ALLOC_COSTLY_ORDER)
1609		return 1;
1610
1611	/*
1612	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1613	 * specified, then we retry until we no longer reclaim any pages
1614	 * (above), or we've reclaimed an order of pages at least as
1615	 * large as the allocation's order. In both cases, if the
1616	 * allocation still fails, we stop retrying.
1617	 */
1618	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1619		return 1;
1620
1621	/*
1622	 * Don't let big-order allocations loop unless the caller
1623	 * explicitly requests that.
1624	 */
1625	if (gfp_mask & __GFP_NOFAIL)
1626		return 1;
1627
1628	return 0;
1629}
1630
1631static inline struct page *
1632__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1633	struct zonelist *zonelist, enum zone_type high_zoneidx,
1634	nodemask_t *nodemask, struct zone *preferred_zone,
1635	int migratetype)
1636{
1637	struct page *page;
1638
1639	/* Acquire the OOM killer lock for the zones in zonelist */
1640	if (!try_set_zone_oom(zonelist, gfp_mask)) {
1641		schedule_timeout_uninterruptible(1);
1642		return NULL;
1643	}
1644
1645	/*
1646	 * Go through the zonelist yet one more time, keep very high watermark
1647	 * here, this is only to catch a parallel oom killing, we must fail if
1648	 * we're still under heavy pressure.
1649	 */
1650	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1651		order, zonelist, high_zoneidx,
1652		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1653		preferred_zone, migratetype);
1654	if (page)
1655		goto out;
1656
1657	/* The OOM killer will not help higher order allocs */
1658	if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
1659		goto out;
1660
1661	/* Exhausted what can be done so it's blamo time */
1662	out_of_memory(zonelist, gfp_mask, order);
1663
1664out:
1665	clear_zonelist_oom(zonelist, gfp_mask);
1666	return page;
1667}
1668
1669/* The really slow allocator path where we enter direct reclaim */
1670static inline struct page *
1671__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1672	struct zonelist *zonelist, enum zone_type high_zoneidx,
1673	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1674	int migratetype, unsigned long *did_some_progress)
1675{
1676	struct page *page = NULL;
1677	struct reclaim_state reclaim_state;
1678	struct task_struct *p = current;
1679
1680	cond_resched();
1681
1682	/* We now go into synchronous reclaim */
1683	cpuset_memory_pressure_bump();
1684	p->flags |= PF_MEMALLOC;
1685	lockdep_set_current_reclaim_state(gfp_mask);
1686	reclaim_state.reclaimed_slab = 0;
1687	p->reclaim_state = &reclaim_state;
1688
1689	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1690
1691	p->reclaim_state = NULL;
1692	lockdep_clear_current_reclaim_state();
1693	p->flags &= ~PF_MEMALLOC;
1694
1695	cond_resched();
1696
1697	if (order != 0)
1698		drain_all_pages();
1699
1700	if (likely(*did_some_progress))
1701		page = get_page_from_freelist(gfp_mask, nodemask, order,
1702					zonelist, high_zoneidx,
1703					alloc_flags, preferred_zone,
1704					migratetype);
1705	return page;
1706}
1707
1708/*
1709 * This is called in the allocator slow-path if the allocation request is of
1710 * sufficient urgency to ignore watermarks and take other desperate measures
1711 */
1712static inline struct page *
1713__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1714	struct zonelist *zonelist, enum zone_type high_zoneidx,
1715	nodemask_t *nodemask, struct zone *preferred_zone,
1716	int migratetype)
1717{
1718	struct page *page;
1719
1720	do {
1721		page = get_page_from_freelist(gfp_mask, nodemask, order,
1722			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
1723			preferred_zone, migratetype);
1724
1725		if (!page && gfp_mask & __GFP_NOFAIL)
1726			congestion_wait(BLK_RW_ASYNC, HZ/50);
1727	} while (!page && (gfp_mask & __GFP_NOFAIL));
1728
1729	return page;
1730}
1731
1732static inline
1733void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1734						enum zone_type high_zoneidx)
1735{
1736	struct zoneref *z;
1737	struct zone *zone;
1738
1739	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1740		wakeup_kswapd(zone, order);
1741}
1742
1743static inline int
1744gfp_to_alloc_flags(gfp_t gfp_mask)
1745{
1746	struct task_struct *p = current;
1747	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1748	const gfp_t wait = gfp_mask & __GFP_WAIT;
1749
1750	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1751	BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
1752
1753	/*
1754	 * The caller may dip into page reserves a bit more if the caller
1755	 * cannot run direct reclaim, or if the caller has realtime scheduling
1756	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1757	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1758	 */
1759	alloc_flags |= (gfp_mask & __GFP_HIGH);
1760
1761	if (!wait) {
1762		alloc_flags |= ALLOC_HARDER;
1763		/*
1764		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1765		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1766		 */
1767		alloc_flags &= ~ALLOC_CPUSET;
1768	} else if (unlikely(rt_task(p)) && !in_interrupt())
1769		alloc_flags |= ALLOC_HARDER;
1770
1771	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1772		if (!in_interrupt() &&
1773		    ((p->flags & PF_MEMALLOC) ||
1774		     unlikely(test_thread_flag(TIF_MEMDIE))))
1775			alloc_flags |= ALLOC_NO_WATERMARKS;
1776	}
1777
1778	return alloc_flags;
1779}
1780
1781static inline struct page *
1782__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1783	struct zonelist *zonelist, enum zone_type high_zoneidx,
1784	nodemask_t *nodemask, struct zone *preferred_zone,
1785	int migratetype)
1786{
1787	const gfp_t wait = gfp_mask & __GFP_WAIT;
1788	struct page *page = NULL;
1789	int alloc_flags;
1790	unsigned long pages_reclaimed = 0;
1791	unsigned long did_some_progress;
1792	struct task_struct *p = current;
1793
1794	/*
1795	 * In the slowpath, we sanity check order to avoid ever trying to
1796	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
1797	 * be using allocators in order of preference for an area that is
1798	 * too large.
1799	 */
1800	if (order >= MAX_ORDER) {
1801		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
1802		return NULL;
1803	}
1804
1805	/*
1806	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1807	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1808	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
1809	 * using a larger set of nodes after it has established that the
1810	 * allowed per node queues are empty and that nodes are
1811	 * over allocated.
1812	 */
1813	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1814		goto nopage;
1815
1816restart:
1817	wake_all_kswapd(order, zonelist, high_zoneidx);
1818
1819	/*
1820	 * OK, we're below the kswapd watermark and have kicked background
1821	 * reclaim. Now things get more complex, so set up alloc_flags according
1822	 * to how we want to proceed.
1823	 */
1824	alloc_flags = gfp_to_alloc_flags(gfp_mask);
1825
1826	/* This is the last chance, in general, before the goto nopage. */
1827	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1828			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
1829			preferred_zone, migratetype);
1830	if (page)
1831		goto got_pg;
1832
1833rebalance:
1834	/* Allocate without watermarks if the context allows */
1835	if (alloc_flags & ALLOC_NO_WATERMARKS) {
1836		page = __alloc_pages_high_priority(gfp_mask, order,
1837				zonelist, high_zoneidx, nodemask,
1838				preferred_zone, migratetype);
1839		if (page)
1840			goto got_pg;
1841	}
1842
1843	/* Atomic allocations - we can't balance anything */
1844	if (!wait)
1845		goto nopage;
1846
1847	/* Avoid recursion of direct reclaim */
1848	if (p->flags & PF_MEMALLOC)
1849		goto nopage;
1850
1851	/* Avoid allocations with no watermarks from looping endlessly */
1852	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
1853		goto nopage;
1854
1855	/* Try direct reclaim and then allocating */
1856	page = __alloc_pages_direct_reclaim(gfp_mask, order,
1857					zonelist, high_zoneidx,
1858					nodemask,
1859					alloc_flags, preferred_zone,
1860					migratetype, &did_some_progress);
1861	if (page)
1862		goto got_pg;
1863
1864	/*
1865	 * If we failed to make any progress reclaiming, then we are
1866	 * running out of options and have to consider going OOM
1867	 */
1868	if (!did_some_progress) {
1869		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1870			if (oom_killer_disabled)
1871				goto nopage;
1872			page = __alloc_pages_may_oom(gfp_mask, order,
1873					zonelist, high_zoneidx,
1874					nodemask, preferred_zone,
1875					migratetype);
1876			if (page)
1877				goto got_pg;
1878
1879			/*
1880			 * The OOM killer does not trigger for high-order
1881			 * ~__GFP_NOFAIL allocations so if no progress is being
1882			 * made, there are no other options and retrying is
1883			 * unlikely to help.
1884			 */
1885			if (order > PAGE_ALLOC_COSTLY_ORDER &&
1886						!(gfp_mask & __GFP_NOFAIL))
1887				goto nopage;
1888
1889			goto restart;
1890		}
1891	}
1892
1893	/* Check if we should retry the allocation */
1894	pages_reclaimed += did_some_progress;
1895	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
1896		/* Wait for some write requests to complete then retry */
1897		congestion_wait(BLK_RW_ASYNC, HZ/50);
1898		goto rebalance;
1899	}
1900
1901nopage:
1902	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1903		printk(KERN_WARNING "%s: page allocation failure."
1904			" order:%d, mode:0x%x\n",
1905			p->comm, order, gfp_mask);
1906		dump_stack();
1907		show_mem();
1908	}
1909	return page;
1910got_pg:
1911	if (kmemcheck_enabled)
1912		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
1913	return page;
1914
1915}
1916
1917/*
1918 * This is the 'heart' of the zoned buddy allocator.
1919 */
1920struct page *
1921__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
1922			struct zonelist *zonelist, nodemask_t *nodemask)
1923{
1924	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1925	struct zone *preferred_zone;
1926	struct page *page;
1927	int migratetype = allocflags_to_migratetype(gfp_mask);
1928
1929	gfp_mask &= gfp_allowed_mask;
1930
1931	lockdep_trace_alloc(gfp_mask);
1932
1933	might_sleep_if(gfp_mask & __GFP_WAIT);
1934
1935	if (should_fail_alloc_page(gfp_mask, order))
1936		return NULL;
1937
1938	/*
1939	 * Check the zones suitable for the gfp_mask contain at least one
1940	 * valid zone. It's possible to have an empty zonelist as a result
1941	 * of GFP_THISNODE and a memoryless node
1942	 */
1943	if (unlikely(!zonelist->_zonerefs->zone))
1944		return NULL;
1945
1946	/* The preferred zone is used for statistics later */
1947	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
1948	if (!preferred_zone)
1949		return NULL;
1950
1951	/* First allocation attempt */
1952	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1953			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
1954			preferred_zone, migratetype);
1955	if (unlikely(!page))
1956		page = __alloc_pages_slowpath(gfp_mask, order,
1957				zonelist, high_zoneidx, nodemask,
1958				preferred_zone, migratetype);
1959
1960	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
1961	return page;
1962}
1963EXPORT_SYMBOL(__alloc_pages_nodemask);
1964
1965/*
1966 * Common helper functions.
1967 */
1968unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1969{
1970	struct page *page;
1971
1972	/*
1973	 * __get_free_pages() returns a 32-bit address, which cannot represent
1974	 * a highmem page
1975	 */
1976	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1977
1978	page = alloc_pages(gfp_mask, order);
1979	if (!page)
1980		return 0;
1981	return (unsigned long) page_address(page);
1982}
1983EXPORT_SYMBOL(__get_free_pages);
1984
1985unsigned long get_zeroed_page(gfp_t gfp_mask)
1986{
1987	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1988}
1989EXPORT_SYMBOL(get_zeroed_page);
1990
1991void __pagevec_free(struct pagevec *pvec)
1992{
1993	int i = pagevec_count(pvec);
1994
1995	while (--i >= 0) {
1996		trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
1997		free_hot_cold_page(pvec->pages[i], pvec->cold);
1998	}
1999}
2000
2001void __free_pages(struct page *page, unsigned int order)
2002{
2003	if (put_page_testzero(page)) {
2004		trace_mm_page_free_direct(page, order);
2005		if (order == 0)
2006			free_hot_page(page);
2007		else
2008			__free_pages_ok(page, order);
2009	}
2010}
2011
2012EXPORT_SYMBOL(__free_pages);
2013
2014void free_pages(unsigned long addr, unsigned int order)
2015{
2016	if (addr != 0) {
2017		VM_BUG_ON(!virt_addr_valid((void *)addr));
2018		__free_pages(virt_to_page((void *)addr), order);
2019	}
2020}
2021
2022EXPORT_SYMBOL(free_pages);
2023
2024/**
2025 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
2026 * @size: the number of bytes to allocate
2027 * @gfp_mask: GFP flags for the allocation
2028 *
2029 * This function is similar to alloc_pages(), except that it allocates the
2030 * minimum number of pages to satisfy the request.  alloc_pages() can only
2031 * allocate memory in power-of-two pages.
2032 *
2033 * This function is also limited by MAX_ORDER.
2034 *
2035 * Memory allocated by this function must be released by free_pages_exact().
2036 */
2037void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2038{
2039	unsigned int order = get_order(size);
2040	unsigned long addr;
2041
2042	addr = __get_free_pages(gfp_mask, order);
2043	if (addr) {
2044		unsigned long alloc_end = addr + (PAGE_SIZE << order);
2045		unsigned long used = addr + PAGE_ALIGN(size);
2046
2047		split_page(virt_to_page((void *)addr), order);
2048		while (used < alloc_end) {
2049			free_page(used);
2050			used += PAGE_SIZE;
2051		}
2052	}
2053
2054	return (void *)addr;
2055}
2056EXPORT_SYMBOL(alloc_pages_exact);
2057
2058/**
2059 * free_pages_exact - release memory allocated via alloc_pages_exact()
2060 * @virt: the value returned by alloc_pages_exact.
2061 * @size: size of allocation, same value as passed to alloc_pages_exact().
2062 *
2063 * Release the memory allocated by a previous call to alloc_pages_exact.
2064 */
2065void free_pages_exact(void *virt, size_t size)
2066{
2067	unsigned long addr = (unsigned long)virt;
2068	unsigned long end = addr + PAGE_ALIGN(size);
2069
2070	while (addr < end) {
2071		free_page(addr);
2072		addr += PAGE_SIZE;
2073	}
2074}
2075EXPORT_SYMBOL(free_pages_exact);
2076
2077static unsigned int nr_free_zone_pages(int offset)
2078{
2079	struct zoneref *z;
2080	struct zone *zone;
2081
2082	/* Just pick one node, since fallback list is circular */
2083	unsigned int sum = 0;
2084
2085	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2086
2087	for_each_zone_zonelist(zone, z, zonelist, offset) {
2088		unsigned long size = zone->present_pages;
2089		unsigned long high = high_wmark_pages(zone);
2090		if (size > high)
2091			sum += size - high;
2092	}
2093
2094	return sum;
2095}
2096
2097/*
2098 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2099 */
2100unsigned int nr_free_buffer_pages(void)
2101{
2102	return nr_free_zone_pages(gfp_zone(GFP_USER));
2103}
2104EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2105
2106/*
2107 * Amount of free RAM allocatable within all zones
2108 */
2109unsigned int nr_free_pagecache_pages(void)
2110{
2111	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2112}
2113
2114static inline void show_node(struct zone *zone)
2115{
2116	if (NUMA_BUILD)
2117		printk("Node %d ", zone_to_nid(zone));
2118}
2119
2120void si_meminfo(struct sysinfo *val)
2121{
2122	val->totalram = totalram_pages;
2123	val->sharedram = 0;
2124	val->freeram = global_page_state(NR_FREE_PAGES);
2125	val->bufferram = nr_blockdev_pages();
2126	val->totalhigh = totalhigh_pages;
2127	val->freehigh = nr_free_highpages();
2128	val->mem_unit = PAGE_SIZE;
2129}
2130
2131EXPORT_SYMBOL(si_meminfo);
2132
2133#ifdef CONFIG_NUMA
2134void si_meminfo_node(struct sysinfo *val, int nid)
2135{
2136	pg_data_t *pgdat = NODE_DATA(nid);
2137
2138	val->totalram = pgdat->node_present_pages;
2139	val->freeram = node_page_state(nid, NR_FREE_PAGES);
2140#ifdef CONFIG_HIGHMEM
2141	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2142	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2143			NR_FREE_PAGES);
2144#else
2145	val->totalhigh = 0;
2146	val->freehigh = 0;
2147#endif
2148	val->mem_unit = PAGE_SIZE;
2149}
2150#endif
2151
2152#define K(x) ((x) << (PAGE_SHIFT-10))
2153
2154/*
2155 * Show free area list (used inside shift_scroll-lock stuff)
2156 * We also calculate the percentage fragmentation. We do this by counting the
2157 * memory on each free list with the exception of the first item on the list.
2158 */
2159void show_free_areas(void)
2160{
2161	int cpu;
2162	struct zone *zone;
2163
2164	for_each_populated_zone(zone) {
2165		show_node(zone);
2166		printk("%s per-cpu:\n", zone->name);
2167
2168		for_each_online_cpu(cpu) {
2169			struct per_cpu_pageset *pageset;
2170
2171			pageset = zone_pcp(zone, cpu);
2172
2173			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2174			       cpu, pageset->pcp.high,
2175			       pageset->pcp.batch, pageset->pcp.count);
2176		}
2177	}
2178
2179	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2180		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2181		" unevictable:%lu"
2182		" dirty:%lu writeback:%lu unstable:%lu\n"
2183		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2184		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2185		global_page_state(NR_ACTIVE_ANON),
2186		global_page_state(NR_INACTIVE_ANON),
2187		global_page_state(NR_ISOLATED_ANON),
2188		global_page_state(NR_ACTIVE_FILE),
2189		global_page_state(NR_INACTIVE_FILE),
2190		global_page_state(NR_ISOLATED_FILE),
2191		global_page_state(NR_UNEVICTABLE),
2192		global_page_state(NR_FILE_DIRTY),
2193		global_page_state(NR_WRITEBACK),
2194		global_page_state(NR_UNSTABLE_NFS),
2195		global_page_state(NR_FREE_PAGES),
2196		global_page_state(NR_SLAB_RECLAIMABLE),
2197		global_page_state(NR_SLAB_UNRECLAIMABLE),
2198		global_page_state(NR_FILE_MAPPED),
2199		global_page_state(NR_SHMEM),
2200		global_page_state(NR_PAGETABLE),
2201		global_page_state(NR_BOUNCE));
2202
2203	for_each_populated_zone(zone) {
2204		int i;
2205
2206		show_node(zone);
2207		printk("%s"
2208			" free:%lukB"
2209			" min:%lukB"
2210			" low:%lukB"
2211			" high:%lukB"
2212			" active_anon:%lukB"
2213			" inactive_anon:%lukB"
2214			" active_file:%lukB"
2215			" inactive_file:%lukB"
2216			" unevictable:%lukB"
2217			" isolated(anon):%lukB"
2218			" isolated(file):%lukB"
2219			" present:%lukB"
2220			" mlocked:%lukB"
2221			" dirty:%lukB"
2222			" writeback:%lukB"
2223			" mapped:%lukB"
2224			" shmem:%lukB"
2225			" slab_reclaimable:%lukB"
2226			" slab_unreclaimable:%lukB"
2227			" kernel_stack:%lukB"
2228			" pagetables:%lukB"
2229			" unstable:%lukB"
2230			" bounce:%lukB"
2231			" writeback_tmp:%lukB"
2232			" pages_scanned:%lu"
2233			" all_unreclaimable? %s"
2234			"\n",
2235			zone->name,
2236			K(zone_page_state(zone, NR_FREE_PAGES)),
2237			K(min_wmark_pages(zone)),
2238			K(low_wmark_pages(zone)),
2239			K(high_wmark_pages(zone)),
2240			K(zone_page_state(zone, NR_ACTIVE_ANON)),
2241			K(zone_page_state(zone, NR_INACTIVE_ANON)),
2242			K(zone_page_state(zone, NR_ACTIVE_FILE)),
2243			K(zone_page_state(zone, NR_INACTIVE_FILE)),
2244			K(zone_page_state(zone, NR_UNEVICTABLE)),
2245			K(zone_page_state(zone, NR_ISOLATED_ANON)),
2246			K(zone_page_state(zone, NR_ISOLATED_FILE)),
2247			K(zone->present_pages),
2248			K(zone_page_state(zone, NR_MLOCK)),
2249			K(zone_page_state(zone, NR_FILE_DIRTY)),
2250			K(zone_page_state(zone, NR_WRITEBACK)),
2251			K(zone_page_state(zone, NR_FILE_MAPPED)),
2252			K(zone_page_state(zone, NR_SHMEM)),
2253			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2254			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2255			zone_page_state(zone, NR_KERNEL_STACK) *
2256				THREAD_SIZE / 1024,
2257			K(zone_page_state(zone, NR_PAGETABLE)),
2258			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2259			K(zone_page_state(zone, NR_BOUNCE)),
2260			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2261			zone->pages_scanned,
2262			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
2263			);
2264		printk("lowmem_reserve[]:");
2265		for (i = 0; i < MAX_NR_ZONES; i++)
2266			printk(" %lu", zone->lowmem_reserve[i]);
2267		printk("\n");
2268	}
2269
2270	for_each_populated_zone(zone) {
2271 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
2272
2273		show_node(zone);
2274		printk("%s: ", zone->name);
2275
2276		spin_lock_irqsave(&zone->lock, flags);
2277		for (order = 0; order < MAX_ORDER; order++) {
2278			nr[order] = zone->free_area[order].nr_free;
2279			total += nr[order] << order;
2280		}
2281		spin_unlock_irqrestore(&zone->lock, flags);
2282		for (order = 0; order < MAX_ORDER; order++)
2283			printk("%lu*%lukB ", nr[order], K(1UL) << order);
2284		printk("= %lukB\n", K(total));
2285	}
2286
2287	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2288
2289	show_swap_cache_info();
2290}
2291
2292static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2293{
2294	zoneref->zone = zone;
2295	zoneref->zone_idx = zone_idx(zone);
2296}
2297
2298/*
2299 * Builds allocation fallback zone lists.
2300 *
2301 * Add all populated zones of a node to the zonelist.
2302 */
2303static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2304				int nr_zones, enum zone_type zone_type)
2305{
2306	struct zone *zone;
2307
2308	BUG_ON(zone_type >= MAX_NR_ZONES);
2309	zone_type++;
2310
2311	do {
2312		zone_type--;
2313		zone = pgdat->node_zones + zone_type;
2314		if (populated_zone(zone)) {
2315			zoneref_set_zone(zone,
2316				&zonelist->_zonerefs[nr_zones++]);
2317			check_highest_zone(zone_type);
2318		}
2319
2320	} while (zone_type);
2321	return nr_zones;
2322}
2323
2324
2325/*
2326 *  zonelist_order:
2327 *  0 = automatic detection of better ordering.
2328 *  1 = order by ([node] distance, -zonetype)
2329 *  2 = order by (-zonetype, [node] distance)
2330 *
2331 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2332 *  the same zonelist. So only NUMA can configure this param.
2333 */
2334#define ZONELIST_ORDER_DEFAULT  0
2335#define ZONELIST_ORDER_NODE     1
2336#define ZONELIST_ORDER_ZONE     2
2337
2338/* zonelist order in the kernel.
2339 * set_zonelist_order() will set this to NODE or ZONE.
2340 */
2341static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2342static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2343
2344
2345#ifdef CONFIG_NUMA
2346/* The value user specified ....changed by config */
2347static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2348/* string for sysctl */
2349#define NUMA_ZONELIST_ORDER_LEN	16
2350char numa_zonelist_order[16] = "default";
2351
2352/*
2353 * interface for configure zonelist ordering.
2354 * command line option "numa_zonelist_order"
2355 *	= "[dD]efault	- default, automatic configuration.
2356 *	= "[nN]ode 	- order by node locality, then by zone within node
2357 *	= "[zZ]one      - order by zone, then by locality within zone
2358 */
2359
2360static int __parse_numa_zonelist_order(char *s)
2361{
2362	if (*s == 'd' || *s == 'D') {
2363		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2364	} else if (*s == 'n' || *s == 'N') {
2365		user_zonelist_order = ZONELIST_ORDER_NODE;
2366	} else if (*s == 'z' || *s == 'Z') {
2367		user_zonelist_order = ZONELIST_ORDER_ZONE;
2368	} else {
2369		printk(KERN_WARNING
2370			"Ignoring invalid numa_zonelist_order value:  "
2371			"%s\n", s);
2372		return -EINVAL;
2373	}
2374	return 0;
2375}
2376
2377static __init int setup_numa_zonelist_order(char *s)
2378{
2379	if (s)
2380		return __parse_numa_zonelist_order(s);
2381	return 0;
2382}
2383early_param("numa_zonelist_order", setup_numa_zonelist_order);
2384
2385/*
2386 * sysctl handler for numa_zonelist_order
2387 */
2388int numa_zonelist_order_handler(ctl_table *table, int write,
2389		void __user *buffer, size_t *length,
2390		loff_t *ppos)
2391{
2392	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2393	int ret;
2394
2395	if (write)
2396		strncpy(saved_string, (char*)table->data,
2397			NUMA_ZONELIST_ORDER_LEN);
2398	ret = proc_dostring(table, write, buffer, length, ppos);
2399	if (ret)
2400		return ret;
2401	if (write) {
2402		int oldval = user_zonelist_order;
2403		if (__parse_numa_zonelist_order((char*)table->data)) {
2404			/*
2405			 * bogus value.  restore saved string
2406			 */
2407			strncpy((char*)table->data, saved_string,
2408				NUMA_ZONELIST_ORDER_LEN);
2409			user_zonelist_order = oldval;
2410		} else if (oldval != user_zonelist_order)
2411			build_all_zonelists();
2412	}
2413	return 0;
2414}
2415
2416
2417#define MAX_NODE_LOAD (nr_online_nodes)
2418static int node_load[MAX_NUMNODES];
2419
2420/**
2421 * find_next_best_node - find the next node that should appear in a given node's fallback list
2422 * @node: node whose fallback list we're appending
2423 * @used_node_mask: nodemask_t of already used nodes
2424 *
2425 * We use a number of factors to determine which is the next node that should
2426 * appear on a given node's fallback list.  The node should not have appeared
2427 * already in @node's fallback list, and it should be the next closest node
2428 * according to the distance array (which contains arbitrary distance values
2429 * from each node to each node in the system), and should also prefer nodes
2430 * with no CPUs, since presumably they'll have very little allocation pressure
2431 * on them otherwise.
2432 * It returns -1 if no node is found.
2433 */
2434static int find_next_best_node(int node, nodemask_t *used_node_mask)
2435{
2436	int n, val;
2437	int min_val = INT_MAX;
2438	int best_node = -1;
2439	const struct cpumask *tmp = cpumask_of_node(0);
2440
2441	/* Use the local node if we haven't already */
2442	if (!node_isset(node, *used_node_mask)) {
2443		node_set(node, *used_node_mask);
2444		return node;
2445	}
2446
2447	for_each_node_state(n, N_HIGH_MEMORY) {
2448
2449		/* Don't want a node to appear more than once */
2450		if (node_isset(n, *used_node_mask))
2451			continue;
2452
2453		/* Use the distance array to find the distance */
2454		val = node_distance(node, n);
2455
2456		/* Penalize nodes under us ("prefer the next node") */
2457		val += (n < node);
2458
2459		/* Give preference to headless and unused nodes */
2460		tmp = cpumask_of_node(n);
2461		if (!cpumask_empty(tmp))
2462			val += PENALTY_FOR_NODE_WITH_CPUS;
2463
2464		/* Slight preference for less loaded node */
2465		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2466		val += node_load[n];
2467
2468		if (val < min_val) {
2469			min_val = val;
2470			best_node = n;
2471		}
2472	}
2473
2474	if (best_node >= 0)
2475		node_set(best_node, *used_node_mask);
2476
2477	return best_node;
2478}
2479
2480
2481/*
2482 * Build zonelists ordered by node and zones within node.
2483 * This results in maximum locality--normal zone overflows into local
2484 * DMA zone, if any--but risks exhausting DMA zone.
2485 */
2486static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2487{
2488	int j;
2489	struct zonelist *zonelist;
2490
2491	zonelist = &pgdat->node_zonelists[0];
2492	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2493		;
2494	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2495							MAX_NR_ZONES - 1);
2496	zonelist->_zonerefs[j].zone = NULL;
2497	zonelist->_zonerefs[j].zone_idx = 0;
2498}
2499
2500/*
2501 * Build gfp_thisnode zonelists
2502 */
2503static void build_thisnode_zonelists(pg_data_t *pgdat)
2504{
2505	int j;
2506	struct zonelist *zonelist;
2507
2508	zonelist = &pgdat->node_zonelists[1];
2509	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2510	zonelist->_zonerefs[j].zone = NULL;
2511	zonelist->_zonerefs[j].zone_idx = 0;
2512}
2513
2514/*
2515 * Build zonelists ordered by zone and nodes within zones.
2516 * This results in conserving DMA zone[s] until all Normal memory is
2517 * exhausted, but results in overflowing to remote node while memory
2518 * may still exist in local DMA zone.
2519 */
2520static int node_order[MAX_NUMNODES];
2521
2522static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2523{
2524	int pos, j, node;
2525	int zone_type;		/* needs to be signed */
2526	struct zone *z;
2527	struct zonelist *zonelist;
2528
2529	zonelist = &pgdat->node_zonelists[0];
2530	pos = 0;
2531	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2532		for (j = 0; j < nr_nodes; j++) {
2533			node = node_order[j];
2534			z = &NODE_DATA(node)->node_zones[zone_type];
2535			if (populated_zone(z)) {
2536				zoneref_set_zone(z,
2537					&zonelist->_zonerefs[pos++]);
2538				check_highest_zone(zone_type);
2539			}
2540		}
2541	}
2542	zonelist->_zonerefs[pos].zone = NULL;
2543	zonelist->_zonerefs[pos].zone_idx = 0;
2544}
2545
2546static int default_zonelist_order(void)
2547{
2548	int nid, zone_type;
2549	unsigned long low_kmem_size,total_size;
2550	struct zone *z;
2551	int average_size;
2552	/*
2553         * ZONE_DMA and ZONE_DMA32 can be very small area in the sytem.
2554	 * If they are really small and used heavily, the system can fall
2555	 * into OOM very easily.
2556	 * This function detect ZONE_DMA/DMA32 size and confgigures zone order.
2557	 */
2558	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
2559	low_kmem_size = 0;
2560	total_size = 0;
2561	for_each_online_node(nid) {
2562		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2563			z = &NODE_DATA(nid)->node_zones[zone_type];
2564			if (populated_zone(z)) {
2565				if (zone_type < ZONE_NORMAL)
2566					low_kmem_size += z->present_pages;
2567				total_size += z->present_pages;
2568			}
2569		}
2570	}
2571	if (!low_kmem_size ||  /* there are no DMA area. */
2572	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2573		return ZONELIST_ORDER_NODE;
2574	/*
2575	 * look into each node's config.
2576  	 * If there is a node whose DMA/DMA32 memory is very big area on
2577 	 * local memory, NODE_ORDER may be suitable.
2578         */
2579	average_size = total_size /
2580				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2581	for_each_online_node(nid) {
2582		low_kmem_size = 0;
2583		total_size = 0;
2584		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2585			z = &NODE_DATA(nid)->node_zones[zone_type];
2586			if (populated_zone(z)) {
2587				if (zone_type < ZONE_NORMAL)
2588					low_kmem_size += z->present_pages;
2589				total_size += z->present_pages;
2590			}
2591		}
2592		if (low_kmem_size &&
2593		    total_size > average_size && /* ignore small node */
2594		    low_kmem_size > total_size * 70/100)
2595			return ZONELIST_ORDER_NODE;
2596	}
2597	return ZONELIST_ORDER_ZONE;
2598}
2599
2600static void set_zonelist_order(void)
2601{
2602	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2603		current_zonelist_order = default_zonelist_order();
2604	else
2605		current_zonelist_order = user_zonelist_order;
2606}
2607
2608static void build_zonelists(pg_data_t *pgdat)
2609{
2610	int j, node, load;
2611	enum zone_type i;
2612	nodemask_t used_mask;
2613	int local_node, prev_node;
2614	struct zonelist *zonelist;
2615	int order = current_zonelist_order;
2616
2617	/* initialize zonelists */
2618	for (i = 0; i < MAX_ZONELISTS; i++) {
2619		zonelist = pgdat->node_zonelists + i;
2620		zonelist->_zonerefs[0].zone = NULL;
2621		zonelist->_zonerefs[0].zone_idx = 0;
2622	}
2623
2624	/* NUMA-aware ordering of nodes */
2625	local_node = pgdat->node_id;
2626	load = nr_online_nodes;
2627	prev_node = local_node;
2628	nodes_clear(used_mask);
2629
2630	memset(node_order, 0, sizeof(node_order));
2631	j = 0;
2632
2633	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2634		int distance = node_distance(local_node, node);
2635
2636		/*
2637		 * If another node is sufficiently far away then it is better
2638		 * to reclaim pages in a zone before going off node.
2639		 */
2640		if (distance > RECLAIM_DISTANCE)
2641			zone_reclaim_mode = 1;
2642
2643		/*
2644		 * We don't want to pressure a particular node.
2645		 * So adding penalty to the first node in same
2646		 * distance group to make it round-robin.
2647		 */
2648		if (distance != node_distance(local_node, prev_node))
2649			node_load[node] = load;
2650
2651		prev_node = node;
2652		load--;
2653		if (order == ZONELIST_ORDER_NODE)
2654			build_zonelists_in_node_order(pgdat, node);
2655		else
2656			node_order[j++] = node;	/* remember order */
2657	}
2658
2659	if (order == ZONELIST_ORDER_ZONE) {
2660		/* calculate node order -- i.e., DMA last! */
2661		build_zonelists_in_zone_order(pgdat, j);
2662	}
2663
2664	build_thisnode_zonelists(pgdat);
2665}
2666
2667/* Construct the zonelist performance cache - see further mmzone.h */
2668static void build_zonelist_cache(pg_data_t *pgdat)
2669{
2670	struct zonelist *zonelist;
2671	struct zonelist_cache *zlc;
2672	struct zoneref *z;
2673
2674	zonelist = &pgdat->node_zonelists[0];
2675	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2676	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2677	for (z = zonelist->_zonerefs; z->zone; z++)
2678		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2679}
2680
2681
2682#else	/* CONFIG_NUMA */
2683
2684static void set_zonelist_order(void)
2685{
2686	current_zonelist_order = ZONELIST_ORDER_ZONE;
2687}
2688
2689static void build_zonelists(pg_data_t *pgdat)
2690{
2691	int node, local_node;
2692	enum zone_type j;
2693	struct zonelist *zonelist;
2694
2695	local_node = pgdat->node_id;
2696
2697	zonelist = &pgdat->node_zonelists[0];
2698	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2699
2700	/*
2701	 * Now we build the zonelist so that it contains the zones
2702	 * of all the other nodes.
2703	 * We don't want to pressure a particular node, so when
2704	 * building the zones for node N, we make sure that the
2705	 * zones coming right after the local ones are those from
2706	 * node N+1 (modulo N)
2707	 */
2708	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2709		if (!node_online(node))
2710			continue;
2711		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2712							MAX_NR_ZONES - 1);
2713	}
2714	for (node = 0; node < local_node; node++) {
2715		if (!node_online(node))
2716			continue;
2717		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2718							MAX_NR_ZONES - 1);
2719	}
2720
2721	zonelist->_zonerefs[j].zone = NULL;
2722	zonelist->_zonerefs[j].zone_idx = 0;
2723}
2724
2725/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2726static void build_zonelist_cache(pg_data_t *pgdat)
2727{
2728	pgdat->node_zonelists[0].zlcache_ptr = NULL;
2729}
2730
2731#endif	/* CONFIG_NUMA */
2732
2733/* return values int ....just for stop_machine() */
2734static int __build_all_zonelists(void *dummy)
2735{
2736	int nid;
2737
2738#ifdef CONFIG_NUMA
2739	memset(node_load, 0, sizeof(node_load));
2740#endif
2741	for_each_online_node(nid) {
2742		pg_data_t *pgdat = NODE_DATA(nid);
2743
2744		build_zonelists(pgdat);
2745		build_zonelist_cache(pgdat);
2746	}
2747	return 0;
2748}
2749
2750void build_all_zonelists(void)
2751{
2752	set_zonelist_order();
2753
2754	if (system_state == SYSTEM_BOOTING) {
2755		__build_all_zonelists(NULL);
2756		mminit_verify_zonelist();
2757		cpuset_init_current_mems_allowed();
2758	} else {
2759		/* we have to stop all cpus to guarantee there is no user
2760		   of zonelist */
2761		stop_machine(__build_all_zonelists, NULL, NULL);
2762		/* cpuset refresh routine should be here */
2763	}
2764	vm_total_pages = nr_free_pagecache_pages();
2765	/*
2766	 * Disable grouping by mobility if the number of pages in the
2767	 * system is too low to allow the mechanism to work. It would be
2768	 * more accurate, but expensive to check per-zone. This check is
2769	 * made on memory-hotadd so a system can start with mobility
2770	 * disabled and enable it later
2771	 */
2772	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2773		page_group_by_mobility_disabled = 1;
2774	else
2775		page_group_by_mobility_disabled = 0;
2776
2777	printk("Built %i zonelists in %s order, mobility grouping %s.  "
2778		"Total pages: %ld\n",
2779			nr_online_nodes,
2780			zonelist_order_name[current_zonelist_order],
2781			page_group_by_mobility_disabled ? "off" : "on",
2782			vm_total_pages);
2783#ifdef CONFIG_NUMA
2784	printk("Policy zone: %s\n", zone_names[policy_zone]);
2785#endif
2786}
2787
2788/*
2789 * Helper functions to size the waitqueue hash table.
2790 * Essentially these want to choose hash table sizes sufficiently
2791 * large so that collisions trying to wait on pages are rare.
2792 * But in fact, the number of active page waitqueues on typical
2793 * systems is ridiculously low, less than 200. So this is even
2794 * conservative, even though it seems large.
2795 *
2796 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2797 * waitqueues, i.e. the size of the waitq table given the number of pages.
2798 */
2799#define PAGES_PER_WAITQUEUE	256
2800
2801#ifndef CONFIG_MEMORY_HOTPLUG
2802static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2803{
2804	unsigned long size = 1;
2805
2806	pages /= PAGES_PER_WAITQUEUE;
2807
2808	while (size < pages)
2809		size <<= 1;
2810
2811	/*
2812	 * Once we have dozens or even hundreds of threads sleeping
2813	 * on IO we've got bigger problems than wait queue collision.
2814	 * Limit the size of the wait table to a reasonable size.
2815	 */
2816	size = min(size, 4096UL);
2817
2818	return max(size, 4UL);
2819}
2820#else
2821/*
2822 * A zone's size might be changed by hot-add, so it is not possible to determine
2823 * a suitable size for its wait_table.  So we use the maximum size now.
2824 *
2825 * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
2826 *
2827 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2828 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2829 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2830 *
2831 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
2832 * or more by the traditional way. (See above).  It equals:
2833 *
2834 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2835 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2836 *    powerpc (64K page size)             : =  (32G +16M)byte.
2837 */
2838static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2839{
2840	return 4096UL;
2841}
2842#endif
2843
2844/*
2845 * This is an integer logarithm so that shifts can be used later
2846 * to extract the more random high bits from the multiplicative
2847 * hash function before the remainder is taken.
2848 */
2849static inline unsigned long wait_table_bits(unsigned long size)
2850{
2851	return ffz(~size);
2852}
2853
2854#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2855
2856/*
2857 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2858 * of blocks reserved is based on min_wmark_pages(zone). The memory within
2859 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
2860 * higher will lead to a bigger reserve which will get freed as contiguous
2861 * blocks as reclaim kicks in
2862 */
2863static void setup_zone_migrate_reserve(struct zone *zone)
2864{
2865	unsigned long start_pfn, pfn, end_pfn;
2866	struct page *page;
2867	unsigned long block_migratetype;
2868	int reserve;
2869
2870	/* Get the start pfn, end pfn and the number of blocks to reserve */
2871	start_pfn = zone->zone_start_pfn;
2872	end_pfn = start_pfn + zone->spanned_pages;
2873	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
2874							pageblock_order;
2875
2876	/*
2877	 * Reserve blocks are generally in place to help high-order atomic
2878	 * allocations that are short-lived. A min_free_kbytes value that
2879	 * would result in more than 2 reserve blocks for atomic allocations
2880	 * is assumed to be in place to help anti-fragmentation for the
2881	 * future allocation of hugepages at runtime.
2882	 */
2883	reserve = min(2, reserve);
2884
2885	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2886		if (!pfn_valid(pfn))
2887			continue;
2888		page = pfn_to_page(pfn);
2889
2890		/* Watch out for overlapping nodes */
2891		if (page_to_nid(page) != zone_to_nid(zone))
2892			continue;
2893
2894		/* Blocks with reserved pages will never free, skip them. */
2895		if (PageReserved(page))
2896			continue;
2897
2898		block_migratetype = get_pageblock_migratetype(page);
2899
2900		/* If this block is reserved, account for it */
2901		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2902			reserve--;
2903			continue;
2904		}
2905
2906		/* Suitable for reserving if this block is movable */
2907		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2908			set_pageblock_migratetype(page, MIGRATE_RESERVE);
2909			move_freepages_block(zone, page, MIGRATE_RESERVE);
2910			reserve--;
2911			continue;
2912		}
2913
2914		/*
2915		 * If the reserve is met and this is a previous reserved block,
2916		 * take it back
2917		 */
2918		if (block_migratetype == MIGRATE_RESERVE) {
2919			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2920			move_freepages_block(zone, page, MIGRATE_MOVABLE);
2921		}
2922	}
2923}
2924
2925/*
2926 * Initially all pages are reserved - free ones are freed
2927 * up by free_all_bootmem() once the early boot process is
2928 * done. Non-atomic initialization, single-pass.
2929 */
2930void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2931		unsigned long start_pfn, enum memmap_context context)
2932{
2933	struct page *page;
2934	unsigned long end_pfn = start_pfn + size;
2935	unsigned long pfn;
2936	struct zone *z;
2937
2938	if (highest_memmap_pfn < end_pfn - 1)
2939		highest_memmap_pfn = end_pfn - 1;
2940
2941	z = &NODE_DATA(nid)->node_zones[zone];
2942	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2943		/*
2944		 * There can be holes in boot-time mem_map[]s
2945		 * handed to this function.  They do not
2946		 * exist on hotplugged memory.
2947		 */
2948		if (context == MEMMAP_EARLY) {
2949			if (!early_pfn_valid(pfn))
2950				continue;
2951			if (!early_pfn_in_nid(pfn, nid))
2952				continue;
2953		}
2954		page = pfn_to_page(pfn);
2955		set_page_links(page, zone, nid, pfn);
2956		mminit_verify_page_links(page, zone, nid, pfn);
2957		init_page_count(page);
2958		reset_page_mapcount(page);
2959		SetPageReserved(page);
2960		/*
2961		 * Mark the block movable so that blocks are reserved for
2962		 * movable at startup. This will force kernel allocations
2963		 * to reserve their blocks rather than leaking throughout
2964		 * the address space during boot when many long-lived
2965		 * kernel allocations are made. Later some blocks near
2966		 * the start are marked MIGRATE_RESERVE by
2967		 * setup_zone_migrate_reserve()
2968		 *
2969		 * bitmap is created for zone's valid pfn range. but memmap
2970		 * can be created for invalid pages (for alignment)
2971		 * check here not to call set_pageblock_migratetype() against
2972		 * pfn out of zone.
2973		 */
2974		if ((z->zone_start_pfn <= pfn)
2975		    && (pfn < z->zone_start_pfn + z->spanned_pages)
2976		    && !(pfn & (pageblock_nr_pages - 1)))
2977			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2978
2979		INIT_LIST_HEAD(&page->lru);
2980#ifdef WANT_PAGE_VIRTUAL
2981		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
2982		if (!is_highmem_idx(zone))
2983			set_page_address(page, __va(pfn << PAGE_SHIFT));
2984#endif
2985	}
2986}
2987
2988static void __meminit zone_init_free_lists(struct zone *zone)
2989{
2990	int order, t;
2991	for_each_migratetype_order(order, t) {
2992		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2993		zone->free_area[order].nr_free = 0;
2994	}
2995}
2996
2997#ifndef __HAVE_ARCH_MEMMAP_INIT
2998#define memmap_init(size, nid, zone, start_pfn) \
2999	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3000#endif
3001
3002static int zone_batchsize(struct zone *zone)
3003{
3004#ifdef CONFIG_MMU
3005	int batch;
3006
3007	/*
3008	 * The per-cpu-pages pools are set to around 1000th of the
3009	 * size of the zone.  But no more than 1/2 of a meg.
3010	 *
3011	 * OK, so we don't know how big the cache is.  So guess.
3012	 */
3013	batch = zone->present_pages / 1024;
3014	if (batch * PAGE_SIZE > 512 * 1024)
3015		batch = (512 * 1024) / PAGE_SIZE;
3016	batch /= 4;		/* We effectively *= 4 below */
3017	if (batch < 1)
3018		batch = 1;
3019
3020	/*
3021	 * Clamp the batch to a 2^n - 1 value. Having a power
3022	 * of 2 value was found to be more likely to have
3023	 * suboptimal cache aliasing properties in some cases.
3024	 *
3025	 * For example if 2 tasks are alternately allocating
3026	 * batches of pages, one task can end up with a lot
3027	 * of pages of one half of the possible page colors
3028	 * and the other with pages of the other colors.
3029	 */
3030	batch = rounddown_pow_of_two(batch + batch/2) - 1;
3031
3032	return batch;
3033
3034#else
3035	/* The deferral and batching of frees should be suppressed under NOMMU
3036	 * conditions.
3037	 *
3038	 * The problem is that NOMMU needs to be able to allocate large chunks
3039	 * of contiguous memory as there's no hardware page translation to
3040	 * assemble apparent contiguous memory from discontiguous pages.
3041	 *
3042	 * Queueing large contiguous runs of pages for batching, however,
3043	 * causes the pages to actually be freed in smaller chunks.  As there
3044	 * can be a significant delay between the individual batches being
3045	 * recycled, this leads to the once large chunks of space being
3046	 * fragmented and becoming unavailable for high-order allocations.
3047	 */
3048	return 0;
3049#endif
3050}
3051
3052static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3053{
3054	struct per_cpu_pages *pcp;
3055	int migratetype;
3056
3057	memset(p, 0, sizeof(*p));
3058
3059	pcp = &p->pcp;
3060	pcp->count = 0;
3061	pcp->high = 6 * batch;
3062	pcp->batch = max(1UL, 1 * batch);
3063	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3064		INIT_LIST_HEAD(&pcp->lists[migratetype]);
3065}
3066
3067/*
3068 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
3069 * to the value high for the pageset p.
3070 */
3071
3072static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3073				unsigned long high)
3074{
3075	struct per_cpu_pages *pcp;
3076
3077	pcp = &p->pcp;
3078	pcp->high = high;
3079	pcp->batch = max(1UL, high/4);
3080	if ((high/4) > (PAGE_SHIFT * 8))
3081		pcp->batch = PAGE_SHIFT * 8;
3082}
3083
3084
3085#ifdef CONFIG_NUMA
3086/*
3087 * Boot pageset table. One per cpu which is going to be used for all
3088 * zones and all nodes. The parameters will be set in such a way
3089 * that an item put on a list will immediately be handed over to
3090 * the buddy list. This is safe since pageset manipulation is done
3091 * with interrupts disabled.
3092 *
3093 * Some NUMA counter updates may also be caught by the boot pagesets.
3094 *
3095 * The boot_pagesets must be kept even after bootup is complete for
3096 * unused processors and/or zones. They do play a role for bootstrapping
3097 * hotplugged processors.
3098 *
3099 * zoneinfo_show() and maybe other functions do
3100 * not check if the processor is online before following the pageset pointer.
3101 * Other parts of the kernel may not check if the zone is available.
3102 */
3103static struct per_cpu_pageset boot_pageset[NR_CPUS];
3104
3105/*
3106 * Dynamically allocate memory for the
3107 * per cpu pageset array in struct zone.
3108 */
3109static int __cpuinit process_zones(int cpu)
3110{
3111	struct zone *zone, *dzone;
3112	int node = cpu_to_node(cpu);
3113
3114	node_set_state(node, N_CPU);	/* this node has a cpu */
3115
3116	for_each_populated_zone(zone) {
3117		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
3118					 GFP_KERNEL, node);
3119		if (!zone_pcp(zone, cpu))
3120			goto bad;
3121
3122		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
3123
3124		if (percpu_pagelist_fraction)
3125			setup_pagelist_highmark(zone_pcp(zone, cpu),
3126			 	(zone->present_pages / percpu_pagelist_fraction));
3127	}
3128
3129	return 0;
3130bad:
3131	for_each_zone(dzone) {
3132		if (!populated_zone(dzone))
3133			continue;
3134		if (dzone == zone)
3135			break;
3136		kfree(zone_pcp(dzone, cpu));
3137		zone_pcp(dzone, cpu) = &boot_pageset[cpu];
3138	}
3139	return -ENOMEM;
3140}
3141
3142static inline void free_zone_pagesets(int cpu)
3143{
3144	struct zone *zone;
3145
3146	for_each_zone(zone) {
3147		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
3148
3149		/* Free per_cpu_pageset if it is slab allocated */
3150		if (pset != &boot_pageset[cpu])
3151			kfree(pset);
3152		zone_pcp(zone, cpu) = &boot_pageset[cpu];
3153	}
3154}
3155
3156static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
3157		unsigned long action,
3158		void *hcpu)
3159{
3160	int cpu = (long)hcpu;
3161	int ret = NOTIFY_OK;
3162
3163	switch (action) {
3164	case CPU_UP_PREPARE:
3165	case CPU_UP_PREPARE_FROZEN:
3166		if (process_zones(cpu))
3167			ret = NOTIFY_BAD;
3168		break;
3169	case CPU_UP_CANCELED:
3170	case CPU_UP_CANCELED_FROZEN:
3171	case CPU_DEAD:
3172	case CPU_DEAD_FROZEN:
3173		free_zone_pagesets(cpu);
3174		break;
3175	default:
3176		break;
3177	}
3178	return ret;
3179}
3180
3181static struct notifier_block __cpuinitdata pageset_notifier =
3182	{ &pageset_cpuup_callback, NULL, 0 };
3183
3184void __init setup_per_cpu_pageset(void)
3185{
3186	int err;
3187
3188	/* Initialize per_cpu_pageset for cpu 0.
3189	 * A cpuup callback will do this for every cpu
3190	 * as it comes online
3191	 */
3192	err = process_zones(smp_processor_id());
3193	BUG_ON(err);
3194	register_cpu_notifier(&pageset_notifier);
3195}
3196
3197#endif
3198
3199static noinline __init_refok
3200int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3201{
3202	int i;
3203	struct pglist_data *pgdat = zone->zone_pgdat;
3204	size_t alloc_size;
3205
3206	/*
3207	 * The per-page waitqueue mechanism uses hashed waitqueues
3208	 * per zone.
3209	 */
3210	zone->wait_table_hash_nr_entries =
3211		 wait_table_hash_nr_entries(zone_size_pages);
3212	zone->wait_table_bits =
3213		wait_table_bits(zone->wait_table_hash_nr_entries);
3214	alloc_size = zone->wait_table_hash_nr_entries
3215					* sizeof(wait_queue_head_t);
3216
3217	if (!slab_is_available()) {
3218		zone->wait_table = (wait_queue_head_t *)
3219			alloc_bootmem_node(pgdat, alloc_size);
3220	} else {
3221		/*
3222		 * This case means that a zone whose size was 0 gets new memory
3223		 * via memory hot-add.
3224		 * But it may be the case that a new node was hot-added.  In
3225		 * this case vmalloc() will not be able to use this new node's
3226		 * memory - this wait_table must be initialized to use this new
3227		 * node itself as well.
3228		 * To use this new node's memory, further consideration will be
3229		 * necessary.
3230		 */
3231		zone->wait_table = vmalloc(alloc_size);
3232	}
3233	if (!zone->wait_table)
3234		return -ENOMEM;
3235
3236	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3237		init_waitqueue_head(zone->wait_table + i);
3238
3239	return 0;
3240}
3241
3242static int __zone_pcp_update(void *data)
3243{
3244	struct zone *zone = data;
3245	int cpu;
3246	unsigned long batch = zone_batchsize(zone), flags;
3247
3248	for (cpu = 0; cpu < NR_CPUS; cpu++) {
3249		struct per_cpu_pageset *pset;
3250		struct per_cpu_pages *pcp;
3251
3252		pset = zone_pcp(zone, cpu);
3253		pcp = &pset->pcp;
3254
3255		local_irq_save(flags);
3256		free_pcppages_bulk(zone, pcp->count, pcp);
3257		setup_pageset(pset, batch);
3258		local_irq_restore(flags);
3259	}
3260	return 0;
3261}
3262
3263void zone_pcp_update(struct zone *zone)
3264{
3265	stop_machine(__zone_pcp_update, zone, NULL);
3266}
3267
3268static __meminit void zone_pcp_init(struct zone *zone)
3269{
3270	int cpu;
3271	unsigned long batch = zone_batchsize(zone);
3272
3273	for (cpu = 0; cpu < NR_CPUS; cpu++) {
3274#ifdef CONFIG_NUMA
3275		/* Early boot. Slab allocator not functional yet */
3276		zone_pcp(zone, cpu) = &boot_pageset[cpu];
3277		setup_pageset(&boot_pageset[cpu],0);
3278#else
3279		setup_pageset(zone_pcp(zone,cpu), batch);
3280#endif
3281	}
3282	if (zone->present_pages)
3283		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
3284			zone->name, zone->present_pages, batch);
3285}
3286
3287__meminit int init_currently_empty_zone(struct zone *zone,
3288					unsigned long zone_start_pfn,
3289					unsigned long size,
3290					enum memmap_context context)
3291{
3292	struct pglist_data *pgdat = zone->zone_pgdat;
3293	int ret;
3294	ret = zone_wait_table_init(zone, size);
3295	if (ret)
3296		return ret;
3297	pgdat->nr_zones = zone_idx(zone) + 1;
3298
3299	zone->zone_start_pfn = zone_start_pfn;
3300
3301	mminit_dprintk(MMINIT_TRACE, "memmap_init",
3302			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
3303			pgdat->node_id,
3304			(unsigned long)zone_idx(zone),
3305			zone_start_pfn, (zone_start_pfn + size));
3306
3307	zone_init_free_lists(zone);
3308
3309	return 0;
3310}
3311
3312#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3313/*
3314 * Basic iterator support. Return the first range of PFNs for a node
3315 * Note: nid == MAX_NUMNODES returns first region regardless of node
3316 */
3317static int __meminit first_active_region_index_in_nid(int nid)
3318{
3319	int i;
3320
3321	for (i = 0; i < nr_nodemap_entries; i++)
3322		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3323			return i;
3324
3325	return -1;
3326}
3327
3328/*
3329 * Basic iterator support. Return the next active range of PFNs for a node
3330 * Note: nid == MAX_NUMNODES returns next region regardless of node
3331 */
3332static int __meminit next_active_region_index_in_nid(int index, int nid)
3333{
3334	for (index = index + 1; index < nr_nodemap_entries; index++)
3335		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3336			return index;
3337
3338	return -1;
3339}
3340
3341#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3342/*
3343 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3344 * Architectures may implement their own version but if add_active_range()
3345 * was used and there are no special requirements, this is a convenient
3346 * alternative
3347 */
3348int __meminit __early_pfn_to_nid(unsigned long pfn)
3349{
3350	int i;
3351
3352	for (i = 0; i < nr_nodemap_entries; i++) {
3353		unsigned long start_pfn = early_node_map[i].start_pfn;
3354		unsigned long end_pfn = early_node_map[i].end_pfn;
3355
3356		if (start_pfn <= pfn && pfn < end_pfn)
3357			return early_node_map[i].nid;
3358	}
3359	/* This is a memory hole */
3360	return -1;
3361}
3362#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3363
3364int __meminit early_pfn_to_nid(unsigned long pfn)
3365{
3366	int nid;
3367
3368	nid = __early_pfn_to_nid(pfn);
3369	if (nid >= 0)
3370		return nid;
3371	/* just returns 0 */
3372	return 0;
3373}
3374
3375#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3376bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3377{
3378	int nid;
3379
3380	nid = __early_pfn_to_nid(pfn);
3381	if (nid >= 0 && nid != node)
3382		return false;
3383	return true;
3384}
3385#endif
3386
3387/* Basic iterator support to walk early_node_map[] */
3388#define for_each_active_range_index_in_nid(i, nid) \
3389	for (i = first_active_region_index_in_nid(nid); i != -1; \
3390				i = next_active_region_index_in_nid(i, nid))
3391
3392/**
3393 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3394 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3395 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3396 *
3397 * If an architecture guarantees that all ranges registered with
3398 * add_active_ranges() contain no holes and may be freed, this
3399 * this function may be used instead of calling free_bootmem() manually.
3400 */
3401void __init free_bootmem_with_active_regions(int nid,
3402						unsigned long max_low_pfn)
3403{
3404	int i;
3405
3406	for_each_active_range_index_in_nid(i, nid) {
3407		unsigned long size_pages = 0;
3408		unsigned long end_pfn = early_node_map[i].end_pfn;
3409
3410		if (early_node_map[i].start_pfn >= max_low_pfn)
3411			continue;
3412
3413		if (end_pfn > max_low_pfn)
3414			end_pfn = max_low_pfn;
3415
3416		size_pages = end_pfn - early_node_map[i].start_pfn;
3417		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3418				PFN_PHYS(early_node_map[i].start_pfn),
3419				size_pages << PAGE_SHIFT);
3420	}
3421}
3422
3423void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3424{
3425	int i;
3426	int ret;
3427
3428	for_each_active_range_index_in_nid(i, nid) {
3429		ret = work_fn(early_node_map[i].start_pfn,
3430			      early_node_map[i].end_pfn, data);
3431		if (ret)
3432			break;
3433	}
3434}
3435/**
3436 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3437 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3438 *
3439 * If an architecture guarantees that all ranges registered with
3440 * add_active_ranges() contain no holes and may be freed, this
3441 * function may be used instead of calling memory_present() manually.
3442 */
3443void __init sparse_memory_present_with_active_regions(int nid)
3444{
3445	int i;
3446
3447	for_each_active_range_index_in_nid(i, nid)
3448		memory_present(early_node_map[i].nid,
3449				early_node_map[i].start_pfn,
3450				early_node_map[i].end_pfn);
3451}
3452
3453/**
3454 * get_pfn_range_for_nid - Return the start and end page frames for a node
3455 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3456 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3457 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3458 *
3459 * It returns the start and end page frame of a node based on information
3460 * provided by an arch calling add_active_range(). If called for a node
3461 * with no available memory, a warning is printed and the start and end
3462 * PFNs will be 0.
3463 */
3464void __meminit get_pfn_range_for_nid(unsigned int nid,
3465			unsigned long *start_pfn, unsigned long *end_pfn)
3466{
3467	int i;
3468	*start_pfn = -1UL;
3469	*end_pfn = 0;
3470
3471	for_each_active_range_index_in_nid(i, nid) {
3472		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3473		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3474	}
3475
3476	if (*start_pfn == -1UL)
3477		*start_pfn = 0;
3478}
3479
3480/*
3481 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3482 * assumption is made that zones within a node are ordered in monotonic
3483 * increasing memory addresses so that the "highest" populated zone is used
3484 */
3485static void __init find_usable_zone_for_movable(void)
3486{
3487	int zone_index;
3488	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3489		if (zone_index == ZONE_MOVABLE)
3490			continue;
3491
3492		if (arch_zone_highest_possible_pfn[zone_index] >
3493				arch_zone_lowest_possible_pfn[zone_index])
3494			break;
3495	}
3496
3497	VM_BUG_ON(zone_index == -1);
3498	movable_zone = zone_index;
3499}
3500
3501/*
3502 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3503 * because it is sized independant of architecture. Unlike the other zones,
3504 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3505 * in each node depending on the size of each node and how evenly kernelcore
3506 * is distributed. This helper function adjusts the zone ranges
3507 * provided by the architecture for a given node by using the end of the
3508 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3509 * zones within a node are in order of monotonic increases memory addresses
3510 */
3511static void __meminit adjust_zone_range_for_zone_movable(int nid,
3512					unsigned long zone_type,
3513					unsigned long node_start_pfn,
3514					unsigned long node_end_pfn,
3515					unsigned long *zone_start_pfn,
3516					unsigned long *zone_end_pfn)
3517{
3518	/* Only adjust if ZONE_MOVABLE is on this node */
3519	if (zone_movable_pfn[nid]) {
3520		/* Size ZONE_MOVABLE */
3521		if (zone_type == ZONE_MOVABLE) {
3522			*zone_start_pfn = zone_movable_pfn[nid];
3523			*zone_end_pfn = min(node_end_pfn,
3524				arch_zone_highest_possible_pfn[movable_zone]);
3525
3526		/* Adjust for ZONE_MOVABLE starting within this range */
3527		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3528				*zone_end_pfn > zone_movable_pfn[nid]) {
3529			*zone_end_pfn = zone_movable_pfn[nid];
3530
3531		/* Check if this whole range is within ZONE_MOVABLE */
3532		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3533			*zone_start_pfn = *zone_end_pfn;
3534	}
3535}
3536
3537/*
3538 * Return the number of pages a zone spans in a node, including holes
3539 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3540 */
3541static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3542					unsigned long zone_type,
3543					unsigned long *ignored)
3544{
3545	unsigned long node_start_pfn, node_end_pfn;
3546	unsigned long zone_start_pfn, zone_end_pfn;
3547
3548	/* Get the start and end of the node and zone */
3549	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3550	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3551	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3552	adjust_zone_range_for_zone_movable(nid, zone_type,
3553				node_start_pfn, node_end_pfn,
3554				&zone_start_pfn, &zone_end_pfn);
3555
3556	/* Check that this node has pages within the zone's required range */
3557	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3558		return 0;
3559
3560	/* Move the zone boundaries inside the node if necessary */
3561	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3562	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3563
3564	/* Return the spanned pages */
3565	return zone_end_pfn - zone_start_pfn;
3566}
3567
3568/*
3569 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3570 * then all holes in the requested range will be accounted for.
3571 */
3572static unsigned long __meminit __absent_pages_in_range(int nid,
3573				unsigned long range_start_pfn,
3574				unsigned long range_end_pfn)
3575{
3576	int i = 0;
3577	unsigned long prev_end_pfn = 0, hole_pages = 0;
3578	unsigned long start_pfn;
3579
3580	/* Find the end_pfn of the first active range of pfns in the node */
3581	i = first_active_region_index_in_nid(nid);
3582	if (i == -1)
3583		return 0;
3584
3585	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3586
3587	/* Account for ranges before physical memory on this node */
3588	if (early_node_map[i].start_pfn > range_start_pfn)
3589		hole_pages = prev_end_pfn - range_start_pfn;
3590
3591	/* Find all holes for the zone within the node */
3592	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3593
3594		/* No need to continue if prev_end_pfn is outside the zone */
3595		if (prev_end_pfn >= range_end_pfn)
3596			break;
3597
3598		/* Make sure the end of the zone is not within the hole */
3599		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3600		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3601
3602		/* Update the hole size cound and move on */
3603		if (start_pfn > range_start_pfn) {
3604			BUG_ON(prev_end_pfn > start_pfn);
3605			hole_pages += start_pfn - prev_end_pfn;
3606		}
3607		prev_end_pfn = early_node_map[i].end_pfn;
3608	}
3609
3610	/* Account for ranges past physical memory on this node */
3611	if (range_end_pfn > prev_end_pfn)
3612		hole_pages += range_end_pfn -
3613				max(range_start_pfn, prev_end_pfn);
3614
3615	return hole_pages;
3616}
3617
3618/**
3619 * absent_pages_in_range - Return number of page frames in holes within a range
3620 * @start_pfn: The start PFN to start searching for holes
3621 * @end_pfn: The end PFN to stop searching for holes
3622 *
3623 * It returns the number of pages frames in memory holes within a range.
3624 */
3625unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3626							unsigned long end_pfn)
3627{
3628	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3629}
3630
3631/* Return the number of page frames in holes in a zone on a node */
3632static unsigned long __meminit zone_absent_pages_in_node(int nid,
3633					unsigned long zone_type,
3634					unsigned long *ignored)
3635{
3636	unsigned long node_start_pfn, node_end_pfn;
3637	unsigned long zone_start_pfn, zone_end_pfn;
3638
3639	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3640	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3641							node_start_pfn);
3642	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3643							node_end_pfn);
3644
3645	adjust_zone_range_for_zone_movable(nid, zone_type,
3646			node_start_pfn, node_end_pfn,
3647			&zone_start_pfn, &zone_end_pfn);
3648	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3649}
3650
3651#else
3652static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3653					unsigned long zone_type,
3654					unsigned long *zones_size)
3655{
3656	return zones_size[zone_type];
3657}
3658
3659static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3660						unsigned long zone_type,
3661						unsigned long *zholes_size)
3662{
3663	if (!zholes_size)
3664		return 0;
3665
3666	return zholes_size[zone_type];
3667}
3668
3669#endif
3670
3671static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3672		unsigned long *zones_size, unsigned long *zholes_size)
3673{
3674	unsigned long realtotalpages, totalpages = 0;
3675	enum zone_type i;
3676
3677	for (i = 0; i < MAX_NR_ZONES; i++)
3678		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3679								zones_size);
3680	pgdat->node_spanned_pages = totalpages;
3681
3682	realtotalpages = totalpages;
3683	for (i = 0; i < MAX_NR_ZONES; i++)
3684		realtotalpages -=
3685			zone_absent_pages_in_node(pgdat->node_id, i,
3686								zholes_size);
3687	pgdat->node_present_pages = realtotalpages;
3688	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3689							realtotalpages);
3690}
3691
3692#ifndef CONFIG_SPARSEMEM
3693/*
3694 * Calculate the size of the zone->blockflags rounded to an unsigned long
3695 * Start by making sure zonesize is a multiple of pageblock_order by rounding
3696 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
3697 * round what is now in bits to nearest long in bits, then return it in
3698 * bytes.
3699 */
3700static unsigned long __init usemap_size(unsigned long zonesize)
3701{
3702	unsigned long usemapsize;
3703
3704	usemapsize = roundup(zonesize, pageblock_nr_pages);
3705	usemapsize = usemapsize >> pageblock_order;
3706	usemapsize *= NR_PAGEBLOCK_BITS;
3707	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3708
3709	return usemapsize / 8;
3710}
3711
3712static void __init setup_usemap(struct pglist_data *pgdat,
3713				struct zone *zone, unsigned long zonesize)
3714{
3715	unsigned long usemapsize = usemap_size(zonesize);
3716	zone->pageblock_flags = NULL;
3717	if (usemapsize)
3718		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3719}
3720#else
3721static void inline setup_usemap(struct pglist_data *pgdat,
3722				struct zone *zone, unsigned long zonesize) {}
3723#endif /* CONFIG_SPARSEMEM */
3724
3725#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3726
3727/* Return a sensible default order for the pageblock size. */
3728static inline int pageblock_default_order(void)
3729{
3730	if (HPAGE_SHIFT > PAGE_SHIFT)
3731		return HUGETLB_PAGE_ORDER;
3732
3733	return MAX_ORDER-1;
3734}
3735
3736/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3737static inline void __init set_pageblock_order(unsigned int order)
3738{
3739	/* Check that pageblock_nr_pages has not already been setup */
3740	if (pageblock_order)
3741		return;
3742
3743	/*
3744	 * Assume the largest contiguous order of interest is a huge page.
3745	 * This value may be variable depending on boot parameters on IA64
3746	 */
3747	pageblock_order = order;
3748}
3749#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3750
3751/*
3752 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3753 * and pageblock_default_order() are unused as pageblock_order is set
3754 * at compile-time. See include/linux/pageblock-flags.h for the values of
3755 * pageblock_order based on the kernel config
3756 */
3757static inline int pageblock_default_order(unsigned int order)
3758{
3759	return MAX_ORDER-1;
3760}
3761#define set_pageblock_order(x)	do {} while (0)
3762
3763#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3764
3765/*
3766 * Set up the zone data structures:
3767 *   - mark all pages reserved
3768 *   - mark all memory queues empty
3769 *   - clear the memory bitmaps
3770 */
3771static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3772		unsigned long *zones_size, unsigned long *zholes_size)
3773{
3774	enum zone_type j;
3775	int nid = pgdat->node_id;
3776	unsigned long zone_start_pfn = pgdat->node_start_pfn;
3777	int ret;
3778
3779	pgdat_resize_init(pgdat);
3780	pgdat->nr_zones = 0;
3781	init_waitqueue_head(&pgdat->kswapd_wait);
3782	pgdat->kswapd_max_order = 0;
3783	pgdat_page_cgroup_init(pgdat);
3784
3785	for (j = 0; j < MAX_NR_ZONES; j++) {
3786		struct zone *zone = pgdat->node_zones + j;
3787		unsigned long size, realsize, memmap_pages;
3788		enum lru_list l;
3789
3790		size = zone_spanned_pages_in_node(nid, j, zones_size);
3791		realsize = size - zone_absent_pages_in_node(nid, j,
3792								zholes_size);
3793
3794		/*
3795		 * Adjust realsize so that it accounts for how much memory
3796		 * is used by this zone for memmap. This affects the watermark
3797		 * and per-cpu initialisations
3798		 */
3799		memmap_pages =
3800			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3801		if (realsize >= memmap_pages) {
3802			realsize -= memmap_pages;
3803			if (memmap_pages)
3804				printk(KERN_DEBUG
3805				       "  %s zone: %lu pages used for memmap\n",
3806				       zone_names[j], memmap_pages);
3807		} else
3808			printk(KERN_WARNING
3809				"  %s zone: %lu pages exceeds realsize %lu\n",
3810				zone_names[j], memmap_pages, realsize);
3811
3812		/* Account for reserved pages */
3813		if (j == 0 && realsize > dma_reserve) {
3814			realsize -= dma_reserve;
3815			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
3816					zone_names[0], dma_reserve);
3817		}
3818
3819		if (!is_highmem_idx(j))
3820			nr_kernel_pages += realsize;
3821		nr_all_pages += realsize;
3822
3823		zone->spanned_pages = size;
3824		zone->present_pages = realsize;
3825#ifdef CONFIG_NUMA
3826		zone->node = nid;
3827		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3828						/ 100;
3829		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3830#endif
3831		zone->name = zone_names[j];
3832		spin_lock_init(&zone->lock);
3833		spin_lock_init(&zone->lru_lock);
3834		zone_seqlock_init(zone);
3835		zone->zone_pgdat = pgdat;
3836
3837		zone->prev_priority = DEF_PRIORITY;
3838
3839		zone_pcp_init(zone);
3840		for_each_lru(l) {
3841			INIT_LIST_HEAD(&zone->lru[l].list);
3842			zone->reclaim_stat.nr_saved_scan[l] = 0;
3843		}
3844		zone->reclaim_stat.recent_rotated[0] = 0;
3845		zone->reclaim_stat.recent_rotated[1] = 0;
3846		zone->reclaim_stat.recent_scanned[0] = 0;
3847		zone->reclaim_stat.recent_scanned[1] = 0;
3848		zap_zone_vm_stats(zone);
3849		zone->flags = 0;
3850		if (!size)
3851			continue;
3852
3853		set_pageblock_order(pageblock_default_order());
3854		setup_usemap(pgdat, zone, size);
3855		ret = init_currently_empty_zone(zone, zone_start_pfn,
3856						size, MEMMAP_EARLY);
3857		BUG_ON(ret);
3858		memmap_init(size, nid, j, zone_start_pfn);
3859		zone_start_pfn += size;
3860	}
3861}
3862
3863static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3864{
3865	/* Skip empty nodes */
3866	if (!pgdat->node_spanned_pages)
3867		return;
3868
3869#ifdef CONFIG_FLAT_NODE_MEM_MAP
3870	/* ia64 gets its own node_mem_map, before this, without bootmem */
3871	if (!pgdat->node_mem_map) {
3872		unsigned long size, start, end;
3873		struct page *map;
3874
3875		/*
3876		 * The zone's endpoints aren't required to be MAX_ORDER
3877		 * aligned but the node_mem_map endpoints must be in order
3878		 * for the buddy allocator to function correctly.
3879		 */
3880		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3881		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3882		end = ALIGN(end, MAX_ORDER_NR_PAGES);
3883		size =  (end - start) * sizeof(struct page);
3884		map = alloc_remap(pgdat->node_id, size);
3885		if (!map)
3886			map = alloc_bootmem_node(pgdat, size);
3887		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3888	}
3889#ifndef CONFIG_NEED_MULTIPLE_NODES
3890	/*
3891	 * With no DISCONTIG, the global mem_map is just set as node 0's
3892	 */
3893	if (pgdat == NODE_DATA(0)) {
3894		mem_map = NODE_DATA(0)->node_mem_map;
3895#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3896		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3897			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3898#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3899	}
3900#endif
3901#endif /* CONFIG_FLAT_NODE_MEM_MAP */
3902}
3903
3904void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3905		unsigned long node_start_pfn, unsigned long *zholes_size)
3906{
3907	pg_data_t *pgdat = NODE_DATA(nid);
3908
3909	pgdat->node_id = nid;
3910	pgdat->node_start_pfn = node_start_pfn;
3911	calculate_node_totalpages(pgdat, zones_size, zholes_size);
3912
3913	alloc_node_mem_map(pgdat);
3914#ifdef CONFIG_FLAT_NODE_MEM_MAP
3915	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3916		nid, (unsigned long)pgdat,
3917		(unsigned long)pgdat->node_mem_map);
3918#endif
3919
3920	free_area_init_core(pgdat, zones_size, zholes_size);
3921}
3922
3923#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3924
3925#if MAX_NUMNODES > 1
3926/*
3927 * Figure out the number of possible node ids.
3928 */
3929static void __init setup_nr_node_ids(void)
3930{
3931	unsigned int node;
3932	unsigned int highest = 0;
3933
3934	for_each_node_mask(node, node_possible_map)
3935		highest = node;
3936	nr_node_ids = highest + 1;
3937}
3938#else
3939static inline void setup_nr_node_ids(void)
3940{
3941}
3942#endif
3943
3944/**
3945 * add_active_range - Register a range of PFNs backed by physical memory
3946 * @nid: The node ID the range resides on
3947 * @start_pfn: The start PFN of the available physical memory
3948 * @end_pfn: The end PFN of the available physical memory
3949 *
3950 * These ranges are stored in an early_node_map[] and later used by
3951 * free_area_init_nodes() to calculate zone sizes and holes. If the
3952 * range spans a memory hole, it is up to the architecture to ensure
3953 * the memory is not freed by the bootmem allocator. If possible
3954 * the range being registered will be merged with existing ranges.
3955 */
3956void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3957						unsigned long end_pfn)
3958{
3959	int i;
3960
3961	mminit_dprintk(MMINIT_TRACE, "memory_register",
3962			"Entering add_active_range(%d, %#lx, %#lx) "
3963			"%d entries of %d used\n",
3964			nid, start_pfn, end_pfn,
3965			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3966
3967	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3968
3969	/* Merge with existing active regions if possible */
3970	for (i = 0; i < nr_nodemap_entries; i++) {
3971		if (early_node_map[i].nid != nid)
3972			continue;
3973
3974		/* Skip if an existing region covers this new one */
3975		if (start_pfn >= early_node_map[i].start_pfn &&
3976				end_pfn <= early_node_map[i].end_pfn)
3977			return;
3978
3979		/* Merge forward if suitable */
3980		if (start_pfn <= early_node_map[i].end_pfn &&
3981				end_pfn > early_node_map[i].end_pfn) {
3982			early_node_map[i].end_pfn = end_pfn;
3983			return;
3984		}
3985
3986		/* Merge backward if suitable */
3987		if (start_pfn < early_node_map[i].end_pfn &&
3988				end_pfn >= early_node_map[i].start_pfn) {
3989			early_node_map[i].start_pfn = start_pfn;
3990			return;
3991		}
3992	}
3993
3994	/* Check that early_node_map is large enough */
3995	if (i >= MAX_ACTIVE_REGIONS) {
3996		printk(KERN_CRIT "More than %d memory regions, truncating\n",
3997							MAX_ACTIVE_REGIONS);
3998		return;
3999	}
4000
4001	early_node_map[i].nid = nid;
4002	early_node_map[i].start_pfn = start_pfn;
4003	early_node_map[i].end_pfn = end_pfn;
4004	nr_nodemap_entries = i + 1;
4005}
4006
4007/**
4008 * remove_active_range - Shrink an existing registered range of PFNs
4009 * @nid: The node id the range is on that should be shrunk
4010 * @start_pfn: The new PFN of the range
4011 * @end_pfn: The new PFN of the range
4012 *
4013 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
4014 * The map is kept near the end physical page range that has already been
4015 * registered. This function allows an arch to shrink an existing registered
4016 * range.
4017 */
4018void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4019				unsigned long end_pfn)
4020{
4021	int i, j;
4022	int removed = 0;
4023
4024	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4025			  nid, start_pfn, end_pfn);
4026
4027	/* Find the old active region end and shrink */
4028	for_each_active_range_index_in_nid(i, nid) {
4029		if (early_node_map[i].start_pfn >= start_pfn &&
4030		    early_node_map[i].end_pfn <= end_pfn) {
4031			/* clear it */
4032			early_node_map[i].start_pfn = 0;
4033			early_node_map[i].end_pfn = 0;
4034			removed = 1;
4035			continue;
4036		}
4037		if (early_node_map[i].start_pfn < start_pfn &&
4038		    early_node_map[i].end_pfn > start_pfn) {
4039			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4040			early_node_map[i].end_pfn = start_pfn;
4041			if (temp_end_pfn > end_pfn)
4042				add_active_range(nid, end_pfn, temp_end_pfn);
4043			continue;
4044		}
4045		if (early_node_map[i].start_pfn >= start_pfn &&
4046		    early_node_map[i].end_pfn > end_pfn &&
4047		    early_node_map[i].start_pfn < end_pfn) {
4048			early_node_map[i].start_pfn = end_pfn;
4049			continue;
4050		}
4051	}
4052
4053	if (!removed)
4054		return;
4055
4056	/* remove the blank ones */
4057	for (i = nr_nodemap_entries - 1; i > 0; i--) {
4058		if (early_node_map[i].nid != nid)
4059			continue;
4060		if (early_node_map[i].end_pfn)
4061			continue;
4062		/* we found it, get rid of it */
4063		for (j = i; j < nr_nodemap_entries - 1; j++)
4064			memcpy(&early_node_map[j], &early_node_map[j+1],
4065				sizeof(early_node_map[j]));
4066		j = nr_nodemap_entries - 1;
4067		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4068		nr_nodemap_entries--;
4069	}
4070}
4071
4072/**
4073 * remove_all_active_ranges - Remove all currently registered regions
4074 *
4075 * During discovery, it may be found that a table like SRAT is invalid
4076 * and an alternative discovery method must be used. This function removes
4077 * all currently registered regions.
4078 */
4079void __init remove_all_active_ranges(void)
4080{
4081	memset(early_node_map, 0, sizeof(early_node_map));
4082	nr_nodemap_entries = 0;
4083}
4084
4085/* Compare two active node_active_regions */
4086static int __init cmp_node_active_region(const void *a, const void *b)
4087{
4088	struct node_active_region *arange = (struct node_active_region *)a;
4089	struct node_active_region *brange = (struct node_active_region *)b;
4090
4091	/* Done this way to avoid overflows */
4092	if (arange->start_pfn > brange->start_pfn)
4093		return 1;
4094	if (arange->start_pfn < brange->start_pfn)
4095		return -1;
4096
4097	return 0;
4098}
4099
4100/* sort the node_map by start_pfn */
4101static void __init sort_node_map(void)
4102{
4103	sort(early_node_map, (size_t)nr_nodemap_entries,
4104			sizeof(struct node_active_region),
4105			cmp_node_active_region, NULL);
4106}
4107
4108/* Find the lowest pfn for a node */
4109static unsigned long __init find_min_pfn_for_node(int nid)
4110{
4111	int i;
4112	unsigned long min_pfn = ULONG_MAX;
4113
4114	/* Assuming a sorted map, the first range found has the starting pfn */
4115	for_each_active_range_index_in_nid(i, nid)
4116		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4117
4118	if (min_pfn == ULONG_MAX) {
4119		printk(KERN_WARNING
4120			"Could not find start_pfn for node %d\n", nid);
4121		return 0;
4122	}
4123
4124	return min_pfn;
4125}
4126
4127/**
4128 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4129 *
4130 * It returns the minimum PFN based on information provided via
4131 * add_active_range().
4132 */
4133unsigned long __init find_min_pfn_with_active_regions(void)
4134{
4135	return find_min_pfn_for_node(MAX_NUMNODES);
4136}
4137
4138/*
4139 * early_calculate_totalpages()
4140 * Sum pages in active regions for movable zone.
4141 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4142 */
4143static unsigned long __init early_calculate_totalpages(void)
4144{
4145	int i;
4146	unsigned long totalpages = 0;
4147
4148	for (i = 0; i < nr_nodemap_entries; i++) {
4149		unsigned long pages = early_node_map[i].end_pfn -
4150						early_node_map[i].start_pfn;
4151		totalpages += pages;
4152		if (pages)
4153			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4154	}
4155  	return totalpages;
4156}
4157
4158/*
4159 * Find the PFN the Movable zone begins in each node. Kernel memory
4160 * is spread evenly between nodes as long as the nodes have enough
4161 * memory. When they don't, some nodes will have more kernelcore than
4162 * others
4163 */
4164static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4165{
4166	int i, nid;
4167	unsigned long usable_startpfn;
4168	unsigned long kernelcore_node, kernelcore_remaining;
4169	/* save the state before borrow the nodemask */
4170	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4171	unsigned long totalpages = early_calculate_totalpages();
4172	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4173
4174	/*
4175	 * If movablecore was specified, calculate what size of
4176	 * kernelcore that corresponds so that memory usable for
4177	 * any allocation type is evenly spread. If both kernelcore
4178	 * and movablecore are specified, then the value of kernelcore
4179	 * will be used for required_kernelcore if it's greater than
4180	 * what movablecore would have allowed.
4181	 */
4182	if (required_movablecore) {
4183		unsigned long corepages;
4184
4185		/*
4186		 * Round-up so that ZONE_MOVABLE is at least as large as what
4187		 * was requested by the user
4188		 */
4189		required_movablecore =
4190			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4191		corepages = totalpages - required_movablecore;
4192
4193		required_kernelcore = max(required_kernelcore, corepages);
4194	}
4195
4196	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
4197	if (!required_kernelcore)
4198		goto out;
4199
4200	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4201	find_usable_zone_for_movable();
4202	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4203
4204restart:
4205	/* Spread kernelcore memory as evenly as possible throughout nodes */
4206	kernelcore_node = required_kernelcore / usable_nodes;
4207	for_each_node_state(nid, N_HIGH_MEMORY) {
4208		/*
4209		 * Recalculate kernelcore_node if the division per node
4210		 * now exceeds what is necessary to satisfy the requested
4211		 * amount of memory for the kernel
4212		 */
4213		if (required_kernelcore < kernelcore_node)
4214			kernelcore_node = required_kernelcore / usable_nodes;
4215
4216		/*
4217		 * As the map is walked, we track how much memory is usable
4218		 * by the kernel using kernelcore_remaining. When it is
4219		 * 0, the rest of the node is usable by ZONE_MOVABLE
4220		 */
4221		kernelcore_remaining = kernelcore_node;
4222
4223		/* Go through each range of PFNs within this node */
4224		for_each_active_range_index_in_nid(i, nid) {
4225			unsigned long start_pfn, end_pfn;
4226			unsigned long size_pages;
4227
4228			start_pfn = max(early_node_map[i].start_pfn,
4229						zone_movable_pfn[nid]);
4230			end_pfn = early_node_map[i].end_pfn;
4231			if (start_pfn >= end_pfn)
4232				continue;
4233
4234			/* Account for what is only usable for kernelcore */
4235			if (start_pfn < usable_startpfn) {
4236				unsigned long kernel_pages;
4237				kernel_pages = min(end_pfn, usable_startpfn)
4238								- start_pfn;
4239
4240				kernelcore_remaining -= min(kernel_pages,
4241							kernelcore_remaining);
4242				required_kernelcore -= min(kernel_pages,
4243							required_kernelcore);
4244
4245				/* Continue if range is now fully accounted */
4246				if (end_pfn <= usable_startpfn) {
4247
4248					/*
4249					 * Push zone_movable_pfn to the end so
4250					 * that if we have to rebalance
4251					 * kernelcore across nodes, we will
4252					 * not double account here
4253					 */
4254					zone_movable_pfn[nid] = end_pfn;
4255					continue;
4256				}
4257				start_pfn = usable_startpfn;
4258			}
4259
4260			/*
4261			 * The usable PFN range for ZONE_MOVABLE is from
4262			 * start_pfn->end_pfn. Calculate size_pages as the
4263			 * number of pages used as kernelcore
4264			 */
4265			size_pages = end_pfn - start_pfn;
4266			if (size_pages > kernelcore_remaining)
4267				size_pages = kernelcore_remaining;
4268			zone_movable_pfn[nid] = start_pfn + size_pages;
4269
4270			/*
4271			 * Some kernelcore has been met, update counts and
4272			 * break if the kernelcore for this node has been
4273			 * satisified
4274			 */
4275			required_kernelcore -= min(required_kernelcore,
4276								size_pages);
4277			kernelcore_remaining -= size_pages;
4278			if (!kernelcore_remaining)
4279				break;
4280		}
4281	}
4282
4283	/*
4284	 * If there is still required_kernelcore, we do another pass with one
4285	 * less node in the count. This will push zone_movable_pfn[nid] further
4286	 * along on the nodes that still have memory until kernelcore is
4287	 * satisified
4288	 */
4289	usable_nodes--;
4290	if (usable_nodes && required_kernelcore > usable_nodes)
4291		goto restart;
4292
4293	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4294	for (nid = 0; nid < MAX_NUMNODES; nid++)
4295		zone_movable_pfn[nid] =
4296			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4297
4298out:
4299	/* restore the node_state */
4300	node_states[N_HIGH_MEMORY] = saved_node_state;
4301}
4302
4303/* Any regular memory on that node ? */
4304static void check_for_regular_memory(pg_data_t *pgdat)
4305{
4306#ifdef CONFIG_HIGHMEM
4307	enum zone_type zone_type;
4308
4309	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4310		struct zone *zone = &pgdat->node_zones[zone_type];
4311		if (zone->present_pages)
4312			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4313	}
4314#endif
4315}
4316
4317/**
4318 * free_area_init_nodes - Initialise all pg_data_t and zone data
4319 * @max_zone_pfn: an array of max PFNs for each zone
4320 *
4321 * This will call free_area_init_node() for each active node in the system.
4322 * Using the page ranges provided by add_active_range(), the size of each
4323 * zone in each node and their holes is calculated. If the maximum PFN
4324 * between two adjacent zones match, it is assumed that the zone is empty.
4325 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4326 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4327 * starts where the previous one ended. For example, ZONE_DMA32 starts
4328 * at arch_max_dma_pfn.
4329 */
4330void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4331{
4332	unsigned long nid;
4333	int i;
4334
4335	/* Sort early_node_map as initialisation assumes it is sorted */
4336	sort_node_map();
4337
4338	/* Record where the zone boundaries are */
4339	memset(arch_zone_lowest_possible_pfn, 0,
4340				sizeof(arch_zone_lowest_possible_pfn));
4341	memset(arch_zone_highest_possible_pfn, 0,
4342				sizeof(arch_zone_highest_possible_pfn));
4343	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4344	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4345	for (i = 1; i < MAX_NR_ZONES; i++) {
4346		if (i == ZONE_MOVABLE)
4347			continue;
4348		arch_zone_lowest_possible_pfn[i] =
4349			arch_zone_highest_possible_pfn[i-1];
4350		arch_zone_highest_possible_pfn[i] =
4351			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4352	}
4353	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4354	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4355
4356	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4357	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4358	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4359
4360	/* Print out the zone ranges */
4361	printk("Zone PFN ranges:\n");
4362	for (i = 0; i < MAX_NR_ZONES; i++) {
4363		if (i == ZONE_MOVABLE)
4364			continue;
4365		printk("  %-8s %0#10lx -> %0#10lx\n",
4366				zone_names[i],
4367				arch_zone_lowest_possible_pfn[i],
4368				arch_zone_highest_possible_pfn[i]);
4369	}
4370
4371	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4372	printk("Movable zone start PFN for each node\n");
4373	for (i = 0; i < MAX_NUMNODES; i++) {
4374		if (zone_movable_pfn[i])
4375			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4376	}
4377
4378	/* Print out the early_node_map[] */
4379	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4380	for (i = 0; i < nr_nodemap_entries; i++)
4381		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4382						early_node_map[i].start_pfn,
4383						early_node_map[i].end_pfn);
4384
4385	/* Initialise every node */
4386	mminit_verify_pageflags_layout();
4387	setup_nr_node_ids();
4388	for_each_online_node(nid) {
4389		pg_data_t *pgdat = NODE_DATA(nid);
4390		free_area_init_node(nid, NULL,
4391				find_min_pfn_for_node(nid), NULL);
4392
4393		/* Any memory on that node */
4394		if (pgdat->node_present_pages)
4395			node_set_state(nid, N_HIGH_MEMORY);
4396		check_for_regular_memory(pgdat);
4397	}
4398}
4399
4400static int __init cmdline_parse_core(char *p, unsigned long *core)
4401{
4402	unsigned long long coremem;
4403	if (!p)
4404		return -EINVAL;
4405
4406	coremem = memparse(p, &p);
4407	*core = coremem >> PAGE_SHIFT;
4408
4409	/* Paranoid check that UL is enough for the coremem value */
4410	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4411
4412	return 0;
4413}
4414
4415/*
4416 * kernelcore=size sets the amount of memory for use for allocations that
4417 * cannot be reclaimed or migrated.
4418 */
4419static int __init cmdline_parse_kernelcore(char *p)
4420{
4421	return cmdline_parse_core(p, &required_kernelcore);
4422}
4423
4424/*
4425 * movablecore=size sets the amount of memory for use for allocations that
4426 * can be reclaimed or migrated.
4427 */
4428static int __init cmdline_parse_movablecore(char *p)
4429{
4430	return cmdline_parse_core(p, &required_movablecore);
4431}
4432
4433early_param("kernelcore", cmdline_parse_kernelcore);
4434early_param("movablecore", cmdline_parse_movablecore);
4435
4436#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4437
4438/**
4439 * set_dma_reserve - set the specified number of pages reserved in the first zone
4440 * @new_dma_reserve: The number of pages to mark reserved
4441 *
4442 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4443 * In the DMA zone, a significant percentage may be consumed by kernel image
4444 * and other unfreeable allocations which can skew the watermarks badly. This
4445 * function may optionally be used to account for unfreeable pages in the
4446 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4447 * smaller per-cpu batchsize.
4448 */
4449void __init set_dma_reserve(unsigned long new_dma_reserve)
4450{
4451	dma_reserve = new_dma_reserve;
4452}
4453
4454#ifndef CONFIG_NEED_MULTIPLE_NODES
4455struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4456EXPORT_SYMBOL(contig_page_data);
4457#endif
4458
4459void __init free_area_init(unsigned long *zones_size)
4460{
4461	free_area_init_node(0, zones_size,
4462			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4463}
4464
4465static int page_alloc_cpu_notify(struct notifier_block *self,
4466				 unsigned long action, void *hcpu)
4467{
4468	int cpu = (unsigned long)hcpu;
4469
4470	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4471		drain_pages(cpu);
4472
4473		/*
4474		 * Spill the event counters of the dead processor
4475		 * into the current processors event counters.
4476		 * This artificially elevates the count of the current
4477		 * processor.
4478		 */
4479		vm_events_fold_cpu(cpu);
4480
4481		/*
4482		 * Zero the differential counters of the dead processor
4483		 * so that the vm statistics are consistent.
4484		 *
4485		 * This is only okay since the processor is dead and cannot
4486		 * race with what we are doing.
4487		 */
4488		refresh_cpu_vm_stats(cpu);
4489	}
4490	return NOTIFY_OK;
4491}
4492
4493void __init page_alloc_init(void)
4494{
4495	hotcpu_notifier(page_alloc_cpu_notify, 0);
4496}
4497
4498/*
4499 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
4500 *	or min_free_kbytes changes.
4501 */
4502static void calculate_totalreserve_pages(void)
4503{
4504	struct pglist_data *pgdat;
4505	unsigned long reserve_pages = 0;
4506	enum zone_type i, j;
4507
4508	for_each_online_pgdat(pgdat) {
4509		for (i = 0; i < MAX_NR_ZONES; i++) {
4510			struct zone *zone = pgdat->node_zones + i;
4511			unsigned long max = 0;
4512
4513			/* Find valid and maximum lowmem_reserve in the zone */
4514			for (j = i; j < MAX_NR_ZONES; j++) {
4515				if (zone->lowmem_reserve[j] > max)
4516					max = zone->lowmem_reserve[j];
4517			}
4518
4519			/* we treat the high watermark as reserved pages. */
4520			max += high_wmark_pages(zone);
4521
4522			if (max > zone->present_pages)
4523				max = zone->present_pages;
4524			reserve_pages += max;
4525		}
4526	}
4527	totalreserve_pages = reserve_pages;
4528}
4529
4530/*
4531 * setup_per_zone_lowmem_reserve - called whenever
4532 *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
4533 *	has a correct pages reserved value, so an adequate number of
4534 *	pages are left in the zone after a successful __alloc_pages().
4535 */
4536static void setup_per_zone_lowmem_reserve(void)
4537{
4538	struct pglist_data *pgdat;
4539	enum zone_type j, idx;
4540
4541	for_each_online_pgdat(pgdat) {
4542		for (j = 0; j < MAX_NR_ZONES; j++) {
4543			struct zone *zone = pgdat->node_zones + j;
4544			unsigned long present_pages = zone->present_pages;
4545
4546			zone->lowmem_reserve[j] = 0;
4547
4548			idx = j;
4549			while (idx) {
4550				struct zone *lower_zone;
4551
4552				idx--;
4553
4554				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4555					sysctl_lowmem_reserve_ratio[idx] = 1;
4556
4557				lower_zone = pgdat->node_zones + idx;
4558				lower_zone->lowmem_reserve[j] = present_pages /
4559					sysctl_lowmem_reserve_ratio[idx];
4560				present_pages += lower_zone->present_pages;
4561			}
4562		}
4563	}
4564
4565	/* update totalreserve_pages */
4566	calculate_totalreserve_pages();
4567}
4568
4569/**
4570 * setup_per_zone_wmarks - called when min_free_kbytes changes
4571 * or when memory is hot-{added|removed}
4572 *
4573 * Ensures that the watermark[min,low,high] values for each zone are set
4574 * correctly with respect to min_free_kbytes.
4575 */
4576void setup_per_zone_wmarks(void)
4577{
4578	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4579	unsigned long lowmem_pages = 0;
4580	struct zone *zone;
4581	unsigned long flags;
4582
4583	/* Calculate total number of !ZONE_HIGHMEM pages */
4584	for_each_zone(zone) {
4585		if (!is_highmem(zone))
4586			lowmem_pages += zone->present_pages;
4587	}
4588
4589	for_each_zone(zone) {
4590		u64 tmp;
4591
4592		spin_lock_irqsave(&zone->lock, flags);
4593		tmp = (u64)pages_min * zone->present_pages;
4594		do_div(tmp, lowmem_pages);
4595		if (is_highmem(zone)) {
4596			/*
4597			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4598			 * need highmem pages, so cap pages_min to a small
4599			 * value here.
4600			 *
4601			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
4602			 * deltas controls asynch page reclaim, and so should
4603			 * not be capped for highmem.
4604			 */
4605			int min_pages;
4606
4607			min_pages = zone->present_pages / 1024;
4608			if (min_pages < SWAP_CLUSTER_MAX)
4609				min_pages = SWAP_CLUSTER_MAX;
4610			if (min_pages > 128)
4611				min_pages = 128;
4612			zone->watermark[WMARK_MIN] = min_pages;
4613		} else {
4614			/*
4615			 * If it's a lowmem zone, reserve a number of pages
4616			 * proportionate to the zone's size.
4617			 */
4618			zone->watermark[WMARK_MIN] = tmp;
4619		}
4620
4621		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
4622		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4623		setup_zone_migrate_reserve(zone);
4624		spin_unlock_irqrestore(&zone->lock, flags);
4625	}
4626
4627	/* update totalreserve_pages */
4628	calculate_totalreserve_pages();
4629}
4630
4631/*
4632 * The inactive anon list should be small enough that the VM never has to
4633 * do too much work, but large enough that each inactive page has a chance
4634 * to be referenced again before it is swapped out.
4635 *
4636 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4637 * INACTIVE_ANON pages on this zone's LRU, maintained by the
4638 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4639 * the anonymous pages are kept on the inactive list.
4640 *
4641 * total     target    max
4642 * memory    ratio     inactive anon
4643 * -------------------------------------
4644 *   10MB       1         5MB
4645 *  100MB       1        50MB
4646 *    1GB       3       250MB
4647 *   10GB      10       0.9GB
4648 *  100GB      31         3GB
4649 *    1TB     101        10GB
4650 *   10TB     320        32GB
4651 */
4652void calculate_zone_inactive_ratio(struct zone *zone)
4653{
4654	unsigned int gb, ratio;
4655
4656	/* Zone size in gigabytes */
4657	gb = zone->present_pages >> (30 - PAGE_SHIFT);
4658	if (gb)
4659		ratio = int_sqrt(10 * gb);
4660	else
4661		ratio = 1;
4662
4663	zone->inactive_ratio = ratio;
4664}
4665
4666static void __init setup_per_zone_inactive_ratio(void)
4667{
4668	struct zone *zone;
4669
4670	for_each_zone(zone)
4671		calculate_zone_inactive_ratio(zone);
4672}
4673
4674/*
4675 * Initialise min_free_kbytes.
4676 *
4677 * For small machines we want it small (128k min).  For large machines
4678 * we want it large (64MB max).  But it is not linear, because network
4679 * bandwidth does not increase linearly with machine size.  We use
4680 *
4681 * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4682 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
4683 *
4684 * which yields
4685 *
4686 * 16MB:	512k
4687 * 32MB:	724k
4688 * 64MB:	1024k
4689 * 128MB:	1448k
4690 * 256MB:	2048k
4691 * 512MB:	2896k
4692 * 1024MB:	4096k
4693 * 2048MB:	5792k
4694 * 4096MB:	8192k
4695 * 8192MB:	11584k
4696 * 16384MB:	16384k
4697 */
4698static int __init init_per_zone_wmark_min(void)
4699{
4700	unsigned long lowmem_kbytes;
4701
4702	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4703
4704	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4705	if (min_free_kbytes < 128)
4706		min_free_kbytes = 128;
4707	if (min_free_kbytes > 65536)
4708		min_free_kbytes = 65536;
4709	setup_per_zone_wmarks();
4710	setup_per_zone_lowmem_reserve();
4711	setup_per_zone_inactive_ratio();
4712	return 0;
4713}
4714module_init(init_per_zone_wmark_min)
4715
4716/*
4717 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4718 *	that we can call two helper functions whenever min_free_kbytes
4719 *	changes.
4720 */
4721int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4722	void __user *buffer, size_t *length, loff_t *ppos)
4723{
4724	proc_dointvec(table, write, buffer, length, ppos);
4725	if (write)
4726		setup_per_zone_wmarks();
4727	return 0;
4728}
4729
4730#ifdef CONFIG_NUMA
4731int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4732	void __user *buffer, size_t *length, loff_t *ppos)
4733{
4734	struct zone *zone;
4735	int rc;
4736
4737	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4738	if (rc)
4739		return rc;
4740
4741	for_each_zone(zone)
4742		zone->min_unmapped_pages = (zone->present_pages *
4743				sysctl_min_unmapped_ratio) / 100;
4744	return 0;
4745}
4746
4747int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4748	void __user *buffer, size_t *length, loff_t *ppos)
4749{
4750	struct zone *zone;
4751	int rc;
4752
4753	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4754	if (rc)
4755		return rc;
4756
4757	for_each_zone(zone)
4758		zone->min_slab_pages = (zone->present_pages *
4759				sysctl_min_slab_ratio) / 100;
4760	return 0;
4761}
4762#endif
4763
4764/*
4765 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4766 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4767 *	whenever sysctl_lowmem_reserve_ratio changes.
4768 *
4769 * The reserve ratio obviously has absolutely no relation with the
4770 * minimum watermarks. The lowmem reserve ratio can only make sense
4771 * if in function of the boot time zone sizes.
4772 */
4773int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4774	void __user *buffer, size_t *length, loff_t *ppos)
4775{
4776	proc_dointvec_minmax(table, write, buffer, length, ppos);
4777	setup_per_zone_lowmem_reserve();
4778	return 0;
4779}
4780
4781/*
4782 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4783 * cpu.  It is the fraction of total pages in each zone that a hot per cpu pagelist
4784 * can have before it gets flushed back to buddy allocator.
4785 */
4786
4787int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4788	void __user *buffer, size_t *length, loff_t *ppos)
4789{
4790	struct zone *zone;
4791	unsigned int cpu;
4792	int ret;
4793
4794	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
4795	if (!write || (ret == -EINVAL))
4796		return ret;
4797	for_each_populated_zone(zone) {
4798		for_each_online_cpu(cpu) {
4799			unsigned long  high;
4800			high = zone->present_pages / percpu_pagelist_fraction;
4801			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4802		}
4803	}
4804	return 0;
4805}
4806
4807int hashdist = HASHDIST_DEFAULT;
4808
4809#ifdef CONFIG_NUMA
4810static int __init set_hashdist(char *str)
4811{
4812	if (!str)
4813		return 0;
4814	hashdist = simple_strtoul(str, &str, 0);
4815	return 1;
4816}
4817__setup("hashdist=", set_hashdist);
4818#endif
4819
4820/*
4821 * allocate a large system hash table from bootmem
4822 * - it is assumed that the hash table must contain an exact power-of-2
4823 *   quantity of entries
4824 * - limit is the number of hash buckets, not the total allocation size
4825 */
4826void *__init alloc_large_system_hash(const char *tablename,
4827				     unsigned long bucketsize,
4828				     unsigned long numentries,
4829				     int scale,
4830				     int flags,
4831				     unsigned int *_hash_shift,
4832				     unsigned int *_hash_mask,
4833				     unsigned long limit)
4834{
4835	unsigned long long max = limit;
4836	unsigned long log2qty, size;
4837	void *table = NULL;
4838
4839	/* allow the kernel cmdline to have a say */
4840	if (!numentries) {
4841		/* round applicable memory size up to nearest megabyte */
4842		numentries = nr_kernel_pages;
4843		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4844		numentries >>= 20 - PAGE_SHIFT;
4845		numentries <<= 20 - PAGE_SHIFT;
4846
4847		/* limit to 1 bucket per 2^scale bytes of low memory */
4848		if (scale > PAGE_SHIFT)
4849			numentries >>= (scale - PAGE_SHIFT);
4850		else
4851			numentries <<= (PAGE_SHIFT - scale);
4852
4853		/* Make sure we've got at least a 0-order allocation.. */
4854		if (unlikely(flags & HASH_SMALL)) {
4855			/* Makes no sense without HASH_EARLY */
4856			WARN_ON(!(flags & HASH_EARLY));
4857			if (!(numentries >> *_hash_shift)) {
4858				numentries = 1UL << *_hash_shift;
4859				BUG_ON(!numentries);
4860			}
4861		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4862			numentries = PAGE_SIZE / bucketsize;
4863	}
4864	numentries = roundup_pow_of_two(numentries);
4865
4866	/* limit allocation size to 1/16 total memory by default */
4867	if (max == 0) {
4868		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4869		do_div(max, bucketsize);
4870	}
4871
4872	if (numentries > max)
4873		numentries = max;
4874
4875	log2qty = ilog2(numentries);
4876
4877	do {
4878		size = bucketsize << log2qty;
4879		if (flags & HASH_EARLY)
4880			table = alloc_bootmem_nopanic(size);
4881		else if (hashdist)
4882			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4883		else {
4884			/*
4885			 * If bucketsize is not a power-of-two, we may free
4886			 * some pages at the end of hash table which
4887			 * alloc_pages_exact() automatically does
4888			 */
4889			if (get_order(size) < MAX_ORDER) {
4890				table = alloc_pages_exact(size, GFP_ATOMIC);
4891				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
4892			}
4893		}
4894	} while (!table && size > PAGE_SIZE && --log2qty);
4895
4896	if (!table)
4897		panic("Failed to allocate %s hash table\n", tablename);
4898
4899	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4900	       tablename,
4901	       (1U << log2qty),
4902	       ilog2(size) - PAGE_SHIFT,
4903	       size);
4904
4905	if (_hash_shift)
4906		*_hash_shift = log2qty;
4907	if (_hash_mask)
4908		*_hash_mask = (1 << log2qty) - 1;
4909
4910	return table;
4911}
4912
4913/* Return a pointer to the bitmap storing bits affecting a block of pages */
4914static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4915							unsigned long pfn)
4916{
4917#ifdef CONFIG_SPARSEMEM
4918	return __pfn_to_section(pfn)->pageblock_flags;
4919#else
4920	return zone->pageblock_flags;
4921#endif /* CONFIG_SPARSEMEM */
4922}
4923
4924static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4925{
4926#ifdef CONFIG_SPARSEMEM
4927	pfn &= (PAGES_PER_SECTION-1);
4928	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4929#else
4930	pfn = pfn - zone->zone_start_pfn;
4931	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4932#endif /* CONFIG_SPARSEMEM */
4933}
4934
4935/**
4936 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4937 * @page: The page within the block of interest
4938 * @start_bitidx: The first bit of interest to retrieve
4939 * @end_bitidx: The last bit of interest
4940 * returns pageblock_bits flags
4941 */
4942unsigned long get_pageblock_flags_group(struct page *page,
4943					int start_bitidx, int end_bitidx)
4944{
4945	struct zone *zone;
4946	unsigned long *bitmap;
4947	unsigned long pfn, bitidx;
4948	unsigned long flags = 0;
4949	unsigned long value = 1;
4950
4951	zone = page_zone(page);
4952	pfn = page_to_pfn(page);
4953	bitmap = get_pageblock_bitmap(zone, pfn);
4954	bitidx = pfn_to_bitidx(zone, pfn);
4955
4956	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4957		if (test_bit(bitidx + start_bitidx, bitmap))
4958			flags |= value;
4959
4960	return flags;
4961}
4962
4963/**
4964 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4965 * @page: The page within the block of interest
4966 * @start_bitidx: The first bit of interest
4967 * @end_bitidx: The last bit of interest
4968 * @flags: The flags to set
4969 */
4970void set_pageblock_flags_group(struct page *page, unsigned long flags,
4971					int start_bitidx, int end_bitidx)
4972{
4973	struct zone *zone;
4974	unsigned long *bitmap;
4975	unsigned long pfn, bitidx;
4976	unsigned long value = 1;
4977
4978	zone = page_zone(page);
4979	pfn = page_to_pfn(page);
4980	bitmap = get_pageblock_bitmap(zone, pfn);
4981	bitidx = pfn_to_bitidx(zone, pfn);
4982	VM_BUG_ON(pfn < zone->zone_start_pfn);
4983	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4984
4985	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4986		if (flags & value)
4987			__set_bit(bitidx + start_bitidx, bitmap);
4988		else
4989			__clear_bit(bitidx + start_bitidx, bitmap);
4990}
4991
4992/*
4993 * This is designed as sub function...plz see page_isolation.c also.
4994 * set/clear page block's type to be ISOLATE.
4995 * page allocater never alloc memory from ISOLATE block.
4996 */
4997
4998int set_migratetype_isolate(struct page *page)
4999{
5000	struct zone *zone;
5001	unsigned long flags;
5002	int ret = -EBUSY;
5003	int zone_idx;
5004
5005	zone = page_zone(page);
5006	zone_idx = zone_idx(zone);
5007	spin_lock_irqsave(&zone->lock, flags);
5008	/*
5009	 * In future, more migrate types will be able to be isolation target.
5010	 */
5011	if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE &&
5012	    zone_idx != ZONE_MOVABLE)
5013		goto out;
5014	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5015	move_freepages_block(zone, page, MIGRATE_ISOLATE);
5016	ret = 0;
5017out:
5018	spin_unlock_irqrestore(&zone->lock, flags);
5019	if (!ret)
5020		drain_all_pages();
5021	return ret;
5022}
5023
5024void unset_migratetype_isolate(struct page *page)
5025{
5026	struct zone *zone;
5027	unsigned long flags;
5028	zone = page_zone(page);
5029	spin_lock_irqsave(&zone->lock, flags);
5030	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5031		goto out;
5032	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5033	move_freepages_block(zone, page, MIGRATE_MOVABLE);
5034out:
5035	spin_unlock_irqrestore(&zone->lock, flags);
5036}
5037
5038#ifdef CONFIG_MEMORY_HOTREMOVE
5039/*
5040 * All pages in the range must be isolated before calling this.
5041 */
5042void
5043__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5044{
5045	struct page *page;
5046	struct zone *zone;
5047	int order, i;
5048	unsigned long pfn;
5049	unsigned long flags;
5050	/* find the first valid pfn */
5051	for (pfn = start_pfn; pfn < end_pfn; pfn++)
5052		if (pfn_valid(pfn))
5053			break;
5054	if (pfn == end_pfn)
5055		return;
5056	zone = page_zone(pfn_to_page(pfn));
5057	spin_lock_irqsave(&zone->lock, flags);
5058	pfn = start_pfn;
5059	while (pfn < end_pfn) {
5060		if (!pfn_valid(pfn)) {
5061			pfn++;
5062			continue;
5063		}
5064		page = pfn_to_page(pfn);
5065		BUG_ON(page_count(page));
5066		BUG_ON(!PageBuddy(page));
5067		order = page_order(page);
5068#ifdef CONFIG_DEBUG_VM
5069		printk(KERN_INFO "remove from free list %lx %d %lx\n",
5070		       pfn, 1 << order, end_pfn);
5071#endif
5072		list_del(&page->lru);
5073		rmv_page_order(page);
5074		zone->free_area[order].nr_free--;
5075		__mod_zone_page_state(zone, NR_FREE_PAGES,
5076				      - (1UL << order));
5077		for (i = 0; i < (1 << order); i++)
5078			SetPageReserved((page+i));
5079		pfn += (1 << order);
5080	}
5081	spin_unlock_irqrestore(&zone->lock, flags);
5082}
5083#endif
5084
5085#ifdef CONFIG_MEMORY_FAILURE
5086bool is_free_buddy_page(struct page *page)
5087{
5088	struct zone *zone = page_zone(page);
5089	unsigned long pfn = page_to_pfn(page);
5090	unsigned long flags;
5091	int order;
5092
5093	spin_lock_irqsave(&zone->lock, flags);
5094	for (order = 0; order < MAX_ORDER; order++) {
5095		struct page *page_head = page - (pfn & ((1 << order) - 1));
5096
5097		if (PageBuddy(page_head) && page_order(page_head) >= order)
5098			break;
5099	}
5100	spin_unlock_irqrestore(&zone->lock, flags);
5101
5102	return order < MAX_ORDER;
5103}
5104#endif
5105