page_alloc.c revision 5f8dcc21211a3d4e3a7a5ca366b469fb88117f61
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <trace/events/kmem.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};
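
/*
 * Worked example (illustrative): with the 1G split above, a NORMAL
 * allocation leaves 784M/256 = ~3M of ZONE_DMA reserved, and a HIGHMEM
 * allocation leaves 224M/32 = 7M of ZONE_NORMAL plus (224M+784M)/256 =
 * ~4M of ZONE_DMA reserved against lowmem pinning.
 */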

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{

	if (unlikely(page_group_by_mobility_disabled))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	printk(KERN_ALERT
		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
		page, (void *)page->flags, page_count(page),
		page_mapcount(page), page->mapping, page->index);

	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	__ClearPageBuddy(page);
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);
		p->first_page = page;
	}
}
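
#if 0
/*
 * Illustrative sketch only, not built: walking the compound-page
 * metadata that prep_compound_page() establishes.  For order >= 1,
 * every tail page points back at the head and the head records the
 * order; the function name here is hypothetical.
 */
static void compound_layout_example(struct page *head)
{
	int i, nr_pages = 1 << compound_order(head);

	BUG_ON(!PageHead(head));
	for (i = 1; i < nr_pages; i++) {
		BUG_ON(!PageTail(head + i));
		BUG_ON((head + i)->first_page != head);
	}
}
#endif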

static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order) ||
	    unlikely(!PageHead(page))) {
		bad_page(page);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) || (p->first_page != page))) {
			bad_page(page);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
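
/*
 * Worked example (illustrative): for page_idx 12 at order 0,
 * __page_find_buddy() yields 12 ^ 1 = 13 and __find_combined_index()
 * yields 12 & ~1 = 12; at order 1 the buddy is 12 ^ 2 = 14 and the
 * combined index is again 12, so full merging produces an order-2
 * block starting at index 12.
 */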

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce. A page and its buddy can be coalesced if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PG_buddy. Page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & ((1 << order) - 1));
	VM_BUG_ON(bad_range(zone, page));

	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;

		/* Our buddy is free, merge with it and move up one order. */
		list_del(&buddy->lru);
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru,
		&zone->free_area[order].free_list[migratetype]);
	zone->free_area[order].nr_free++;
}
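
/*
 * Illustrative trace (assumed indices): freeing the order-0 page at
 * index 12 when index 13 is free merges them into an order-1 block at
 * 12; if a free order-1 buddy already sits at 14, the loop merges once
 * more and the final list_add() places an order-2 block at index 12 on
 * free_area[2].free_list[migratetype].
 */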

#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
/*
 * free_page_mlock() -- clean up attempts to free an mlocked() page.
 * Page should not be on lru, so no need to fix that up.
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
	__dec_zone_page_state(page, NR_MLOCK);
	__count_vm_event(UNEVICTABLE_MLOCKFREED);
}
#else
static void free_page_mlock(struct page *page) { }
#endif

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(atomic_read(&page->_count) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
		bad_page(page);
		return 1;
	}
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;

	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;

	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
	while (count--) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. This
		 * spinning around potentially empty lists is bloody awful,
		 * alternatives that don't suck are welcome
		 */
		do {
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		trace_mm_page_pcpu_drain(page, 0, migratetype);
		__free_one_page(page, zone, 0, migratetype);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;

	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
	__free_one_page(page, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int bad = 0;
	int wasMlocked = __TestClearPageMlocked(page);

	kmemcheck_free_shadow(page, order);

	for (i = 0 ; i < (1 << order) ; ++i)
		bad += free_pages_check(page + i);
	if (bad)
		return;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	local_irq_save(flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order,
					get_pageblock_migratetype(page));
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
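
/*
 * Worked example (illustrative): expand(zone, page, 0, 3, area, mt)
 * keeps page[0] for the caller and returns the remainder as an order-2
 * block at page[4], an order-1 block at page[2] and an order-0 block
 * at page[1] -- the subdivision order the comment above depends on.
 */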

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(atomic_read(&page->_count) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
		bad_page(page);
		return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area * area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order in which free lists are fallen back to
 * when the free lists for the desired migratetype are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_del(&page->lru);
		list_add(&page->lru,
			&zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

static int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
	struct free_area * area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 */
			if (unlikely(current_order >= (pageblock_order >> 1)) ||
					start_migratetype == MIGRATE_RECLAIMABLE ||
					page_group_by_mobility_disabled) {
				unsigned long pages;
				pages = move_freepages_block(zone, page,
								start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (pageblock_order-1)) ||
						page_group_by_mobility_disabled)
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);

			/* Take ownership for orders >= pageblock_order */
			if (current_order >= pageblock_order)
				change_pageblock_range(page, current_order,
							start_migratetype);

			expand(zone, page, order, current_order, area, migratetype);

			trace_mm_page_alloc_extfrag(page, order, current_order,
				start_migratetype, migratetype);

			return page;
		}
	}

	return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

retry_reserve:
	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
		page = __rmqueue_fallback(zone, order, migratetype);

		/*
		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
		 * is used because __rmqueue_smallest is an inline function
		 * and we want just one call site
		 */
		if (!page) {
			migratetype = MIGRATE_RESERVE;
			goto retry_reserve;
		}
	}

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, int cold)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the caller's
		 * perspective, the linked list is ordered by page number in
		 * some conditions. This is useful for IO devices that can
		 * merge IO requests if the physical pages are ordered
		 * properly.
		 */
		if (likely(cold == 0))
			list_add(&page->lru, list);
		else
			list_add_tail(&page->lru, list);
		set_page_private(page, migratetype);
		list = &page->lru;
	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	free_pcppages_bulk(zone, to_drain, pcp);
	pcp->count -= to_drain;
	local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		pset = zone_pcp(zone, cpu);

		pcp = &pset->pcp;
		local_irq_save(flags);
		free_pcppages_bulk(zone, pcp->count, pcp);
		pcp->count = 0;
		local_irq_restore(flags);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
	on_each_cpu(drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 */
static void free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;
	int migratetype;
	int wasMlocked = __TestClearPageMlocked(page);

	kmemcheck_free_shadow(page, 0);

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
	}
	arch_free_page(page, 0);
	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp;
	migratetype = get_pageblock_migratetype(page);
	set_page_private(page, migratetype);
	local_irq_save(flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_event(PGFREE);

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Free ISOLATE pages back to the allocator because they are being
	 * offlined but treat RESERVE as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator
	 */
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
			free_one_page(zone, page, 0, migratetype);
			goto out;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	if (cold)
		list_add_tail(&page->lru, &pcp->lists[migratetype]);
	else
		list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pcppages_bulk(zone, pcp->batch, pcp);
		pcp->count -= pcp->batch;
	}

out:
	local_irq_restore(flags);
	put_cpu();
}

void free_hot_page(struct page *page)
{
	trace_mm_page_free_direct(page, 0);
	free_hot_cold_page(page, 0);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n-1]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
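
#if 0
/*
 * Illustrative usage only, not built: a caller that wants a physically
 * contiguous run but needs to free the sub-pages individually.  The
 * function name is hypothetical.
 */
static struct page *split_alloc_example(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL, order);

	if (!page)
		return NULL;
	split_page(page, order);	/* each sub-page is now a refcounted order-0 page */
	/* unused sub-pages can later be returned with __free_page(page + i) */
	return page;
}
#endif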

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, int order, gfp_t gfp_flags,
			int migratetype)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu  = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;
		struct list_head *list;

		pcp = &zone_pcp(zone, cpu)->pcp;
		list = &pcp->lists[migratetype];
		local_irq_save(flags);
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype, cold);
			if (unlikely(list_empty(list)))
				goto failed;
		}

		if (cold)
			page = list_entry(list->prev, struct page, lru);
		else
			page = list_entry(list->next, struct page, lru);

		list_del(&page->lru);
		pcp->count--;
	} else {
		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
			/*
			 * __GFP_NOFAIL is not to be used in new code.
			 *
			 * All __GFP_NOFAIL callers should be fixed so that they
			 * properly detect and handle allocation failures.
			 *
			 * We most definitely don't want callers attempting to
			 * allocate greater than order-1 page units with
			 * __GFP_NOFAIL.
			 */
			WARN_ON_ONCE(order > 1);
		}
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone);
	local_irq_restore(flags);
	put_cpu();

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
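
/*
 * Illustrative note: ALLOC_WMARK_MASK is ALLOC_NO_WATERMARKS - 1 = 0x03,
 * so "alloc_flags & ALLOC_WMARK_MASK" recovers WMARK_MIN/LOW/HIGH and can
 * index zone->watermark[] directly, as get_page_from_freelist() does below.
 */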

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

	struct dentry *ignore_gfp_highmem_file;
	struct dentry *ignore_gfp_wait_file;
	struct dentry *min_order_file;

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return 0;
	if (gfp_mask & __GFP_NOFAIL)
		return 0;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return 0;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return 0;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
	int err;

	err = init_fault_attr_dentries(&fail_page_alloc.attr,
				       "fail_page_alloc");
	if (err)
		return err;
	dir = fail_page_alloc.attr.dentries.dir;

	fail_page_alloc.ignore_gfp_wait_file =
		debugfs_create_bool("ignore-gfp-wait", mode, dir,
				      &fail_page_alloc.ignore_gfp_wait);

	fail_page_alloc.ignore_gfp_highmem_file =
		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				      &fail_page_alloc.ignore_gfp_highmem);
	fail_page_alloc.min_order_file =
		debugfs_create_u32("min-order", mode, dir,
				   &fail_page_alloc.min_order);

	if (!fail_page_alloc.ignore_gfp_wait_file ||
	    !fail_page_alloc.ignore_gfp_highmem_file ||
	    !fail_page_alloc.min_order_file) {
		err = -ENOMEM;
		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
		debugfs_remove(fail_page_alloc.min_order_file);
		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
	}

	return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
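
/*
 * Worked example (illustrative numbers): for an order-2 request with
 * mark = 128, no ALLOC_HIGH/ALLOC_HARDER and a zero lowmem_reserve,
 * a zone with 200 free pages starts from 200 - (1 << 2) + 1 = 197,
 * which clears the first test; if 150 of those pages sit in order-0
 * blocks, the loop leaves 47 against a halved min of 64 and the check
 * fails at o = 0.
 */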
1366
1367#ifdef CONFIG_NUMA
1368/*
1369 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1370 * skip over zones that are not allowed by the cpuset, or that have
1371 * been recently (in last second) found to be nearly full.  See further
1372 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1373 * that have to skip over a lot of full or unallowed zones.
1374 *
1375 * If the zonelist cache is present in the passed in zonelist, then
1376 * returns a pointer to the allowed node mask (either the current
1377 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
1378 *
1379 * If the zonelist cache is not available for this zonelist, does
1380 * nothing and returns NULL.
1381 *
1382 * If the fullzones BITMAP in the zonelist cache is stale (more than
1383 * a second since last zap'd) then we zap it out (clear its bits.)
1384 *
1385 * We hold off even calling zlc_setup, until after we've checked the
1386 * first zone in the zonelist, on the theory that most allocations will
1387 * be satisfied from that first zone, so best to examine that zone as
1388 * quickly as we can.
1389 */
1390static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1391{
1392	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1393	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1394
1395	zlc = zonelist->zlcache_ptr;
1396	if (!zlc)
1397		return NULL;
1398
1399	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1400		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1401		zlc->last_full_zap = jiffies;
1402	}
1403
1404	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1405					&cpuset_current_mems_allowed :
1406					&node_states[N_HIGH_MEMORY];
1407	return allowednodes;
1408}
1409
1410/*
1411 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1412 * if it is worth looking at further for free memory:
1413 *  1) Check that the zone isn't thought to be full (doesn't have its
1414 *     bit set in the zonelist_cache fullzones BITMAP).
1415 *  2) Check that the zones node (obtained from the zonelist_cache
1416 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1417 * Return true (non-zero) if zone is worth looking at further, or
1418 * else return false (zero) if it is not.
1419 *
1420 * This check -ignores- the distinction between various watermarks,
1421 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1422 * found to be full for any variation of these watermarks, it will
1423 * be considered full for up to one second by all requests, unless
1424 * we are so low on memory on all allowed nodes that we are forced
1425 * into the second scan of the zonelist.
1426 *
1427 * In the second scan we ignore this zonelist cache and exactly
1428 * apply the watermarks to all zones, even it is slower to do so.
1429 * We are low on memory in the second scan, and should leave no stone
1430 * unturned looking for a free page.
1431 */
1432static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1433						nodemask_t *allowednodes)
1434{
1435	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1436	int i;				/* index of *z in zonelist zones */
1437	int n;				/* node that zone *z is on */
1438
1439	zlc = zonelist->zlcache_ptr;
1440	if (!zlc)
1441		return 1;
1442
1443	i = z - zonelist->_zonerefs;
1444	n = zlc->z_to_n[i];
1445
1446	/* This zone is worth trying if it is allowed but not full */
1447	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1448}
1449
1450/*
1451 * Given 'z' scanning a zonelist, set the corresponding bit in
1452 * zlc->fullzones, so that subsequent attempts to allocate a page
1453 * from that zone don't waste time re-examining it.
1454 */
1455static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1456{
1457	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1458	int i;				/* index of *z in zonelist zones */
1459
1460	zlc = zonelist->zlcache_ptr;
1461	if (!zlc)
1462		return;
1463
1464	i = z - zonelist->_zonerefs;
1465
1466	set_bit(i, zlc->fullzones);
1467}
1468
1469#else	/* CONFIG_NUMA */
1470
1471static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1472{
1473	return NULL;
1474}
1475
1476static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1477				nodemask_t *allowednodes)
1478{
1479	return 1;
1480}
1481
1482static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1483{
1484}
1485#endif	/* CONFIG_NUMA */
1486
1487/*
1488 * get_page_from_freelist goes through the zonelist trying to allocate
1489 * a page.
1490 */
1491static struct page *
1492get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1493		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1494		struct zone *preferred_zone, int migratetype)
1495{
1496	struct zoneref *z;
1497	struct page *page = NULL;
1498	int classzone_idx;
1499	struct zone *zone;
1500	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1501	int zlc_active = 0;		/* set if using zonelist_cache */
1502	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1503
1504	classzone_idx = zone_idx(preferred_zone);
1505zonelist_scan:
1506	/*
1507	 * Scan zonelist, looking for a zone with enough free.
1508	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1509	 */
1510	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1511						high_zoneidx, nodemask) {
1512		if (NUMA_BUILD && zlc_active &&
1513			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1514				continue;
1515		if ((alloc_flags & ALLOC_CPUSET) &&
1516			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1517				goto try_next_zone;
1518
1519		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1520		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1521			unsigned long mark;
1522			int ret;
1523
1524			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1525			if (zone_watermark_ok(zone, order, mark,
1526				    classzone_idx, alloc_flags))
1527				goto try_this_zone;
1528
1529			if (zone_reclaim_mode == 0)
1530				goto this_zone_full;
1531
1532			ret = zone_reclaim(zone, gfp_mask, order);
1533			switch (ret) {
1534			case ZONE_RECLAIM_NOSCAN:
1535				/* did not scan */
1536				goto try_next_zone;
1537			case ZONE_RECLAIM_FULL:
1538				/* scanned but unreclaimable */
1539				goto this_zone_full;
1540			default:
1541				/* did we reclaim enough */
1542				if (!zone_watermark_ok(zone, order, mark,
1543						classzone_idx, alloc_flags))
1544					goto this_zone_full;
1545			}
1546		}
1547
1548try_this_zone:
1549		page = buffered_rmqueue(preferred_zone, zone, order,
1550						gfp_mask, migratetype);
1551		if (page)
1552			break;
1553this_zone_full:
1554		if (NUMA_BUILD)
1555			zlc_mark_zone_full(zonelist, z);
1556try_next_zone:
1557		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1558			/*
1559			 * we do zlc_setup after the first zone is tried but only
1560			 * if there are multiple nodes make it worthwhile
1561			 */
1562			allowednodes = zlc_setup(zonelist, alloc_flags);
1563			zlc_active = 1;
1564			did_zlc_setup = 1;
1565		}
1566	}
1567
1568	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1569		/* Disable zlc cache for second zonelist scan */
1570		zlc_active = 0;
1571		goto zonelist_scan;
1572	}
1573	return page;
1574}
1575
1576static inline int
1577should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1578				unsigned long pages_reclaimed)
1579{
1580	/* Do not loop if specifically requested */
1581	if (gfp_mask & __GFP_NORETRY)
1582		return 0;
1583
1584	/*
1585	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1586	 * means __GFP_NOFAIL, but that may not be true in other
1587	 * implementations.
1588	 */
1589	if (order <= PAGE_ALLOC_COSTLY_ORDER)
1590		return 1;
1591
1592	/*
1593	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1594	 * specified, then we retry until we no longer reclaim any pages
1595	 * (above), or we've reclaimed an order of pages at least as
1596	 * large as the allocation's order. In both cases, if the
1597	 * allocation still fails, we stop retrying.
1598	 */
1599	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1600		return 1;
1601
1602	/*
1603	 * Don't let big-order allocations loop unless the caller
1604	 * explicitly requests that.
1605	 */
1606	if (gfp_mask & __GFP_NOFAIL)
1607		return 1;
1608
1609	return 0;
1610}
1611
1612static inline struct page *
1613__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1614	struct zonelist *zonelist, enum zone_type high_zoneidx,
1615	nodemask_t *nodemask, struct zone *preferred_zone,
1616	int migratetype)
1617{
1618	struct page *page;
1619
1620	/* Acquire the OOM killer lock for the zones in zonelist */
1621	if (!try_set_zone_oom(zonelist, gfp_mask)) {
1622		schedule_timeout_uninterruptible(1);
1623		return NULL;
1624	}
1625
1626	/*
1627	 * Go through the zonelist yet one more time, keep very high watermark
1628	 * here, this is only to catch a parallel oom killing, we must fail if
1629	 * we're still under heavy pressure.
1630	 */
1631	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1632		order, zonelist, high_zoneidx,
1633		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1634		preferred_zone, migratetype);
1635	if (page)
1636		goto out;
1637
1638	/* The OOM killer will not help higher order allocs */
1639	if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
1640		goto out;
1641
1642	/* Exhausted what can be done so it's blamo time */
1643	out_of_memory(zonelist, gfp_mask, order);
1644
1645out:
1646	clear_zonelist_oom(zonelist, gfp_mask);
1647	return page;
1648}
1649
1650/* The really slow allocator path where we enter direct reclaim */
1651static inline struct page *
1652__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1653	struct zonelist *zonelist, enum zone_type high_zoneidx,
1654	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1655	int migratetype, unsigned long *did_some_progress)
1656{
1657	struct page *page = NULL;
1658	struct reclaim_state reclaim_state;
1659	struct task_struct *p = current;
1660
1661	cond_resched();
1662
1663	/* We now go into synchronous reclaim */
1664	cpuset_memory_pressure_bump();
1665	p->flags |= PF_MEMALLOC;
1666	lockdep_set_current_reclaim_state(gfp_mask);
1667	reclaim_state.reclaimed_slab = 0;
1668	p->reclaim_state = &reclaim_state;
1669
1670	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1671
1672	p->reclaim_state = NULL;
1673	lockdep_clear_current_reclaim_state();
1674	p->flags &= ~PF_MEMALLOC;
1675
1676	cond_resched();
1677
1678	if (order != 0)
1679		drain_all_pages();
1680
1681	if (likely(*did_some_progress))
1682		page = get_page_from_freelist(gfp_mask, nodemask, order,
1683					zonelist, high_zoneidx,
1684					alloc_flags, preferred_zone,
1685					migratetype);
1686	return page;
1687}
1688
1689/*
1690 * This is called in the allocator slow-path if the allocation request is of
1691 * sufficient urgency to ignore watermarks and take other desperate measures
1692 */
1693static inline struct page *
1694__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1695	struct zonelist *zonelist, enum zone_type high_zoneidx,
1696	nodemask_t *nodemask, struct zone *preferred_zone,
1697	int migratetype)
1698{
1699	struct page *page;
1700
1701	do {
1702		page = get_page_from_freelist(gfp_mask, nodemask, order,
1703			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
1704			preferred_zone, migratetype);
1705
1706		if (!page && gfp_mask & __GFP_NOFAIL)
1707			congestion_wait(BLK_RW_ASYNC, HZ/50);
1708	} while (!page && (gfp_mask & __GFP_NOFAIL));
1709
1710	return page;
1711}
1712
1713static inline
1714void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1715						enum zone_type high_zoneidx)
1716{
1717	struct zoneref *z;
1718	struct zone *zone;
1719
1720	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1721		wakeup_kswapd(zone, order);
1722}
1723
1724static inline int
1725gfp_to_alloc_flags(gfp_t gfp_mask)
1726{
1727	struct task_struct *p = current;
1728	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1729	const gfp_t wait = gfp_mask & __GFP_WAIT;
1730
1731	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1732	BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
1733
1734	/*
1735	 * The caller may dip into page reserves a bit more if the caller
1736	 * cannot run direct reclaim, or if the caller has realtime scheduling
1737	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1738	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1739	 */
1740	alloc_flags |= (gfp_mask & __GFP_HIGH);
1741
1742	if (!wait) {
1743		alloc_flags |= ALLOC_HARDER;
1744		/*
1745		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1746		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1747		 */
1748		alloc_flags &= ~ALLOC_CPUSET;
1749	} else if (unlikely(rt_task(p)))
1750		alloc_flags |= ALLOC_HARDER;
1751
1752	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1753		if (!in_interrupt() &&
1754		    ((p->flags & PF_MEMALLOC) ||
1755		     unlikely(test_thread_flag(TIF_MEMDIE))))
1756			alloc_flags |= ALLOC_NO_WATERMARKS;
1757	}
1758
1759	return alloc_flags;
1760}
1761
1762static inline struct page *
1763__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1764	struct zonelist *zonelist, enum zone_type high_zoneidx,
1765	nodemask_t *nodemask, struct zone *preferred_zone,
1766	int migratetype)
1767{
1768	const gfp_t wait = gfp_mask & __GFP_WAIT;
1769	struct page *page = NULL;
1770	int alloc_flags;
1771	unsigned long pages_reclaimed = 0;
1772	unsigned long did_some_progress;
1773	struct task_struct *p = current;
1774
1775	/*
1776	 * In the slowpath, we sanity check order to avoid ever trying to
1777	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
1778	 * be using allocators in order of preference for an area that is
1779	 * too large.
1780	 */
1781	if (order >= MAX_ORDER) {
1782		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
1783		return NULL;
1784	}
1785
1786	/*
1787	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1788	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1789	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
1790	 * using a larger set of nodes after it has established that the
1791	 * allowed per node queues are empty and that nodes are
1792	 * over allocated.
1793	 */
1794	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1795		goto nopage;
1796
1797	wake_all_kswapd(order, zonelist, high_zoneidx);
1798
1799restart:
1800	/*
1801	 * OK, we're below the kswapd watermark and have kicked background
1802	 * reclaim. Now things get more complex, so set up alloc_flags according
1803	 * to how we want to proceed.
1804	 */
1805	alloc_flags = gfp_to_alloc_flags(gfp_mask);
1806
1807	/* This is the last chance, in general, before the goto nopage. */
1808	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1809			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
1810			preferred_zone, migratetype);
1811	if (page)
1812		goto got_pg;
1813
1814rebalance:
1815	/* Allocate without watermarks if the context allows */
1816	if (alloc_flags & ALLOC_NO_WATERMARKS) {
1817		page = __alloc_pages_high_priority(gfp_mask, order,
1818				zonelist, high_zoneidx, nodemask,
1819				preferred_zone, migratetype);
1820		if (page)
1821			goto got_pg;
1822	}
1823
1824	/* Atomic allocations - we can't balance anything */
1825	if (!wait)
1826		goto nopage;
1827
1828	/* Avoid recursion of direct reclaim */
1829	if (p->flags & PF_MEMALLOC)
1830		goto nopage;
1831
1832	/* Avoid allocations with no watermarks from looping endlessly */
1833	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
1834		goto nopage;
1835
1836	/* Try direct reclaim and then allocating */
1837	page = __alloc_pages_direct_reclaim(gfp_mask, order,
1838					zonelist, high_zoneidx,
1839					nodemask,
1840					alloc_flags, preferred_zone,
1841					migratetype, &did_some_progress);
1842	if (page)
1843		goto got_pg;
1844
1845	/*
1846	 * If we failed to make any progress reclaiming, then we are
1847	 * running out of options and have to consider going OOM
1848	 */
1849	if (!did_some_progress) {
1850		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1851			if (oom_killer_disabled)
1852				goto nopage;
1853			page = __alloc_pages_may_oom(gfp_mask, order,
1854					zonelist, high_zoneidx,
1855					nodemask, preferred_zone,
1856					migratetype);
1857			if (page)
1858				goto got_pg;
1859
1860			/*
1861			 * The OOM killer does not trigger for high-order
1862			 * allocations without __GFP_NOFAIL, so if no progress
1863			 * is being made, there are no other options and
1864			 * retrying is unlikely to help.
1865			 */
1866			if (order > PAGE_ALLOC_COSTLY_ORDER &&
1867						!(gfp_mask & __GFP_NOFAIL))
1868				goto nopage;
1869
1870			goto restart;
1871		}
1872	}
1873
1874	/* Check if we should retry the allocation */
1875	pages_reclaimed += did_some_progress;
1876	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
1877		/* Wait for some write requests to complete then retry */
1878		congestion_wait(BLK_RW_ASYNC, HZ/50);
1879		goto rebalance;
1880	}
1881
1882nopage:
1883	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1884		printk(KERN_WARNING "%s: page allocation failure."
1885			" order:%d, mode:0x%x\n",
1886			p->comm, order, gfp_mask);
1887		dump_stack();
1888		show_mem();
1889	}
1890	return page;
1891got_pg:
1892	if (kmemcheck_enabled)
1893		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
1894	return page;
1895
1896}
1897
1898/*
1899 * This is the 'heart' of the zoned buddy allocator.
1900 */
1901struct page *
1902__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
1903			struct zonelist *zonelist, nodemask_t *nodemask)
1904{
1905	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1906	struct zone *preferred_zone;
1907	struct page *page;
1908	int migratetype = allocflags_to_migratetype(gfp_mask);
1909
1910	gfp_mask &= gfp_allowed_mask;
1911
1912	lockdep_trace_alloc(gfp_mask);
1913
1914	might_sleep_if(gfp_mask & __GFP_WAIT);
1915
1916	if (should_fail_alloc_page(gfp_mask, order))
1917		return NULL;
1918
1919	/*
1920	 * Check that the zones suitable for the gfp_mask contain at least
1921	 * one valid zone. It's possible to have an empty zonelist as a
1922	 * result of GFP_THISNODE and a memoryless node.
1923	 */
1924	if (unlikely(!zonelist->_zonerefs->zone))
1925		return NULL;
1926
1927	/* The preferred zone is used for statistics later */
1928	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
1929	if (!preferred_zone)
1930		return NULL;
1931
1932	/* First allocation attempt */
1933	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1934			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
1935			preferred_zone, migratetype);
1936	if (unlikely(!page))
1937		page = __alloc_pages_slowpath(gfp_mask, order,
1938				zonelist, high_zoneidx, nodemask,
1939				preferred_zone, migratetype);
1940
1941	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
1942	return page;
1943}
1944EXPORT_SYMBOL(__alloc_pages_nodemask);
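
/*
 * Usage sketch (illustrative only, not compiled): most callers reach
 * this function through the alloc_pages()/alloc_page() wrappers, which
 * supply the local node's zonelist and a NULL nodemask.
 */
#if 0
static void alloc_pages_example(void)
{
	/* Ask for an order-1 (two page) physically contiguous block. */
	struct page *page = alloc_pages(GFP_KERNEL, 1);

	if (page)
		__free_pages(page, 1);
}
#endif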
1945
1946/*
1947 * Common helper functions.
1948 */
1949unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1950{
1951	struct page *page;
1952
1953	/*
1954	 * __get_free_pages() returns a 32-bit address, which cannot represent
1955	 * a highmem page
1956	 */
1957	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1958
1959	page = alloc_pages(gfp_mask, order);
1960	if (!page)
1961		return 0;
1962	return (unsigned long) page_address(page);
1963}
1964EXPORT_SYMBOL(__get_free_pages);
1965
1966unsigned long get_zeroed_page(gfp_t gfp_mask)
1967{
1968	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1969}
1970EXPORT_SYMBOL(get_zeroed_page);
1971
1972void __pagevec_free(struct pagevec *pvec)
1973{
1974	int i = pagevec_count(pvec);
1975
1976	while (--i >= 0) {
1977		trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
1978		free_hot_cold_page(pvec->pages[i], pvec->cold);
1979	}
1980}
1981
1982void __free_pages(struct page *page, unsigned int order)
1983{
1984	if (put_page_testzero(page)) {
1985		trace_mm_page_free_direct(page, order);
1986		if (order == 0)
1987			free_hot_page(page);
1988		else
1989			__free_pages_ok(page, order);
1990	}
1991}
1992
1993EXPORT_SYMBOL(__free_pages);
1994
1995void free_pages(unsigned long addr, unsigned int order)
1996{
1997	if (addr != 0) {
1998		VM_BUG_ON(!virt_addr_valid((void *)addr));
1999		__free_pages(virt_to_page((void *)addr), order);
2000	}
2001}
2002
2003EXPORT_SYMBOL(free_pages);
2004
2005/**
2006 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2007 * @size: the number of bytes to allocate
2008 * @gfp_mask: GFP flags for the allocation
2009 *
2010 * This function is similar to alloc_pages(), except that it allocates the
2011 * minimum number of pages to satisfy the request.  alloc_pages() can only
2012 * allocate memory in power-of-two pages.
2013 *
2014 * This function is also limited by MAX_ORDER.
2015 *
2016 * Memory allocated by this function must be released by free_pages_exact().
2017 */
2018void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2019{
2020	unsigned int order = get_order(size);
2021	unsigned long addr;
2022
2023	addr = __get_free_pages(gfp_mask, order);
2024	if (addr) {
2025		unsigned long alloc_end = addr + (PAGE_SIZE << order);
2026		unsigned long used = addr + PAGE_ALIGN(size);
2027
2028		split_page(virt_to_page((void *)addr), order);
2029		while (used < alloc_end) {
2030			free_page(used);
2031			used += PAGE_SIZE;
2032		}
2033	}
2034
2035	return (void *)addr;
2036}
2037EXPORT_SYMBOL(alloc_pages_exact);
2038
2039/**
2040 * free_pages_exact - release memory allocated via alloc_pages_exact()
2041 * @virt: the value returned by alloc_pages_exact.
2042 * @size: size of allocation, same value as passed to alloc_pages_exact().
2043 *
2044 * Release the memory allocated by a previous call to alloc_pages_exact.
2045 */
2046void free_pages_exact(void *virt, size_t size)
2047{
2048	unsigned long addr = (unsigned long)virt;
2049	unsigned long end = addr + PAGE_ALIGN(size);
2050
2051	while (addr < end) {
2052		free_page(addr);
2053		addr += PAGE_SIZE;
2054	}
2055}
2056EXPORT_SYMBOL(free_pages_exact);
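
/*
 * Usage sketch (illustrative only, not compiled): for 5 pages of data,
 * alloc_pages() would round up to an order-3 (8 page) block; the _exact
 * variant splits that block and frees the 3 trailing pages immediately.
 */
#if 0
static void pages_exact_example(void)
{
	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);

	if (buf)
		free_pages_exact(buf, 5 * PAGE_SIZE);
}
#endif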
2057
2058static unsigned int nr_free_zone_pages(int offset)
2059{
2060	struct zoneref *z;
2061	struct zone *zone;
2062
2063	/* Just pick one node, since fallback list is circular */
2064	unsigned int sum = 0;
2065
2066	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2067
2068	for_each_zone_zonelist(zone, z, zonelist, offset) {
2069		unsigned long size = zone->present_pages;
2070		unsigned long high = high_wmark_pages(zone);
2071		if (size > high)
2072			sum += size - high;
2073	}
2074
2075	return sum;
2076}
2077
2078/*
2079 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2080 */
2081unsigned int nr_free_buffer_pages(void)
2082{
2083	return nr_free_zone_pages(gfp_zone(GFP_USER));
2084}
2085EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2086
2087/*
2088 * Amount of free RAM allocatable within all zones
2089 */
2090unsigned int nr_free_pagecache_pages(void)
2091{
2092	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2093}
2094
2095static inline void show_node(struct zone *zone)
2096{
2097	if (NUMA_BUILD)
2098		printk("Node %d ", zone_to_nid(zone));
2099}
2100
2101void si_meminfo(struct sysinfo *val)
2102{
2103	val->totalram = totalram_pages;
2104	val->sharedram = 0;
2105	val->freeram = global_page_state(NR_FREE_PAGES);
2106	val->bufferram = nr_blockdev_pages();
2107	val->totalhigh = totalhigh_pages;
2108	val->freehigh = nr_free_highpages();
2109	val->mem_unit = PAGE_SIZE;
2110}
2111
2112EXPORT_SYMBOL(si_meminfo);
2113
2114#ifdef CONFIG_NUMA
2115void si_meminfo_node(struct sysinfo *val, int nid)
2116{
2117	pg_data_t *pgdat = NODE_DATA(nid);
2118
2119	val->totalram = pgdat->node_present_pages;
2120	val->freeram = node_page_state(nid, NR_FREE_PAGES);
2121#ifdef CONFIG_HIGHMEM
2122	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2123	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2124			NR_FREE_PAGES);
2125#else
2126	val->totalhigh = 0;
2127	val->freehigh = 0;
2128#endif
2129	val->mem_unit = PAGE_SIZE;
2130}
2131#endif
2132
2133#define K(x) ((x) << (PAGE_SHIFT-10))
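/* e.g. with 4K pages (PAGE_SHIFT == 12), K(256) == 1024: 256 pages are 1024kB */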
2134
2135/*
2136 * Show the free area list (used inside the shift_scroll-lock stuff).
2137 * We also summarise fragmentation by reporting the amount of memory on
2138 * each free list, broken down by order.
2139 */
2140void show_free_areas(void)
2141{
2142	int cpu;
2143	struct zone *zone;
2144
2145	for_each_populated_zone(zone) {
2146		show_node(zone);
2147		printk("%s per-cpu:\n", zone->name);
2148
2149		for_each_online_cpu(cpu) {
2150			struct per_cpu_pageset *pageset;
2151
2152			pageset = zone_pcp(zone, cpu);
2153
2154			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2155			       cpu, pageset->pcp.high,
2156			       pageset->pcp.batch, pageset->pcp.count);
2157		}
2158	}
2159
2160	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2161		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2162		" unevictable:%lu"
2163		" dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n"
2164		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2165		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2166		global_page_state(NR_ACTIVE_ANON),
2167		global_page_state(NR_INACTIVE_ANON),
2168		global_page_state(NR_ISOLATED_ANON),
2169		global_page_state(NR_ACTIVE_FILE),
2170		global_page_state(NR_INACTIVE_FILE),
2171		global_page_state(NR_ISOLATED_FILE),
2172		global_page_state(NR_UNEVICTABLE),
2173		global_page_state(NR_FILE_DIRTY),
2174		global_page_state(NR_WRITEBACK),
2175		global_page_state(NR_UNSTABLE_NFS),
2176		nr_blockdev_pages(),
2177		global_page_state(NR_FREE_PAGES),
2178		global_page_state(NR_SLAB_RECLAIMABLE),
2179		global_page_state(NR_SLAB_UNRECLAIMABLE),
2180		global_page_state(NR_FILE_MAPPED),
2181		global_page_state(NR_SHMEM),
2182		global_page_state(NR_PAGETABLE),
2183		global_page_state(NR_BOUNCE));
2184
2185	for_each_populated_zone(zone) {
2186		int i;
2187
2188		show_node(zone);
2189		printk("%s"
2190			" free:%lukB"
2191			" min:%lukB"
2192			" low:%lukB"
2193			" high:%lukB"
2194			" active_anon:%lukB"
2195			" inactive_anon:%lukB"
2196			" active_file:%lukB"
2197			" inactive_file:%lukB"
2198			" unevictable:%lukB"
2199			" isolated(anon):%lukB"
2200			" isolated(file):%lukB"
2201			" present:%lukB"
2202			" mlocked:%lukB"
2203			" dirty:%lukB"
2204			" writeback:%lukB"
2205			" mapped:%lukB"
2206			" shmem:%lukB"
2207			" slab_reclaimable:%lukB"
2208			" slab_unreclaimable:%lukB"
2209			" kernel_stack:%lukB"
2210			" pagetables:%lukB"
2211			" unstable:%lukB"
2212			" bounce:%lukB"
2213			" writeback_tmp:%lukB"
2214			" pages_scanned:%lu"
2215			" all_unreclaimable? %s"
2216			"\n",
2217			zone->name,
2218			K(zone_page_state(zone, NR_FREE_PAGES)),
2219			K(min_wmark_pages(zone)),
2220			K(low_wmark_pages(zone)),
2221			K(high_wmark_pages(zone)),
2222			K(zone_page_state(zone, NR_ACTIVE_ANON)),
2223			K(zone_page_state(zone, NR_INACTIVE_ANON)),
2224			K(zone_page_state(zone, NR_ACTIVE_FILE)),
2225			K(zone_page_state(zone, NR_INACTIVE_FILE)),
2226			K(zone_page_state(zone, NR_UNEVICTABLE)),
2227			K(zone_page_state(zone, NR_ISOLATED_ANON)),
2228			K(zone_page_state(zone, NR_ISOLATED_FILE)),
2229			K(zone->present_pages),
2230			K(zone_page_state(zone, NR_MLOCK)),
2231			K(zone_page_state(zone, NR_FILE_DIRTY)),
2232			K(zone_page_state(zone, NR_WRITEBACK)),
2233			K(zone_page_state(zone, NR_FILE_MAPPED)),
2234			K(zone_page_state(zone, NR_SHMEM)),
2235			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2236			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2237			zone_page_state(zone, NR_KERNEL_STACK) *
2238				THREAD_SIZE / 1024,
2239			K(zone_page_state(zone, NR_PAGETABLE)),
2240			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2241			K(zone_page_state(zone, NR_BOUNCE)),
2242			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2243			zone->pages_scanned,
2244			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
2245			);
2246		printk("lowmem_reserve[]:");
2247		for (i = 0; i < MAX_NR_ZONES; i++)
2248			printk(" %lu", zone->lowmem_reserve[i]);
2249		printk("\n");
2250	}
2251
2252	for_each_populated_zone(zone) {
2253		unsigned long nr[MAX_ORDER], flags, order, total = 0;
2254
2255		show_node(zone);
2256		printk("%s: ", zone->name);
2257
2258		spin_lock_irqsave(&zone->lock, flags);
2259		for (order = 0; order < MAX_ORDER; order++) {
2260			nr[order] = zone->free_area[order].nr_free;
2261			total += nr[order] << order;
2262		}
2263		spin_unlock_irqrestore(&zone->lock, flags);
2264		for (order = 0; order < MAX_ORDER; order++)
2265			printk("%lu*%lukB ", nr[order], K(1UL) << order);
2266		printk("= %lukB\n", K(total));
2267	}
2268
2269	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2270
2271	show_swap_cache_info();
2272}
2273
2274static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2275{
2276	zoneref->zone = zone;
2277	zoneref->zone_idx = zone_idx(zone);
2278}
2279
2280/*
2281 * Builds allocation fallback zone lists.
2282 *
2283 * Add all populated zones of a node to the zonelist.
2284 */
2285static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2286				int nr_zones, enum zone_type zone_type)
2287{
2288	struct zone *zone;
2289
2290	BUG_ON(zone_type >= MAX_NR_ZONES);
2291	zone_type++;
2292
2293	do {
2294		zone_type--;
2295		zone = pgdat->node_zones + zone_type;
2296		if (populated_zone(zone)) {
2297			zoneref_set_zone(zone,
2298				&zonelist->_zonerefs[nr_zones++]);
2299			check_highest_zone(zone_type);
2300		}
2301
2302	} while (zone_type);
2303	return nr_zones;
2304}
2305
2306
2307/*
2308 *  zonelist_order:
2309 *  0 = automatic detection of better ordering.
2310 *  1 = order by ([node] distance, -zonetype)
2311 *  2 = order by (-zonetype, [node] distance)
2312 *
2313 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2314 *  the same zonelist. So only NUMA can configure this param.
2315 */
2316#define ZONELIST_ORDER_DEFAULT  0
2317#define ZONELIST_ORDER_NODE     1
2318#define ZONELIST_ORDER_ZONE     2
2319
2320/* zonelist order in the kernel.
2321 * set_zonelist_order() will set this to NODE or ZONE.
2322 */
2323static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2324static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2325
2326
2327#ifdef CONFIG_NUMA
2328/* The value the user specified; may be changed later via the sysctl */
2329static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2330/* string for sysctl */
2331#define NUMA_ZONELIST_ORDER_LEN	16
2332char numa_zonelist_order[16] = "default";
2333
2334/*
2335 * Interface for configuring zonelist ordering.
2336 * command line option "numa_zonelist_order"
2337 *	= "[dD]efault"	- default, automatic configuration.
2338 *	= "[nN]ode"	- order by node locality, then by zone within node
2339 *	= "[zZ]one"	- order by zone, then by locality within zone
2340 */
2341
2342static int __parse_numa_zonelist_order(char *s)
2343{
2344	if (*s == 'd' || *s == 'D') {
2345		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2346	} else if (*s == 'n' || *s == 'N') {
2347		user_zonelist_order = ZONELIST_ORDER_NODE;
2348	} else if (*s == 'z' || *s == 'Z') {
2349		user_zonelist_order = ZONELIST_ORDER_ZONE;
2350	} else {
2351		printk(KERN_WARNING
2352			"Ignoring invalid numa_zonelist_order value:  "
2353			"%s\n", s);
2354		return -EINVAL;
2355	}
2356	return 0;
2357}
2358
2359static __init int setup_numa_zonelist_order(char *s)
2360{
2361	if (s)
2362		return __parse_numa_zonelist_order(s);
2363	return 0;
2364}
2365early_param("numa_zonelist_order", setup_numa_zonelist_order);
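/* e.g. booting with "numa_zonelist_order=zone" forces zone ordering */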
2366
2367/*
2368 * sysctl handler for numa_zonelist_order
2369 */
2370int numa_zonelist_order_handler(ctl_table *table, int write,
2371		struct file *file, void __user *buffer, size_t *length,
2372		loff_t *ppos)
2373{
2374	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2375	int ret;
2376
2377	if (write)
2378		strncpy(saved_string, (char*)table->data,
2379			NUMA_ZONELIST_ORDER_LEN);
2380	ret = proc_dostring(table, write, file, buffer, length, ppos);
2381	if (ret)
2382		return ret;
2383	if (write) {
2384		int oldval = user_zonelist_order;
2385		if (__parse_numa_zonelist_order((char*)table->data)) {
2386			/*
2387			 * bogus value.  restore saved string
2388			 */
2389			strncpy((char*)table->data, saved_string,
2390				NUMA_ZONELIST_ORDER_LEN);
2391			user_zonelist_order = oldval;
2392		} else if (oldval != user_zonelist_order)
2393			build_all_zonelists();
2394	}
2395	return 0;
2396}
2397
2398
2399#define MAX_NODE_LOAD (nr_online_nodes)
2400static int node_load[MAX_NUMNODES];
2401
2402/**
2403 * find_next_best_node - find the next node that should appear in a given node's fallback list
2404 * @node: node whose fallback list we're appending
2405 * @used_node_mask: nodemask_t of already used nodes
2406 *
2407 * We use a number of factors to determine which is the next node that should
2408 * appear on a given node's fallback list.  The node should not have appeared
2409 * already in @node's fallback list, and it should be the next closest node
2410 * according to the distance array (which contains arbitrary distance values
2411 * from each node to each node in the system), and should also prefer nodes
2412 * with no CPUs, since presumably they'll have very little allocation pressure
2413 * on them otherwise.
2414 * It returns -1 if no node is found.
2415 */
2416static int find_next_best_node(int node, nodemask_t *used_node_mask)
2417{
2418	int n, val;
2419	int min_val = INT_MAX;
2420	int best_node = -1;
2421	const struct cpumask *tmp = cpumask_of_node(0);
2422
2423	/* Use the local node if we haven't already */
2424	if (!node_isset(node, *used_node_mask)) {
2425		node_set(node, *used_node_mask);
2426		return node;
2427	}
2428
2429	for_each_node_state(n, N_HIGH_MEMORY) {
2430
2431		/* Don't want a node to appear more than once */
2432		if (node_isset(n, *used_node_mask))
2433			continue;
2434
2435		/* Use the distance array to find the distance */
2436		val = node_distance(node, n);
2437
2438		/* Penalize nodes under us ("prefer the next node") */
2439		val += (n < node);
2440
2441		/* Give preference to headless and unused nodes */
2442		tmp = cpumask_of_node(n);
2443		if (!cpumask_empty(tmp))
2444			val += PENALTY_FOR_NODE_WITH_CPUS;
2445
2446		/* Slight preference for less loaded node */
2447		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2448		val += node_load[n];
2449
2450		if (val < min_val) {
2451			min_val = val;
2452			best_node = n;
2453		}
2454	}
2455
2456	if (best_node >= 0)
2457		node_set(best_node, *used_node_mask);
2458
2459	return best_node;
2460}
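
/*
 * Worked example (hypothetical distances): if node 0 chooses between
 * node 1 (distance 20, has CPUs) and node 2 (distance 20, headless),
 * both unloaded, node 1 scores (20 + PENALTY_FOR_NODE_WITH_CPUS) *
 * MAX_NODE_LOAD * MAX_NUMNODES while node 2 scores only 20 *
 * MAX_NODE_LOAD * MAX_NUMNODES, so the headless node 2 is picked first.
 */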
2461
2462
2463/*
2464 * Build zonelists ordered by node and zones within node.
2465 * This results in maximum locality--normal zone overflows into local
2466 * DMA zone, if any--but risks exhausting DMA zone.
2467 */
2468static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2469{
2470	int j;
2471	struct zonelist *zonelist;
2472
2473	zonelist = &pgdat->node_zonelists[0];
2474	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2475		;
2476	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2477							MAX_NR_ZONES - 1);
2478	zonelist->_zonerefs[j].zone = NULL;
2479	zonelist->_zonerefs[j].zone_idx = 0;
2480}
2481
2482/*
2483 * Build gfp_thisnode zonelists
2484 */
2485static void build_thisnode_zonelists(pg_data_t *pgdat)
2486{
2487	int j;
2488	struct zonelist *zonelist;
2489
2490	zonelist = &pgdat->node_zonelists[1];
2491	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2492	zonelist->_zonerefs[j].zone = NULL;
2493	zonelist->_zonerefs[j].zone_idx = 0;
2494}
2495
2496/*
2497 * Build zonelists ordered by zone and nodes within zones.
2498 * This results in conserving DMA zone[s] until all Normal memory is
2499 * exhausted, but results in overflowing to remote node while memory
2500 * may still exist in local DMA zone.
2501 */
2502static int node_order[MAX_NUMNODES];
2503
2504static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2505{
2506	int pos, j, node;
2507	int zone_type;		/* needs to be signed */
2508	struct zone *z;
2509	struct zonelist *zonelist;
2510
2511	zonelist = &pgdat->node_zonelists[0];
2512	pos = 0;
2513	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2514		for (j = 0; j < nr_nodes; j++) {
2515			node = node_order[j];
2516			z = &NODE_DATA(node)->node_zones[zone_type];
2517			if (populated_zone(z)) {
2518				zoneref_set_zone(z,
2519					&zonelist->_zonerefs[pos++]);
2520				check_highest_zone(zone_type);
2521			}
2522		}
2523	}
2524	zonelist->_zonerefs[pos].zone = NULL;
2525	zonelist->_zonerefs[pos].zone_idx = 0;
2526}
2527
2528static int default_zonelist_order(void)
2529{
2530	int nid, zone_type;
2531	unsigned long low_kmem_size, total_size;
2532	struct zone *z;
2533	int average_size;
2534	/*
2535	 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
2536	 * If they are really small and used heavily, the system can fall
2537	 * into OOM very easily.
2538	 * This function detects the ZONE_DMA/DMA32 size and configures zone order.
2539	 */
2540	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
2541	low_kmem_size = 0;
2542	total_size = 0;
2543	for_each_online_node(nid) {
2544		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2545			z = &NODE_DATA(nid)->node_zones[zone_type];
2546			if (populated_zone(z)) {
2547				if (zone_type < ZONE_NORMAL)
2548					low_kmem_size += z->present_pages;
2549				total_size += z->present_pages;
2550			}
2551		}
2552	}
2553	if (!low_kmem_size ||  /* there is no DMA area. */
2554	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2555		return ZONELIST_ORDER_NODE;
2556	/*
2557	 * Look into each node's config.
2558	 * If there is a node whose DMA/DMA32 memory takes up a very large
2559	 * share of its local memory, NODE_ORDER may be suitable.
2560	 */
2561	average_size = total_size /
2562				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2563	for_each_online_node(nid) {
2564		low_kmem_size = 0;
2565		total_size = 0;
2566		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2567			z = &NODE_DATA(nid)->node_zones[zone_type];
2568			if (populated_zone(z)) {
2569				if (zone_type < ZONE_NORMAL)
2570					low_kmem_size += z->present_pages;
2571				total_size += z->present_pages;
2572			}
2573		}
2574		if (low_kmem_size &&
2575		    total_size > average_size && /* ignore small node */
2576		    low_kmem_size > total_size * 70/100)
2577			return ZONELIST_ORDER_NODE;
2578	}
2579	return ZONELIST_ORDER_ZONE;
2580}
2581
2582static void set_zonelist_order(void)
2583{
2584	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2585		current_zonelist_order = default_zonelist_order();
2586	else
2587		current_zonelist_order = user_zonelist_order;
2588}
2589
2590static void build_zonelists(pg_data_t *pgdat)
2591{
2592	int j, node, load;
2593	enum zone_type i;
2594	nodemask_t used_mask;
2595	int local_node, prev_node;
2596	struct zonelist *zonelist;
2597	int order = current_zonelist_order;
2598
2599	/* initialize zonelists */
2600	for (i = 0; i < MAX_ZONELISTS; i++) {
2601		zonelist = pgdat->node_zonelists + i;
2602		zonelist->_zonerefs[0].zone = NULL;
2603		zonelist->_zonerefs[0].zone_idx = 0;
2604	}
2605
2606	/* NUMA-aware ordering of nodes */
2607	local_node = pgdat->node_id;
2608	load = nr_online_nodes;
2609	prev_node = local_node;
2610	nodes_clear(used_mask);
2611
2612	memset(node_order, 0, sizeof(node_order));
2613	j = 0;
2614
2615	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2616		int distance = node_distance(local_node, node);
2617
2618		/*
2619		 * If another node is sufficiently far away then it is better
2620		 * to reclaim pages in a zone before going off node.
2621		 */
2622		if (distance > RECLAIM_DISTANCE)
2623			zone_reclaim_mode = 1;
2624
2625		/*
2626		 * We don't want to pressure a particular node.
2627		 * So adding penalty to the first node in same
2628		 * distance group to make it round-robin.
2629		 */
2630		if (distance != node_distance(local_node, prev_node))
2631			node_load[node] = load;
2632
2633		prev_node = node;
2634		load--;
2635		if (order == ZONELIST_ORDER_NODE)
2636			build_zonelists_in_node_order(pgdat, node);
2637		else
2638			node_order[j++] = node;	/* remember order */
2639	}
2640
2641	if (order == ZONELIST_ORDER_ZONE) {
2642		/* calculate node order -- i.e., DMA last! */
2643		build_zonelists_in_zone_order(pgdat, j);
2644	}
2645
2646	build_thisnode_zonelists(pgdat);
2647}
2648
2649/* Construct the zonelist performance cache - see mmzone.h for details */
2650static void build_zonelist_cache(pg_data_t *pgdat)
2651{
2652	struct zonelist *zonelist;
2653	struct zonelist_cache *zlc;
2654	struct zoneref *z;
2655
2656	zonelist = &pgdat->node_zonelists[0];
2657	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2658	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2659	for (z = zonelist->_zonerefs; z->zone; z++)
2660		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2661}
2662
2663
2664#else	/* CONFIG_NUMA */
2665
2666static void set_zonelist_order(void)
2667{
2668	current_zonelist_order = ZONELIST_ORDER_ZONE;
2669}
2670
2671static void build_zonelists(pg_data_t *pgdat)
2672{
2673	int node, local_node;
2674	enum zone_type j;
2675	struct zonelist *zonelist;
2676
2677	local_node = pgdat->node_id;
2678
2679	zonelist = &pgdat->node_zonelists[0];
2680	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2681
2682	/*
2683	 * Now we build the zonelist so that it contains the zones
2684	 * of all the other nodes.
2685	 * We don't want to pressure a particular node, so when
2686	 * building the zones for node N, we make sure that the
2687	 * zones coming right after the local ones are those from
2688	 * node N+1 (modulo N)
2689	 */
2690	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2691		if (!node_online(node))
2692			continue;
2693		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2694							MAX_NR_ZONES - 1);
2695	}
2696	for (node = 0; node < local_node; node++) {
2697		if (!node_online(node))
2698			continue;
2699		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2700							MAX_NR_ZONES - 1);
2701	}
2702
2703	zonelist->_zonerefs[j].zone = NULL;
2704	zonelist->_zonerefs[j].zone_idx = 0;
2705}
2706
2707/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2708static void build_zonelist_cache(pg_data_t *pgdat)
2709{
2710	pgdat->node_zonelists[0].zlcache_ptr = NULL;
2711}
2712
2713#endif	/* CONFIG_NUMA */
2714
2715/* The int return value exists just to satisfy stop_machine() */
2716static int __build_all_zonelists(void *dummy)
2717{
2718	int nid;
2719
2720#ifdef CONFIG_NUMA
2721	memset(node_load, 0, sizeof(node_load));
2722#endif
2723	for_each_online_node(nid) {
2724		pg_data_t *pgdat = NODE_DATA(nid);
2725
2726		build_zonelists(pgdat);
2727		build_zonelist_cache(pgdat);
2728	}
2729	return 0;
2730}
2731
2732void build_all_zonelists(void)
2733{
2734	set_zonelist_order();
2735
2736	if (system_state == SYSTEM_BOOTING) {
2737		__build_all_zonelists(NULL);
2738		mminit_verify_zonelist();
2739		cpuset_init_current_mems_allowed();
2740	} else {
2741		/* We have to stop all CPUs to guarantee there are
2742		   no users of the zonelists */
2743		stop_machine(__build_all_zonelists, NULL, NULL);
2744		/* cpuset refresh routine should be here */
2745	}
2746	vm_total_pages = nr_free_pagecache_pages();
2747	/*
2748	 * Disable grouping by mobility if the number of pages in the
2749	 * system is too low to allow the mechanism to work. It would be
2750	 * more accurate, but expensive to check per-zone. This check is
2751	 * made on memory-hotadd so a system can start with mobility
2752	 * disabled and enable it later
2753	 */
2754	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2755		page_group_by_mobility_disabled = 1;
2756	else
2757		page_group_by_mobility_disabled = 0;
2758
2759	printk("Built %i zonelists in %s order, mobility grouping %s.  "
2760		"Total pages: %ld\n",
2761			nr_online_nodes,
2762			zonelist_order_name[current_zonelist_order],
2763			page_group_by_mobility_disabled ? "off" : "on",
2764			vm_total_pages);
2765#ifdef CONFIG_NUMA
2766	printk("Policy zone: %s\n", zone_names[policy_zone]);
2767#endif
2768}
2769
2770/*
2771 * Helper functions to size the waitqueue hash table.
2772 * Essentially these want to choose hash table sizes sufficiently
2773 * large so that collisions trying to wait on pages are rare.
2774 * But in fact, the number of active page waitqueues on typical
2775 * systems is ridiculously low, less than 200. So this is even
2776 * conservative, even though it seems large.
2777 *
2778 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2779 * waitqueues, i.e. the size of the waitq table given the number of pages.
2780 */
2781#define PAGES_PER_WAITQUEUE	256
2782
2783#ifndef CONFIG_MEMORY_HOTPLUG
2784static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2785{
2786	unsigned long size = 1;
2787
2788	pages /= PAGES_PER_WAITQUEUE;
2789
2790	while (size < pages)
2791		size <<= 1;
2792
2793	/*
2794	 * Once we have dozens or even hundreds of threads sleeping
2795	 * on IO we've got bigger problems than wait queue collision.
2796	 * Limit the size of the wait table to a reasonable size.
2797	 */
2798	size = min(size, 4096UL);
2799
2800	return max(size, 4UL);
2801}
2802#else
2803/*
2804 * A zone's size might be changed by hot-add, so it is not possible to determine
2805 * a suitable size for its wait_table.  So we use the maximum size now.
2806 *
2807 * The max wait table size = 4096 x sizeof(wait_queue_head_t), i.e.:
2808 *
2809 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2810 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2811 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2812 *
2813 * By the traditional sizing above, the maximum number of entries is reached
2814 * once a zone's memory is (512K + 256) pages or more.  It equals:
2815 *
2816 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2817 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2818 *    powerpc (64K page size)             : =  (32G +16M)byte.
2819 */
2820static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2821{
2822	return 4096UL;
2823}
2824#endif
2825
2826/*
2827 * This is an integer logarithm so that shifts can be used later
2828 * to extract the more random high bits from the multiplicative
2829 * hash function before the remainder is taken.
2830 */
2831static inline unsigned long wait_table_bits(unsigned long size)
2832{
2833	return ffz(~size);
2834}
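
/*
 * e.g. wait_table_bits(4096) == 12: ~4096 has its only zero bit at
 * position 12, and since the table size is always a power of two,
 * ffz(~size) is simply log2(size).
 */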
2835
2836#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2837
2838/*
2839 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2840 * of blocks reserved is based on min_wmark_pages(zone). The memory within
2841 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
2842 * higher will lead to a bigger reserve which will get freed as contiguous
2843 * blocks as reclaim kicks in
2844 */
2845static void setup_zone_migrate_reserve(struct zone *zone)
2846{
2847	unsigned long start_pfn, pfn, end_pfn;
2848	struct page *page;
2849	unsigned long block_migratetype;
2850	int reserve;
2851
2852	/* Get the start pfn, end pfn and the number of blocks to reserve */
2853	start_pfn = zone->zone_start_pfn;
2854	end_pfn = start_pfn + zone->spanned_pages;
2855	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
2856							pageblock_order;
2857
2858	/*
2859	 * Reserve blocks are generally in place to help high-order atomic
2860	 * allocations that are short-lived. A min_free_kbytes value that
2861	 * would result in more than 2 reserve blocks for atomic allocations
2862	 * is assumed to be in place to help anti-fragmentation for the
2863	 * future allocation of hugepages at runtime.
2864	 */
2865	reserve = min(2, reserve);
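	/*
	 * Worked example (illustrative numbers): with 4K pages and
	 * pageblock_order 9, a min watermark of 2816 pages rounds up to
	 * 3072 pages == 6 pageblocks, which the min() above caps at 2.
	 */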
2866
2867	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2868		if (!pfn_valid(pfn))
2869			continue;
2870		page = pfn_to_page(pfn);
2871
2872		/* Watch out for overlapping nodes */
2873		if (page_to_nid(page) != zone_to_nid(zone))
2874			continue;
2875
2876		/* Blocks with reserved pages will never be freed, skip them. */
2877		if (PageReserved(page))
2878			continue;
2879
2880		block_migratetype = get_pageblock_migratetype(page);
2881
2882		/* If this block is reserved, account for it */
2883		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2884			reserve--;
2885			continue;
2886		}
2887
2888		/* Suitable for reserving if this block is movable */
2889		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2890			set_pageblock_migratetype(page, MIGRATE_RESERVE);
2891			move_freepages_block(zone, page, MIGRATE_RESERVE);
2892			reserve--;
2893			continue;
2894		}
2895
2896		/*
2897		 * If the reserve is met and this is a previous reserved block,
2898		 * take it back
2899		 */
2900		if (block_migratetype == MIGRATE_RESERVE) {
2901			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2902			move_freepages_block(zone, page, MIGRATE_MOVABLE);
2903		}
2904	}
2905}
2906
2907/*
2908 * Initially all pages are reserved - free ones are freed
2909 * up by free_all_bootmem() once the early boot process is
2910 * done. Non-atomic initialization, single-pass.
2911 */
2912void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2913		unsigned long start_pfn, enum memmap_context context)
2914{
2915	struct page *page;
2916	unsigned long end_pfn = start_pfn + size;
2917	unsigned long pfn;
2918	struct zone *z;
2919
2920	if (highest_memmap_pfn < end_pfn - 1)
2921		highest_memmap_pfn = end_pfn - 1;
2922
2923	z = &NODE_DATA(nid)->node_zones[zone];
2924	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2925		/*
2926		 * There can be holes in boot-time mem_map[]s
2927		 * handed to this function.  They do not
2928		 * exist on hotplugged memory.
2929		 */
2930		if (context == MEMMAP_EARLY) {
2931			if (!early_pfn_valid(pfn))
2932				continue;
2933			if (!early_pfn_in_nid(pfn, nid))
2934				continue;
2935		}
2936		page = pfn_to_page(pfn);
2937		set_page_links(page, zone, nid, pfn);
2938		mminit_verify_page_links(page, zone, nid, pfn);
2939		init_page_count(page);
2940		reset_page_mapcount(page);
2941		SetPageReserved(page);
2942		/*
2943		 * Mark the block movable so that blocks are reserved for
2944		 * movable at startup. This will force kernel allocations
2945		 * to reserve their blocks rather than leaking throughout
2946		 * the address space during boot when many long-lived
2947		 * kernel allocations are made. Later some blocks near
2948		 * the start are marked MIGRATE_RESERVE by
2949		 * setup_zone_migrate_reserve()
2950		 *
2951		 * The bitmap is created for the zone's valid pfn range, but the
2952		 * memmap can be created for invalid pages (for alignment).
2953		 * Check here so that set_pageblock_migratetype() is not called
2954		 * on a pfn outside the zone.
2955		 */
2956		if ((z->zone_start_pfn <= pfn)
2957		    && (pfn < z->zone_start_pfn + z->spanned_pages)
2958		    && !(pfn & (pageblock_nr_pages - 1)))
2959			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2960
2961		INIT_LIST_HEAD(&page->lru);
2962#ifdef WANT_PAGE_VIRTUAL
2963		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
2964		if (!is_highmem_idx(zone))
2965			set_page_address(page, __va(pfn << PAGE_SHIFT));
2966#endif
2967	}
2968}
2969
2970static void __meminit zone_init_free_lists(struct zone *zone)
2971{
2972	int order, t;
2973	for_each_migratetype_order(order, t) {
2974		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2975		zone->free_area[order].nr_free = 0;
2976	}
2977}
2978
2979#ifndef __HAVE_ARCH_MEMMAP_INIT
2980#define memmap_init(size, nid, zone, start_pfn) \
2981	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
2982#endif
2983
2984static int zone_batchsize(struct zone *zone)
2985{
2986#ifdef CONFIG_MMU
2987	int batch;
2988
2989	/*
2990	 * The per-cpu-pages pools are set to around 1000th of the
2991	 * size of the zone.  But no more than 1/2 of a meg.
2992	 *
2993	 * OK, so we don't know how big the cache is.  So guess.
2994	 */
2995	batch = zone->present_pages / 1024;
2996	if (batch * PAGE_SIZE > 512 * 1024)
2997		batch = (512 * 1024) / PAGE_SIZE;
2998	batch /= 4;		/* We effectively *= 4 below */
2999	if (batch < 1)
3000		batch = 1;
3001
3002	/*
3003	 * Clamp the batch to a 2^n - 1 value. Having a power
3004	 * of 2 value was found to be more likely to have
3005	 * suboptimal cache aliasing properties in some cases.
3006	 *
3007	 * For example if 2 tasks are alternately allocating
3008	 * batches of pages, one task can end up with a lot
3009	 * of pages of one half of the possible page colors
3010	 * and the other with pages of the other colors.
3011	 */
3012	batch = rounddown_pow_of_two(batch + batch/2) - 1;
3013
3014	return batch;
3015
3016#else
3017	/* The deferral and batching of frees should be suppressed under NOMMU
3018	 * conditions.
3019	 *
3020	 * The problem is that NOMMU needs to be able to allocate large chunks
3021	 * of contiguous memory as there's no hardware page translation to
3022	 * assemble apparent contiguous memory from discontiguous pages.
3023	 *
3024	 * Queueing large contiguous runs of pages for batching, however,
3025	 * causes the pages to actually be freed in smaller chunks.  As there
3026	 * can be a significant delay between the individual batches being
3027	 * recycled, this leads to the once large chunks of space being
3028	 * fragmented and becoming unavailable for high-order allocations.
3029	 */
3030	return 0;
3031#endif
3032}
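
/*
 * Worked example: a 256MB zone with 4K pages has 65536 present pages,
 * so batch = 65536/1024 = 64 pages (256KB, under the 512KB cap), then
 * 64/4 = 16, and rounddown_pow_of_two(16 + 8) - 1 gives a batch of 15.
 */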
3033
3034static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3035{
3036	struct per_cpu_pages *pcp;
3037	int migratetype;
3038
3039	memset(p, 0, sizeof(*p));
3040
3041	pcp = &p->pcp;
3042	pcp->count = 0;
3043	pcp->high = 6 * batch;
3044	pcp->batch = max(1UL, 1 * batch);
3045	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3046		INIT_LIST_HEAD(&pcp->lists[migratetype]);
3047}
3048
3049/*
3050 * setup_pagelist_highmark() sets the high water mark of pageset p's hot
3051 * per-cpu pagelist to the value high.
3052 */
3053
3054static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3055				unsigned long high)
3056{
3057	struct per_cpu_pages *pcp;
3058
3059	pcp = &p->pcp;
3060	pcp->high = high;
3061	pcp->batch = max(1UL, high/4);
3062	if ((high/4) > (PAGE_SHIFT * 8))
3063		pcp->batch = PAGE_SHIFT * 8;
3064}
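
/*
 * e.g. setup_pagelist_highmark(p, 400) on a 4K-page system: batch is
 * first set to 400/4 = 100, which exceeds PAGE_SHIFT * 8 == 96, so it
 * is clamped to 96.
 */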
3065
3066
3067#ifdef CONFIG_NUMA
3068/*
3069 * Boot pageset table. One per cpu which is going to be used for all
3070 * zones and all nodes. The parameters will be set in such a way
3071 * that an item put on a list will immediately be handed over to
3072 * the buddy list. This is safe since pageset manipulation is done
3073 * with interrupts disabled.
3074 *
3075 * Some NUMA counter updates may also be caught by the boot pagesets.
3076 *
3077 * The boot_pagesets must be kept even after bootup is complete for
3078 * unused processors and/or zones. They do play a role for bootstrapping
3079 * hotplugged processors.
3080 *
3081 * zoneinfo_show() and maybe other functions do
3082 * not check if the processor is online before following the pageset pointer.
3083 * Other parts of the kernel may not check if the zone is available.
3084 */
3085static struct per_cpu_pageset boot_pageset[NR_CPUS];
3086
3087/*
3088 * Dynamically allocate memory for the
3089 * per cpu pageset array in struct zone.
3090 */
3091static int __cpuinit process_zones(int cpu)
3092{
3093	struct zone *zone, *dzone;
3094	int node = cpu_to_node(cpu);
3095
3096	node_set_state(node, N_CPU);	/* this node has a cpu */
3097
3098	for_each_populated_zone(zone) {
3099		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
3100					 GFP_KERNEL, node);
3101		if (!zone_pcp(zone, cpu))
3102			goto bad;
3103
3104		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
3105
3106		if (percpu_pagelist_fraction)
3107			setup_pagelist_highmark(zone_pcp(zone, cpu),
3108			 	(zone->present_pages / percpu_pagelist_fraction));
3109	}
3110
3111	return 0;
3112bad:
3113	for_each_zone(dzone) {
3114		if (!populated_zone(dzone))
3115			continue;
3116		if (dzone == zone)
3117			break;
3118		kfree(zone_pcp(dzone, cpu));
3119		zone_pcp(dzone, cpu) = &boot_pageset[cpu];
3120	}
3121	return -ENOMEM;
3122}
3123
3124static inline void free_zone_pagesets(int cpu)
3125{
3126	struct zone *zone;
3127
3128	for_each_zone(zone) {
3129		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
3130
3131		/* Free per_cpu_pageset if it is slab allocated */
3132		if (pset != &boot_pageset[cpu])
3133			kfree(pset);
3134		zone_pcp(zone, cpu) = &boot_pageset[cpu];
3135	}
3136}
3137
3138static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
3139		unsigned long action,
3140		void *hcpu)
3141{
3142	int cpu = (long)hcpu;
3143	int ret = NOTIFY_OK;
3144
3145	switch (action) {
3146	case CPU_UP_PREPARE:
3147	case CPU_UP_PREPARE_FROZEN:
3148		if (process_zones(cpu))
3149			ret = NOTIFY_BAD;
3150		break;
3151	case CPU_UP_CANCELED:
3152	case CPU_UP_CANCELED_FROZEN:
3153	case CPU_DEAD:
3154	case CPU_DEAD_FROZEN:
3155		free_zone_pagesets(cpu);
3156		break;
3157	default:
3158		break;
3159	}
3160	return ret;
3161}
3162
3163static struct notifier_block __cpuinitdata pageset_notifier =
3164	{ &pageset_cpuup_callback, NULL, 0 };
3165
3166void __init setup_per_cpu_pageset(void)
3167{
3168	int err;
3169
3170	/* Initialize per_cpu_pageset for cpu 0.
3171	 * A cpuup callback will do this for every cpu
3172	 * as it comes online
3173	 */
3174	err = process_zones(smp_processor_id());
3175	BUG_ON(err);
3176	register_cpu_notifier(&pageset_notifier);
3177}
3178
3179#endif
3180
3181static noinline __init_refok
3182int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3183{
3184	int i;
3185	struct pglist_data *pgdat = zone->zone_pgdat;
3186	size_t alloc_size;
3187
3188	/*
3189	 * The per-page waitqueue mechanism uses hashed waitqueues
3190	 * per zone.
3191	 */
3192	zone->wait_table_hash_nr_entries =
3193		 wait_table_hash_nr_entries(zone_size_pages);
3194	zone->wait_table_bits =
3195		wait_table_bits(zone->wait_table_hash_nr_entries);
3196	alloc_size = zone->wait_table_hash_nr_entries
3197					* sizeof(wait_queue_head_t);
3198
3199	if (!slab_is_available()) {
3200		zone->wait_table = (wait_queue_head_t *)
3201			alloc_bootmem_node(pgdat, alloc_size);
3202	} else {
3203		/*
3204		 * This case means that a zone whose size was 0 gets new memory
3205		 * via memory hot-add.
3206		 * But it may also be that a whole new node was hot-added.  In
3207		 * that case vmalloc() cannot yet allocate from the new node's
3208		 * memory, so this wait_table cannot be placed on the new
3209		 * node itself.
3210		 * Using the new node's memory here will require further
3211		 * consideration.
3212		 */
3213		zone->wait_table = vmalloc(alloc_size);
3214	}
3215	if (!zone->wait_table)
3216		return -ENOMEM;
3217
3218	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3219		init_waitqueue_head(zone->wait_table + i);
3220
3221	return 0;
3222}
3223
3224static int __zone_pcp_update(void *data)
3225{
3226	struct zone *zone = data;
3227	int cpu;
3228	unsigned long batch = zone_batchsize(zone), flags;
3229
3230	for (cpu = 0; cpu < NR_CPUS; cpu++) {
3231		struct per_cpu_pageset *pset;
3232		struct per_cpu_pages *pcp;
3233
3234		pset = zone_pcp(zone, cpu);
3235		pcp = &pset->pcp;
3236
3237		local_irq_save(flags);
3238		free_pcppages_bulk(zone, pcp->count, pcp);
3239		setup_pageset(pset, batch);
3240		local_irq_restore(flags);
3241	}
3242	return 0;
3243}
3244
3245void zone_pcp_update(struct zone *zone)
3246{
3247	stop_machine(__zone_pcp_update, zone, NULL);
3248}
3249
3250static __meminit void zone_pcp_init(struct zone *zone)
3251{
3252	int cpu;
3253	unsigned long batch = zone_batchsize(zone);
3254
3255	for (cpu = 0; cpu < NR_CPUS; cpu++) {
3256#ifdef CONFIG_NUMA
3257		/* Early boot. Slab allocator not functional yet */
3258		zone_pcp(zone, cpu) = &boot_pageset[cpu];
3259		setup_pageset(&boot_pageset[cpu], 0);
3260#else
3261		setup_pageset(zone_pcp(zone, cpu), batch);
3262#endif
3263	}
3264	if (zone->present_pages)
3265		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
3266			zone->name, zone->present_pages, batch);
3267}
3268
3269__meminit int init_currently_empty_zone(struct zone *zone,
3270					unsigned long zone_start_pfn,
3271					unsigned long size,
3272					enum memmap_context context)
3273{
3274	struct pglist_data *pgdat = zone->zone_pgdat;
3275	int ret;
3276	ret = zone_wait_table_init(zone, size);
3277	if (ret)
3278		return ret;
3279	pgdat->nr_zones = zone_idx(zone) + 1;
3280
3281	zone->zone_start_pfn = zone_start_pfn;
3282
3283	mminit_dprintk(MMINIT_TRACE, "memmap_init",
3284			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
3285			pgdat->node_id,
3286			(unsigned long)zone_idx(zone),
3287			zone_start_pfn, (zone_start_pfn + size));
3288
3289	zone_init_free_lists(zone);
3290
3291	return 0;
3292}
3293
3294#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3295/*
3296 * Basic iterator support. Return the first range of PFNs for a node
3297 * Note: nid == MAX_NUMNODES returns first region regardless of node
3298 */
3299static int __meminit first_active_region_index_in_nid(int nid)
3300{
3301	int i;
3302
3303	for (i = 0; i < nr_nodemap_entries; i++)
3304		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3305			return i;
3306
3307	return -1;
3308}
3309
3310/*
3311 * Basic iterator support. Return the next active range of PFNs for a node
3312 * Note: nid == MAX_NUMNODES returns next region regardless of node
3313 */
3314static int __meminit next_active_region_index_in_nid(int index, int nid)
3315{
3316	for (index = index + 1; index < nr_nodemap_entries; index++)
3317		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3318			return index;
3319
3320	return -1;
3321}
3322
3323#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3324/*
3325 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3326 * Architectures may implement their own version but if add_active_range()
3327 * was used and there are no special requirements, this is a convenient
3328 * alternative
3329 */
3330int __meminit __early_pfn_to_nid(unsigned long pfn)
3331{
3332	int i;
3333
3334	for (i = 0; i < nr_nodemap_entries; i++) {
3335		unsigned long start_pfn = early_node_map[i].start_pfn;
3336		unsigned long end_pfn = early_node_map[i].end_pfn;
3337
3338		if (start_pfn <= pfn && pfn < end_pfn)
3339			return early_node_map[i].nid;
3340	}
3341	/* This is a memory hole */
3342	return -1;
3343}
3344#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3345
3346int __meminit early_pfn_to_nid(unsigned long pfn)
3347{
3348	int nid;
3349
3350	nid = __early_pfn_to_nid(pfn);
3351	if (nid >= 0)
3352		return nid;
3353	/* pfn lies in a memory hole; just assume node 0 */
3354	return 0;
3355}
3356
3357#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3358bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3359{
3360	int nid;
3361
3362	nid = __early_pfn_to_nid(pfn);
3363	if (nid >= 0 && nid != node)
3364		return false;
3365	return true;
3366}
3367#endif
3368
3369/* Basic iterator support to walk early_node_map[] */
3370#define for_each_active_range_index_in_nid(i, nid) \
3371	for (i = first_active_region_index_in_nid(nid); i != -1; \
3372				i = next_active_region_index_in_nid(i, nid))
3373
3374/**
3375 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3376 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3377 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3378 *
3379 * If an architecture guarantees that all ranges registered with
3380 * add_active_ranges() contain no holes and may be freed, this
3381 * function may be used instead of calling free_bootmem() manually.
3382 */
3383void __init free_bootmem_with_active_regions(int nid,
3384						unsigned long max_low_pfn)
3385{
3386	int i;
3387
3388	for_each_active_range_index_in_nid(i, nid) {
3389		unsigned long size_pages = 0;
3390		unsigned long end_pfn = early_node_map[i].end_pfn;
3391
3392		if (early_node_map[i].start_pfn >= max_low_pfn)
3393			continue;
3394
3395		if (end_pfn > max_low_pfn)
3396			end_pfn = max_low_pfn;
3397
3398		size_pages = end_pfn - early_node_map[i].start_pfn;
3399		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3400				PFN_PHYS(early_node_map[i].start_pfn),
3401				size_pages << PAGE_SHIFT);
3402	}
3403}
3404
3405void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3406{
3407	int i;
3408	int ret;
3409
3410	for_each_active_range_index_in_nid(i, nid) {
3411		ret = work_fn(early_node_map[i].start_pfn,
3412			      early_node_map[i].end_pfn, data);
3413		if (ret)
3414			break;
3415	}
3416}
3417/**
3418 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3419 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3420 *
3421 * If an architecture guarantees that all ranges registered with
3422 * add_active_ranges() contain no holes and may be freed, this
3423 * function may be used instead of calling memory_present() manually.
3424 */
3425void __init sparse_memory_present_with_active_regions(int nid)
3426{
3427	int i;
3428
3429	for_each_active_range_index_in_nid(i, nid)
3430		memory_present(early_node_map[i].nid,
3431				early_node_map[i].start_pfn,
3432				early_node_map[i].end_pfn);
3433}
3434
3435/**
3436 * get_pfn_range_for_nid - Return the start and end page frames for a node
3437 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3438 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3439 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3440 *
3441 * It returns the start and end page frame of a node based on information
3442 * provided by an arch calling add_active_range(). If called for a node
3443 * with no available memory, a warning is printed and the start and end
3444 * PFNs will be 0.
3445 */
3446void __meminit get_pfn_range_for_nid(unsigned int nid,
3447			unsigned long *start_pfn, unsigned long *end_pfn)
3448{
3449	int i;
3450	*start_pfn = -1UL;
3451	*end_pfn = 0;
3452
3453	for_each_active_range_index_in_nid(i, nid) {
3454		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3455		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3456	}
3457
3458	if (*start_pfn == -1UL)
3459		*start_pfn = 0;
3460}
3461
3462/*
3463 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3464 * assumption is made that zones within a node are ordered in monotonically
3465 * increasing memory addresses so that the "highest" populated zone is used
3466 */
3467static void __init find_usable_zone_for_movable(void)
3468{
3469	int zone_index;
3470	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3471		if (zone_index == ZONE_MOVABLE)
3472			continue;
3473
3474		if (arch_zone_highest_possible_pfn[zone_index] >
3475				arch_zone_lowest_possible_pfn[zone_index])
3476			break;
3477	}
3478
3479	VM_BUG_ON(zone_index == -1);
3480	movable_zone = zone_index;
3481}
3482
3483/*
3484 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3485 * because it is sized independently of architecture. Unlike the other zones,
3486 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3487 * in each node depending on the size of each node and how evenly kernelcore
3488 * is distributed. This helper function adjusts the zone ranges
3489 * provided by the architecture for a given node by using the end of the
3490 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3491 * zones within a node are in order of monotonically increasing memory addresses.
3492 */
3493static void __meminit adjust_zone_range_for_zone_movable(int nid,
3494					unsigned long zone_type,
3495					unsigned long node_start_pfn,
3496					unsigned long node_end_pfn,
3497					unsigned long *zone_start_pfn,
3498					unsigned long *zone_end_pfn)
3499{
3500	/* Only adjust if ZONE_MOVABLE is on this node */
3501	if (zone_movable_pfn[nid]) {
3502		/* Size ZONE_MOVABLE */
3503		if (zone_type == ZONE_MOVABLE) {
3504			*zone_start_pfn = zone_movable_pfn[nid];
3505			*zone_end_pfn = min(node_end_pfn,
3506				arch_zone_highest_possible_pfn[movable_zone]);
3507
3508		/* Adjust for ZONE_MOVABLE starting within this range */
3509		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3510				*zone_end_pfn > zone_movable_pfn[nid]) {
3511			*zone_end_pfn = zone_movable_pfn[nid];
3512
3513		/* Check if this whole range is within ZONE_MOVABLE */
3514		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3515			*zone_start_pfn = *zone_end_pfn;
3516	}
3517}
3518
3519/*
3520 * Return the number of pages a zone spans in a node, including holes
3521 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3522 */
3523static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3524					unsigned long zone_type,
3525					unsigned long *ignored)
3526{
3527	unsigned long node_start_pfn, node_end_pfn;
3528	unsigned long zone_start_pfn, zone_end_pfn;
3529
3530	/* Get the start and end of the node and zone */
3531	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3532	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3533	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3534	adjust_zone_range_for_zone_movable(nid, zone_type,
3535				node_start_pfn, node_end_pfn,
3536				&zone_start_pfn, &zone_end_pfn);
3537
3538	/* Check that this node has pages within the zone's required range */
3539	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3540		return 0;
3541
3542	/* Move the zone boundaries inside the node if necessary */
3543	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3544	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3545
3546	/* Return the spanned pages */
3547	return zone_end_pfn - zone_start_pfn;
3548}
3549
3550/*
3551 * Return the number of pages in holes in a range on a node. If nid is
3552 * MAX_NUMNODES, then all holes in the requested range will be accounted for.
3553 */
3554static unsigned long __meminit __absent_pages_in_range(int nid,
3555				unsigned long range_start_pfn,
3556				unsigned long range_end_pfn)
3557{
3558	int i = 0;
3559	unsigned long prev_end_pfn = 0, hole_pages = 0;
3560	unsigned long start_pfn;
3561
3562	/* Find the end_pfn of the first active range of pfns in the node */
3563	i = first_active_region_index_in_nid(nid);
3564	if (i == -1)
3565		return 0;
3566
3567	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3568
3569	/* Account for ranges before physical memory on this node */
3570	if (early_node_map[i].start_pfn > range_start_pfn)
3571		hole_pages = prev_end_pfn - range_start_pfn;
3572
3573	/* Find all holes for the zone within the node */
3574	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3575
3576		/* No need to continue if prev_end_pfn is outside the zone */
3577		if (prev_end_pfn >= range_end_pfn)
3578			break;
3579
3580		/* Make sure the end of the zone is not within the hole */
3581		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3582		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3583
3584		/* Update the hole size count and move on */
3585		if (start_pfn > range_start_pfn) {
3586			BUG_ON(prev_end_pfn > start_pfn);
3587			hole_pages += start_pfn - prev_end_pfn;
3588		}
3589		prev_end_pfn = early_node_map[i].end_pfn;
3590	}
3591
3592	/* Account for ranges past physical memory on this node */
3593	if (range_end_pfn > prev_end_pfn)
3594		hole_pages += range_end_pfn -
3595				max(range_start_pfn, prev_end_pfn);
3596
3597	return hole_pages;
3598}
3599
3600/**
3601 * absent_pages_in_range - Return number of page frames in holes within a range
3602 * @start_pfn: The start PFN to start searching for holes
3603 * @end_pfn: The end PFN to stop searching for holes
3604 *
3605 * It returns the number of pages frames in memory holes within a range.
3606 */
3607unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3608							unsigned long end_pfn)
3609{
3610	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3611}
3612
3613/* Return the number of page frames in holes in a zone on a node */
3614static unsigned long __meminit zone_absent_pages_in_node(int nid,
3615					unsigned long zone_type,
3616					unsigned long *ignored)
3617{
3618	unsigned long node_start_pfn, node_end_pfn;
3619	unsigned long zone_start_pfn, zone_end_pfn;
3620
3621	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3622	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3623							node_start_pfn);
3624	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3625							node_end_pfn);
3626
3627	adjust_zone_range_for_zone_movable(nid, zone_type,
3628			node_start_pfn, node_end_pfn,
3629			&zone_start_pfn, &zone_end_pfn);
3630	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3631}
3632
3633#else
3634static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3635					unsigned long zone_type,
3636					unsigned long *zones_size)
3637{
3638	return zones_size[zone_type];
3639}
3640
3641static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3642						unsigned long zone_type,
3643						unsigned long *zholes_size)
3644{
3645	if (!zholes_size)
3646		return 0;
3647
3648	return zholes_size[zone_type];
3649}
3650
3651#endif
3652
3653static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3654		unsigned long *zones_size, unsigned long *zholes_size)
3655{
3656	unsigned long realtotalpages, totalpages = 0;
3657	enum zone_type i;
3658
3659	for (i = 0; i < MAX_NR_ZONES; i++)
3660		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3661								zones_size);
3662	pgdat->node_spanned_pages = totalpages;
3663
3664	realtotalpages = totalpages;
3665	for (i = 0; i < MAX_NR_ZONES; i++)
3666		realtotalpages -=
3667			zone_absent_pages_in_node(pgdat->node_id, i,
3668								zholes_size);
3669	pgdat->node_present_pages = realtotalpages;
3670	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3671							realtotalpages);
3672}
3673
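/*
 * For example (illustrative values), a node spanning 0x50000 pages of
 * which 0x3000 fall in holes ends up with node_spanned_pages = 0x50000
 * and node_present_pages = 0x4d000, the latter being what the printk
 * above reports as totalpages.
 */
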
3674#ifndef CONFIG_SPARSEMEM
3675/*
3676 * Calculate the size of the zone->blockflags bitmap, rounded up to an
3677 * unsigned long. Start by rounding zonesize up to a multiple of
3678 * pageblock_nr_pages, then allot NR_PAGEBLOCK_BITS bits per pageblock,
3679 * round the bit count up to the nearest long, and return the result in
3680 * bytes.
3681 */
3682static unsigned long __init usemap_size(unsigned long zonesize)
3683{
3684	unsigned long usemapsize;
3685
3686	usemapsize = roundup(zonesize, pageblock_nr_pages);
3687	usemapsize = usemapsize >> pageblock_order;
3688	usemapsize *= NR_PAGEBLOCK_BITS;
3689	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3690
3691	return usemapsize / 8;
3692}
3693
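/*
 * Worked example, assuming NR_PAGEBLOCK_BITS == 3 (the three migratetype
 * bits) and pageblock_order == 9, i.e. 512 4K pages per pageblock: a 4GB
 * zone holds 1048576 pages = 2048 pageblocks, needing 2048 * 3 = 6144
 * bits. 6144 is already a multiple of BITS_PER_LONG on 64-bit, so the
 * usemap occupies 6144 / 8 = 768 bytes.
 */
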
3694static void __init setup_usemap(struct pglist_data *pgdat,
3695				struct zone *zone, unsigned long zonesize)
3696{
3697	unsigned long usemapsize = usemap_size(zonesize);
3698	zone->pageblock_flags = NULL;
3699	if (usemapsize)
3700		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3701}
3702#else
3703static inline void setup_usemap(struct pglist_data *pgdat,
3704				struct zone *zone, unsigned long zonesize) {}
3705#endif /* CONFIG_SPARSEMEM */
3706
3707#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3708
3709/* Return a sensible default order for the pageblock size. */
3710static inline int pageblock_default_order(void)
3711{
3712	if (HPAGE_SHIFT > PAGE_SHIFT)
3713		return HUGETLB_PAGE_ORDER;
3714
3715	return MAX_ORDER-1;
3716}
3717
3718/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3719static inline void __init set_pageblock_order(unsigned int order)
3720{
3721	/* Check that pageblock_nr_pages has not already been setup */
3722	if (pageblock_order)
3723		return;
3724
3725	/*
3726	 * Assume the largest contiguous order of interest is a huge page.
3727	 * This value may be variable depending on boot parameters on IA64
3728	 */
3729	pageblock_order = order;
3730}
3731#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3732
3733/*
3734 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3735 * and pageblock_default_order() are unused as pageblock_order is set
3736 * at compile-time. See include/linux/pageblock-flags.h for the values of
3737 * pageblock_order based on the kernel config
3738 */
3739static inline int pageblock_default_order(unsigned int order)
3740{
3741	return MAX_ORDER-1;
3742}
3743#define set_pageblock_order(x)	do {} while (0)
3744
3745#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3746
3747/*
3748 * Set up the zone data structures:
3749 *   - mark all pages reserved
3750 *   - mark all memory queues empty
3751 *   - clear the memory bitmaps
3752 */
3753static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3754		unsigned long *zones_size, unsigned long *zholes_size)
3755{
3756	enum zone_type j;
3757	int nid = pgdat->node_id;
3758	unsigned long zone_start_pfn = pgdat->node_start_pfn;
3759	int ret;
3760
3761	pgdat_resize_init(pgdat);
3762	pgdat->nr_zones = 0;
3763	init_waitqueue_head(&pgdat->kswapd_wait);
3764	pgdat->kswapd_max_order = 0;
3765	pgdat_page_cgroup_init(pgdat);
3766
3767	for (j = 0; j < MAX_NR_ZONES; j++) {
3768		struct zone *zone = pgdat->node_zones + j;
3769		unsigned long size, realsize, memmap_pages;
3770		enum lru_list l;
3771
3772		size = zone_spanned_pages_in_node(nid, j, zones_size);
3773		realsize = size - zone_absent_pages_in_node(nid, j,
3774								zholes_size);
3775
3776		/*
3777		 * Adjust realsize so that it accounts for how much memory
3778		 * is used by this zone for memmap. This affects the watermark
3779		 * and per-cpu initialisations
3780		 */
3781		memmap_pages =
3782			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3783		if (realsize >= memmap_pages) {
3784			realsize -= memmap_pages;
3785			if (memmap_pages)
3786				printk(KERN_DEBUG
3787				       "  %s zone: %lu pages used for memmap\n",
3788				       zone_names[j], memmap_pages);
3789		} else
3790			printk(KERN_WARNING
3791				"  %s zone: %lu pages exceeds realsize %lu\n",
3792				zone_names[j], memmap_pages, realsize);
3793
3794		/* Account for reserved pages */
3795		if (j == 0 && realsize > dma_reserve) {
3796			realsize -= dma_reserve;
3797			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
3798					zone_names[0], dma_reserve);
3799		}
3800
3801		if (!is_highmem_idx(j))
3802			nr_kernel_pages += realsize;
3803		nr_all_pages += realsize;
3804
3805		zone->spanned_pages = size;
3806		zone->present_pages = realsize;
3807#ifdef CONFIG_NUMA
3808		zone->node = nid;
3809		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3810						/ 100;
3811		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3812#endif
3813		zone->name = zone_names[j];
3814		spin_lock_init(&zone->lock);
3815		spin_lock_init(&zone->lru_lock);
3816		zone_seqlock_init(zone);
3817		zone->zone_pgdat = pgdat;
3818
3819		zone->prev_priority = DEF_PRIORITY;
3820
3821		zone_pcp_init(zone);
3822		for_each_lru(l) {
3823			INIT_LIST_HEAD(&zone->lru[l].list);
3824			zone->reclaim_stat.nr_saved_scan[l] = 0;
3825		}
3826		zone->reclaim_stat.recent_rotated[0] = 0;
3827		zone->reclaim_stat.recent_rotated[1] = 0;
3828		zone->reclaim_stat.recent_scanned[0] = 0;
3829		zone->reclaim_stat.recent_scanned[1] = 0;
3830		zap_zone_vm_stats(zone);
3831		zone->flags = 0;
3832		if (!size)
3833			continue;
3834
3835		set_pageblock_order(pageblock_default_order());
3836		setup_usemap(pgdat, zone, size);
3837		ret = init_currently_empty_zone(zone, zone_start_pfn,
3838						size, MEMMAP_EARLY);
3839		BUG_ON(ret);
3840		memmap_init(size, nid, j, zone_start_pfn);
3841		zone_start_pfn += size;
3842	}
3843}
3844
3845static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3846{
3847	/* Skip empty nodes */
3848	if (!pgdat->node_spanned_pages)
3849		return;
3850
3851#ifdef CONFIG_FLAT_NODE_MEM_MAP
3852	/* ia64 gets its own node_mem_map, before this, without bootmem */
3853	if (!pgdat->node_mem_map) {
3854		unsigned long size, start, end;
3855		struct page *map;
3856
3857		/*
3858		 * The zone's endpoints aren't required to be MAX_ORDER
3859		 * aligned, but the node_mem_map endpoints must be, in order
3860		 * for the buddy allocator to function correctly.
3861		 */
3862		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3863		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3864		end = ALIGN(end, MAX_ORDER_NR_PAGES);
3865		size =  (end - start) * sizeof(struct page);
3866		map = alloc_remap(pgdat->node_id, size);
3867		if (!map)
3868			map = alloc_bootmem_node(pgdat, size);
3869		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3870	}
3871#ifndef CONFIG_NEED_MULTIPLE_NODES
3872	/*
3873	 * With no DISCONTIG, the global mem_map is just set as node 0's
3874	 */
3875	if (pgdat == NODE_DATA(0)) {
3876		mem_map = NODE_DATA(0)->node_mem_map;
3877#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3878		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3879			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3880#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3881	}
3882#endif
3883#endif /* CONFIG_FLAT_NODE_MEM_MAP */
3884}
3885
3886void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3887		unsigned long node_start_pfn, unsigned long *zholes_size)
3888{
3889	pg_data_t *pgdat = NODE_DATA(nid);
3890
3891	pgdat->node_id = nid;
3892	pgdat->node_start_pfn = node_start_pfn;
3893	calculate_node_totalpages(pgdat, zones_size, zholes_size);
3894
3895	alloc_node_mem_map(pgdat);
3896#ifdef CONFIG_FLAT_NODE_MEM_MAP
3897	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3898		nid, (unsigned long)pgdat,
3899		(unsigned long)pgdat->node_mem_map);
3900#endif
3901
3902	free_area_init_core(pgdat, zones_size, zholes_size);
3903}
3904
3905#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3906
3907#if MAX_NUMNODES > 1
3908/*
3909 * Figure out the number of possible node ids.
3910 */
3911static void __init setup_nr_node_ids(void)
3912{
3913	unsigned int node;
3914	unsigned int highest = 0;
3915
3916	for_each_node_mask(node, node_possible_map)
3917		highest = node;
3918	nr_node_ids = highest + 1;
3919}
3920#else
3921static inline void setup_nr_node_ids(void)
3922{
3923}
3924#endif
3925
3926/**
3927 * add_active_range - Register a range of PFNs backed by physical memory
3928 * @nid: The node ID the range resides on
3929 * @start_pfn: The start PFN of the available physical memory
3930 * @end_pfn: The end PFN of the available physical memory
3931 *
3932 * These ranges are stored in an early_node_map[] and later used by
3933 * free_area_init_nodes() to calculate zone sizes and holes. If the
3934 * range spans a memory hole, it is up to the architecture to ensure
3935 * the memory is not freed by the bootmem allocator. If possible
3936 * the range being registered will be merged with existing ranges.
3937 */
3938void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3939						unsigned long end_pfn)
3940{
3941	int i;
3942
3943	mminit_dprintk(MMINIT_TRACE, "memory_register",
3944			"Entering add_active_range(%d, %#lx, %#lx) "
3945			"%d entries of %d used\n",
3946			nid, start_pfn, end_pfn,
3947			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3948
3949	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3950
3951	/* Merge with existing active regions if possible */
3952	for (i = 0; i < nr_nodemap_entries; i++) {
3953		if (early_node_map[i].nid != nid)
3954			continue;
3955
3956		/* Skip if an existing region covers this new one */
3957		if (start_pfn >= early_node_map[i].start_pfn &&
3958				end_pfn <= early_node_map[i].end_pfn)
3959			return;
3960
3961		/* Merge forward if suitable */
3962		if (start_pfn <= early_node_map[i].end_pfn &&
3963				end_pfn > early_node_map[i].end_pfn) {
3964			early_node_map[i].end_pfn = end_pfn;
3965			return;
3966		}
3967
3968		/* Merge backward if suitable */
3969		if (start_pfn < early_node_map[i].end_pfn &&
3970				end_pfn >= early_node_map[i].start_pfn) {
3971			early_node_map[i].start_pfn = start_pfn;
3972			return;
3973		}
3974	}
3975
3976	/* Check that early_node_map is large enough */
3977	if (i >= MAX_ACTIVE_REGIONS) {
3978		printk(KERN_CRIT "More than %d memory regions, truncating\n",
3979							MAX_ACTIVE_REGIONS);
3980		return;
3981	}
3982
3983	early_node_map[i].nid = nid;
3984	early_node_map[i].start_pfn = start_pfn;
3985	early_node_map[i].end_pfn = end_pfn;
3986	nr_nodemap_entries = i + 1;
3987}
3988
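/*
 * Worked example (illustrative PFNs): with early_node_map[] holding
 * [0x100, 0x200) for node 0, add_active_range(0, 0x180, 0x300) merges
 * forward so the entry becomes [0x100, 0x300); a later call with
 * [0x120, 0x250) is fully covered and returns without change. Ranges
 * belonging to different nodes are never merged.
 */
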
3989/**
3990 * remove_active_range - Shrink an existing registered range of PFNs
3991 * @nid: The node id the range is on that should be shrunk
3992 * @start_pfn: The new start PFN of the range
3993 * @end_pfn: The new end PFN of the range
3994 *
3995 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
3996 * The map is kept near the end of the physical page range that has already
3997 * been registered. This function allows an arch to shrink an existing
3998 * registered range.
3999 */
4000void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4001				unsigned long end_pfn)
4002{
4003	int i, j;
4004	int removed = 0;
4005
4006	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4007			  nid, start_pfn, end_pfn);
4008
4009	/* Find the old active region end and shrink */
4010	for_each_active_range_index_in_nid(i, nid) {
4011		if (early_node_map[i].start_pfn >= start_pfn &&
4012		    early_node_map[i].end_pfn <= end_pfn) {
4013			/* clear it */
4014			early_node_map[i].start_pfn = 0;
4015			early_node_map[i].end_pfn = 0;
4016			removed = 1;
4017			continue;
4018		}
4019		if (early_node_map[i].start_pfn < start_pfn &&
4020		    early_node_map[i].end_pfn > start_pfn) {
4021			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4022			early_node_map[i].end_pfn = start_pfn;
4023			if (temp_end_pfn > end_pfn)
4024				add_active_range(nid, end_pfn, temp_end_pfn);
4025			continue;
4026		}
4027		if (early_node_map[i].start_pfn >= start_pfn &&
4028		    early_node_map[i].end_pfn > end_pfn &&
4029		    early_node_map[i].start_pfn < end_pfn) {
4030			early_node_map[i].start_pfn = end_pfn;
4031			continue;
4032		}
4033	}
4034
4035	if (!removed)
4036		return;
4037
4038	/* remove the blank ones */
4039	for (i = nr_nodemap_entries - 1; i > 0; i--) {
4040		if (early_node_map[i].nid != nid)
4041			continue;
4042		if (early_node_map[i].end_pfn)
4043			continue;
4044		/* we found it, get rid of it */
4045		for (j = i; j < nr_nodemap_entries - 1; j++)
4046			memcpy(&early_node_map[j], &early_node_map[j+1],
4047				sizeof(early_node_map[j]));
4048		j = nr_nodemap_entries - 1;
4049		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4050		nr_nodemap_entries--;
4051	}
4052}
4053
4054/**
4055 * remove_all_active_ranges - Remove all currently registered regions
4056 *
4057 * During discovery, it may be found that a table like SRAT is invalid
4058 * and an alternative discovery method must be used. This function removes
4059 * all currently registered regions.
4060 */
4061void __init remove_all_active_ranges(void)
4062{
4063	memset(early_node_map, 0, sizeof(early_node_map));
4064	nr_nodemap_entries = 0;
4065}
4066
4067/* Compare two active node_active_regions */
4068static int __init cmp_node_active_region(const void *a, const void *b)
4069{
4070	struct node_active_region *arange = (struct node_active_region *)a;
4071	struct node_active_region *brange = (struct node_active_region *)b;
4072
4073	/* Done this way to avoid overflows */
4074	if (arange->start_pfn > brange->start_pfn)
4075		return 1;
4076	if (arange->start_pfn < brange->start_pfn)
4077		return -1;
4078
4079	return 0;
4080}
4081
4082/* sort the node_map by start_pfn */
4083static void __init sort_node_map(void)
4084{
4085	sort(early_node_map, (size_t)nr_nodemap_entries,
4086			sizeof(struct node_active_region),
4087			cmp_node_active_region, NULL);
4088}
4089
4090/* Find the lowest pfn for a node */
4091static unsigned long __init find_min_pfn_for_node(int nid)
4092{
4093	int i;
4094	unsigned long min_pfn = ULONG_MAX;
4095
4096	/* Assuming a sorted map, the first range found has the starting pfn */
4097	for_each_active_range_index_in_nid(i, nid)
4098		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4099
4100	if (min_pfn == ULONG_MAX) {
4101		printk(KERN_WARNING
4102			"Could not find start_pfn for node %d\n", nid);
4103		return 0;
4104	}
4105
4106	return min_pfn;
4107}
4108
4109/**
4110 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4111 *
4112 * It returns the minimum PFN based on information provided via
4113 * add_active_range().
4114 */
4115unsigned long __init find_min_pfn_with_active_regions(void)
4116{
4117	return find_min_pfn_for_node(MAX_NUMNODES);
4118}
4119
4120/*
4121 * early_calculate_totalpages()
4122 * Sum pages in active regions for movable zone.
4123 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4124 */
4125static unsigned long __init early_calculate_totalpages(void)
4126{
4127	int i;
4128	unsigned long totalpages = 0;
4129
4130	for (i = 0; i < nr_nodemap_entries; i++) {
4131		unsigned long pages = early_node_map[i].end_pfn -
4132						early_node_map[i].start_pfn;
4133		totalpages += pages;
4134		if (pages)
4135			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4136	}
4137	return totalpages;
4138}
4139
4140/*
4141 * Find the PFN the Movable zone begins in each node. Kernel memory
4142 * is spread evenly between nodes as long as the nodes have enough
4143 * memory. When they don't, some nodes will have more kernelcore than
4144 * others
4145 */
4146static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4147{
4148	int i, nid;
4149	unsigned long usable_startpfn;
4150	unsigned long kernelcore_node, kernelcore_remaining;
4151	/* save the state before borrowing the nodemask */
4152	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4153	unsigned long totalpages = early_calculate_totalpages();
4154	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4155
4156	/*
4157	 * If movablecore was specified, calculate the corresponding
4158	 * size of kernelcore so that memory usable for
4159	 * any allocation type is evenly spread. If both kernelcore
4160	 * and movablecore are specified, then the value of kernelcore
4161	 * will be used for required_kernelcore if it's greater than
4162	 * what movablecore would have allowed.
4163	 */
4164	if (required_movablecore) {
4165		unsigned long corepages;
4166
4167		/*
4168		 * Round-up so that ZONE_MOVABLE is at least as large as what
4169		 * was requested by the user
4170		 */
4171		required_movablecore =
4172			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4173		corepages = totalpages - required_movablecore;
4174
4175		required_kernelcore = max(required_kernelcore, corepages);
4176	}
4177
4178	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
4179	if (!required_kernelcore)
4180		goto out;
4181
4182	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4183	find_usable_zone_for_movable();
4184	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4185
4186restart:
4187	/* Spread kernelcore memory as evenly as possible throughout nodes */
4188	kernelcore_node = required_kernelcore / usable_nodes;
4189	for_each_node_state(nid, N_HIGH_MEMORY) {
4190		/*
4191		 * Recalculate kernelcore_node if the division per node
4192		 * now exceeds what is necessary to satisfy the requested
4193		 * amount of memory for the kernel
4194		 */
4195		if (required_kernelcore < kernelcore_node)
4196			kernelcore_node = required_kernelcore / usable_nodes;
4197
4198		/*
4199		 * As the map is walked, we track how much memory is usable
4200		 * by the kernel using kernelcore_remaining. When it is
4201		 * 0, the rest of the node is usable by ZONE_MOVABLE
4202		 */
4203		kernelcore_remaining = kernelcore_node;
4204
4205		/* Go through each range of PFNs within this node */
4206		for_each_active_range_index_in_nid(i, nid) {
4207			unsigned long start_pfn, end_pfn;
4208			unsigned long size_pages;
4209
4210			start_pfn = max(early_node_map[i].start_pfn,
4211						zone_movable_pfn[nid]);
4212			end_pfn = early_node_map[i].end_pfn;
4213			if (start_pfn >= end_pfn)
4214				continue;
4215
4216			/* Account for what is only usable for kernelcore */
4217			if (start_pfn < usable_startpfn) {
4218				unsigned long kernel_pages;
4219				kernel_pages = min(end_pfn, usable_startpfn)
4220								- start_pfn;
4221
4222				kernelcore_remaining -= min(kernel_pages,
4223							kernelcore_remaining);
4224				required_kernelcore -= min(kernel_pages,
4225							required_kernelcore);
4226
4227				/* Continue if range is now fully accounted */
4228				if (end_pfn <= usable_startpfn) {
4229
4230					/*
4231					 * Push zone_movable_pfn to the end so
4232					 * that if we have to rebalance
4233					 * kernelcore across nodes, we will
4234					 * not double account here
4235					 */
4236					zone_movable_pfn[nid] = end_pfn;
4237					continue;
4238				}
4239				start_pfn = usable_startpfn;
4240			}
4241
4242			/*
4243			 * The usable PFN range for ZONE_MOVABLE is from
4244			 * start_pfn->end_pfn. Calculate size_pages as the
4245			 * number of pages used as kernelcore
4246			 */
4247			size_pages = end_pfn - start_pfn;
4248			if (size_pages > kernelcore_remaining)
4249				size_pages = kernelcore_remaining;
4250			zone_movable_pfn[nid] = start_pfn + size_pages;
4251
4252			/*
4253			 * Some kernelcore has been met, update counts and
4254			 * break if the kernelcore for this node has been
4255			 * satisfied
4256			 */
4257			required_kernelcore -= min(required_kernelcore,
4258								size_pages);
4259			kernelcore_remaining -= size_pages;
4260			if (!kernelcore_remaining)
4261				break;
4262		}
4263	}
4264
4265	/*
4266	 * If there is still required_kernelcore, we do another pass with one
4267	 * less node in the count. This will push zone_movable_pfn[nid] further
4268	 * along on the nodes that still have memory until kernelcore is
4269	 * satisfied
4270	 */
4271	usable_nodes--;
4272	if (usable_nodes && required_kernelcore > usable_nodes)
4273		goto restart;
4274
4275	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4276	for (nid = 0; nid < MAX_NUMNODES; nid++)
4277		zone_movable_pfn[nid] =
4278			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4279
4280out:
4281	/* restore the node_state */
4282	node_states[N_HIGH_MEMORY] = saved_node_state;
4283}
4284
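/*
 * Worked example (illustrative sizes): on a two-node machine with 4GB
 * per node and kernelcore=2G, each node is asked to hold 1GB of
 * kernelcore, so zone_movable_pfn[] for each node lands 1GB above the
 * node's usable start and the remaining 3GB per node becomes
 * ZONE_MOVABLE. If one node were too small to hold its share, the
 * restart pass would spread the shortfall across the remaining nodes.
 */
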
4285/* Any regular memory on that node ? */
4286static void check_for_regular_memory(pg_data_t *pgdat)
4287{
4288#ifdef CONFIG_HIGHMEM
4289	enum zone_type zone_type;
4290
4291	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4292		struct zone *zone = &pgdat->node_zones[zone_type];
4293		if (zone->present_pages)
4294			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4295	}
4296#endif
4297}
4298
4299/**
4300 * free_area_init_nodes - Initialise all pg_data_t and zone data
4301 * @max_zone_pfn: an array of max PFNs for each zone
4302 *
4303 * This will call free_area_init_node() for each active node in the system.
4304 * Using the page ranges provided by add_active_range(), the size of each
4305 * zone in each node and their holes is calculated. If the maximum PFNs
4306 * of two adjacent zones match, it is assumed that the higher zone is empty.
4307 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4308 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4309 * starts where the previous one ended. For example, ZONE_DMA32 starts
4310 * at arch_max_dma_pfn.
4311 */
4312void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4313{
4314	unsigned long nid;
4315	int i;
4316
4317	/* Sort early_node_map as initialisation assumes it is sorted */
4318	sort_node_map();
4319
4320	/* Record where the zone boundaries are */
4321	memset(arch_zone_lowest_possible_pfn, 0,
4322				sizeof(arch_zone_lowest_possible_pfn));
4323	memset(arch_zone_highest_possible_pfn, 0,
4324				sizeof(arch_zone_highest_possible_pfn));
4325	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4326	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4327	for (i = 1; i < MAX_NR_ZONES; i++) {
4328		if (i == ZONE_MOVABLE)
4329			continue;
4330		arch_zone_lowest_possible_pfn[i] =
4331			arch_zone_highest_possible_pfn[i-1];
4332		arch_zone_highest_possible_pfn[i] =
4333			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4334	}
4335	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4336	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4337
4338	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4339	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4340	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4341
4342	/* Print out the zone ranges */
4343	printk("Zone PFN ranges:\n");
4344	for (i = 0; i < MAX_NR_ZONES; i++) {
4345		if (i == ZONE_MOVABLE)
4346			continue;
4347		printk("  %-8s %0#10lx -> %0#10lx\n",
4348				zone_names[i],
4349				arch_zone_lowest_possible_pfn[i],
4350				arch_zone_highest_possible_pfn[i]);
4351	}
4352
4353	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4354	printk("Movable zone start PFN for each node\n");
4355	for (i = 0; i < MAX_NUMNODES; i++) {
4356		if (zone_movable_pfn[i])
4357			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4358	}
4359
4360	/* Print out the early_node_map[] */
4361	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4362	for (i = 0; i < nr_nodemap_entries; i++)
4363		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4364						early_node_map[i].start_pfn,
4365						early_node_map[i].end_pfn);
4366
4367	/* Initialise every node */
4368	mminit_verify_pageflags_layout();
4369	setup_nr_node_ids();
4370	for_each_online_node(nid) {
4371		pg_data_t *pgdat = NODE_DATA(nid);
4372		free_area_init_node(nid, NULL,
4373				find_min_pfn_for_node(nid), NULL);
4374
4375		/* Any memory on that node */
4376		if (pgdat->node_present_pages)
4377			node_set_state(nid, N_HIGH_MEMORY);
4378		check_for_regular_memory(pgdat);
4379	}
4380}
4381
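/*
 * Worked example of the boundary stacking above (illustrative PFNs):
 * max_zone_pfn = { 4096, 1048576 } for DMA and NORMAL yields
 * ZONE_DMA = [min_pfn, 4096) and ZONE_NORMAL = [4096, 1048576);
 * passing 4096 for both would leave ZONE_NORMAL empty.
 */
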
4382static int __init cmdline_parse_core(char *p, unsigned long *core)
4383{
4384	unsigned long long coremem;
4385	if (!p)
4386		return -EINVAL;
4387
4388	coremem = memparse(p, &p);
4389	*core = coremem >> PAGE_SHIFT;
4390
4391	/* Paranoid check that UL is enough for the coremem value */
4392	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4393
4394	return 0;
4395}
4396
4397/*
4398 * kernelcore=size sets the amount of memory for use for allocations that
4399 * cannot be reclaimed or migrated.
4400 */
4401static int __init cmdline_parse_kernelcore(char *p)
4402{
4403	return cmdline_parse_core(p, &required_kernelcore);
4404}
4405
4406/*
4407 * movablecore=size sets the amount of memory for use for allocations that
4408 * can be reclaimed or migrated.
4409 */
4410static int __init cmdline_parse_movablecore(char *p)
4411{
4412	return cmdline_parse_core(p, &required_movablecore);
4413}
4414
4415early_param("kernelcore", cmdline_parse_kernelcore);
4416early_param("movablecore", cmdline_parse_movablecore);
4417
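/*
 * Example: booting with "kernelcore=512M" makes cmdline_parse_core()
 * store 512M >> PAGE_SHIFT pages (131072 with 4K pages) in
 * required_kernelcore; "movablecore=1G" is parsed the same way into
 * required_movablecore.
 */
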
4418#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4419
4420/**
4421 * set_dma_reserve - set the specified number of pages reserved in the first zone
4422 * @new_dma_reserve: The number of pages to mark reserved
4423 *
4424 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4425 * In the DMA zone, a significant percentage may be consumed by the kernel image
4426 * and other unfreeable allocations which can skew the watermarks badly. This
4427 * function may optionally be used to account for unfreeable pages in the
4428 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4429 * smaller per-cpu batchsize.
4430 */
4431void __init set_dma_reserve(unsigned long new_dma_reserve)
4432{
4433	dma_reserve = new_dma_reserve;
4434}
4435
4436#ifndef CONFIG_NEED_MULTIPLE_NODES
4437struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4438EXPORT_SYMBOL(contig_page_data);
4439#endif
4440
4441void __init free_area_init(unsigned long *zones_size)
4442{
4443	free_area_init_node(0, zones_size,
4444			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4445}
4446
4447static int page_alloc_cpu_notify(struct notifier_block *self,
4448				 unsigned long action, void *hcpu)
4449{
4450	int cpu = (unsigned long)hcpu;
4451
4452	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4453		drain_pages(cpu);
4454
4455		/*
4456		 * Spill the event counters of the dead processor
4457		 * into the current processors event counters.
4458		 * This artificially elevates the count of the current
4459		 * processor.
4460		 */
4461		vm_events_fold_cpu(cpu);
4462
4463		/*
4464		 * Zero the differential counters of the dead processor
4465		 * so that the vm statistics are consistent.
4466		 *
4467		 * This is only okay since the processor is dead and cannot
4468		 * race with what we are doing.
4469		 */
4470		refresh_cpu_vm_stats(cpu);
4471	}
4472	return NOTIFY_OK;
4473}
4474
4475void __init page_alloc_init(void)
4476{
4477	hotcpu_notifier(page_alloc_cpu_notify, 0);
4478}
4479
4480/*
4481 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
4482 *	or min_free_kbytes changes.
4483 */
4484static void calculate_totalreserve_pages(void)
4485{
4486	struct pglist_data *pgdat;
4487	unsigned long reserve_pages = 0;
4488	enum zone_type i, j;
4489
4490	for_each_online_pgdat(pgdat) {
4491		for (i = 0; i < MAX_NR_ZONES; i++) {
4492			struct zone *zone = pgdat->node_zones + i;
4493			unsigned long max = 0;
4494
4495			/* Find valid and maximum lowmem_reserve in the zone */
4496			for (j = i; j < MAX_NR_ZONES; j++) {
4497				if (zone->lowmem_reserve[j] > max)
4498					max = zone->lowmem_reserve[j];
4499			}
4500
4501			/* we treat the high watermark as reserved pages. */
4502			max += high_wmark_pages(zone);
4503
4504			if (max > zone->present_pages)
4505				max = zone->present_pages;
4506			reserve_pages += max;
4507		}
4508	}
4509	totalreserve_pages = reserve_pages;
4510}
4511
4512/*
4513 * setup_per_zone_lowmem_reserve - called whenever
4514 *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
4515 *	has a correct pages reserved value, so an adequate number of
4516 *	pages are left in the zone after a successful __alloc_pages().
4517 */
4518static void setup_per_zone_lowmem_reserve(void)
4519{
4520	struct pglist_data *pgdat;
4521	enum zone_type j, idx;
4522
4523	for_each_online_pgdat(pgdat) {
4524		for (j = 0; j < MAX_NR_ZONES; j++) {
4525			struct zone *zone = pgdat->node_zones + j;
4526			unsigned long present_pages = zone->present_pages;
4527
4528			zone->lowmem_reserve[j] = 0;
4529
4530			idx = j;
4531			while (idx) {
4532				struct zone *lower_zone;
4533
4534				idx--;
4535
4536				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4537					sysctl_lowmem_reserve_ratio[idx] = 1;
4538
4539				lower_zone = pgdat->node_zones + idx;
4540				lower_zone->lowmem_reserve[j] = present_pages /
4541					sysctl_lowmem_reserve_ratio[idx];
4542				present_pages += lower_zone->present_pages;
4543			}
4544		}
4545	}
4546
4547	/* update totalreserve_pages */
4548	calculate_totalreserve_pages();
4549}
4550
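/*
 * Worked example (illustrative sizes, assuming ratios of 32 for NORMAL
 * and 256 for DMA): walking down from a 2GB ZONE_HIGHMEM, ZONE_NORMAL
 * reserves 2GB/32 = 64MB against HIGHMEM-capable allocations. Because
 * present_pages accumulates each zone as the loop descends, ZONE_DMA
 * then reserves (2GB + ZONE_NORMAL's pages)/256 against the same
 * allocations.
 */
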
4551/**
4552 * setup_per_zone_wmarks - called when min_free_kbytes changes
4553 * or when memory is hot-{added|removed}
4554 *
4555 * Ensures that the watermark[min,low,high] values for each zone are set
4556 * correctly with respect to min_free_kbytes.
4557 */
4558void setup_per_zone_wmarks(void)
4559{
4560	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4561	unsigned long lowmem_pages = 0;
4562	struct zone *zone;
4563	unsigned long flags;
4564
4565	/* Calculate total number of !ZONE_HIGHMEM pages */
4566	for_each_zone(zone) {
4567		if (!is_highmem(zone))
4568			lowmem_pages += zone->present_pages;
4569	}
4570
4571	for_each_zone(zone) {
4572		u64 tmp;
4573
4574		spin_lock_irqsave(&zone->lock, flags);
4575		tmp = (u64)pages_min * zone->present_pages;
4576		do_div(tmp, lowmem_pages);
4577		if (is_highmem(zone)) {
4578			/*
4579			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4580			 * need highmem pages, so cap pages_min to a small
4581			 * value here.
4582			 *
4583			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
4584			 * deltas control asynchronous page reclaim, and so should
4585			 * not be capped for highmem.
4586			 */
4587			int min_pages;
4588
4589			min_pages = zone->present_pages / 1024;
4590			if (min_pages < SWAP_CLUSTER_MAX)
4591				min_pages = SWAP_CLUSTER_MAX;
4592			if (min_pages > 128)
4593				min_pages = 128;
4594			zone->watermark[WMARK_MIN] = min_pages;
4595		} else {
4596			/*
4597			 * If it's a lowmem zone, reserve a number of pages
4598			 * proportionate to the zone's size.
4599			 */
4600			zone->watermark[WMARK_MIN] = tmp;
4601		}
4602
4603		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
4604		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4605		setup_zone_migrate_reserve(zone);
4606		spin_unlock_irqrestore(&zone->lock, flags);
4607	}
4608
4609	/* update totalreserve_pages */
4610	calculate_totalreserve_pages();
4611}
4612
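/*
 * Worked example (illustrative sizes): min_free_kbytes = 4096 gives
 * pages_min = 1024 4K pages. A lowmem zone holding all of lowmem gets
 * tmp = 1024, hence WMARK_MIN = 1024, WMARK_LOW = 1024 + 256 and
 * WMARK_HIGH = 1024 + 512. A 1048576-page highmem zone instead has
 * WMARK_MIN clamped into [SWAP_CLUSTER_MAX, 128]: 1048576/1024 = 1024,
 * capped to 128.
 */
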
4613/*
4614 * The inactive anon list should be small enough that the VM never has to
4615 * do too much work, but large enough that each inactive page has a chance
4616 * to be referenced again before it is swapped out.
4617 *
4618 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4619 * INACTIVE_ANON pages on this zone's LRU, maintained by the
4620 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4621 * the anonymous pages are kept on the inactive list.
4622 *
4623 * total     target    max
4624 * memory    ratio     inactive anon
4625 * -------------------------------------
4626 *   10MB       1         5MB
4627 *  100MB       1        50MB
4628 *    1GB       3       250MB
4629 *   10GB      10       0.9GB
4630 *  100GB      31         3GB
4631 *    1TB     101        10GB
4632 *   10TB     320        32GB
4633 */
4634void calculate_zone_inactive_ratio(struct zone *zone)
4635{
4636	unsigned int gb, ratio;
4637
4638	/* Zone size in gigabytes */
4639	gb = zone->present_pages >> (30 - PAGE_SHIFT);
4640	if (gb)
4641		ratio = int_sqrt(10 * gb);
4642	else
4643		ratio = 1;
4644
4645	zone->inactive_ratio = ratio;
4646}
4647
4648static void __init setup_per_zone_inactive_ratio(void)
4649{
4650	struct zone *zone;
4651
4652	for_each_zone(zone)
4653		calculate_zone_inactive_ratio(zone);
4654}
4655
4656/*
4657 * Initialise min_free_kbytes.
4658 *
4659 * For small machines we want it small (128k min).  For large machines
4660 * we want it large (64MB max).  But it is not linear, because network
4661 * bandwidth does not increase linearly with machine size.  We use
4662 *
4663 * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4664 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
4665 *
4666 * which yields
4667 *
4668 * 16MB:	512k
4669 * 32MB:	724k
4670 * 64MB:	1024k
4671 * 128MB:	1448k
4672 * 256MB:	2048k
4673 * 512MB:	2896k
4674 * 1024MB:	4096k
4675 * 2048MB:	5792k
4676 * 4096MB:	8192k
4677 * 8192MB:	11584k
4678 * 16384MB:	16384k
4679 */
4680static int __init init_per_zone_wmark_min(void)
4681{
4682	unsigned long lowmem_kbytes;
4683
4684	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4685
4686	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4687	if (min_free_kbytes < 128)
4688		min_free_kbytes = 128;
4689	if (min_free_kbytes > 65536)
4690		min_free_kbytes = 65536;
4691	setup_per_zone_wmarks();
4692	setup_per_zone_lowmem_reserve();
4693	setup_per_zone_inactive_ratio();
4694	return 0;
4695}
4696module_init(init_per_zone_wmark_min)
4697
4698/*
4699 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4700 *	that we can call two helper functions whenever min_free_kbytes
4701 *	changes.
4702 */
4703int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4704	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4705{
4706	proc_dointvec(table, write, file, buffer, length, ppos);
4707	if (write)
4708		setup_per_zone_wmarks();
4709	return 0;
4710}
4711
4712#ifdef CONFIG_NUMA
4713int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4714	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4715{
4716	struct zone *zone;
4717	int rc;
4718
4719	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4720	if (rc)
4721		return rc;
4722
4723	for_each_zone(zone)
4724		zone->min_unmapped_pages = (zone->present_pages *
4725				sysctl_min_unmapped_ratio) / 100;
4726	return 0;
4727}
4728
4729int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4730	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4731{
4732	struct zone *zone;
4733	int rc;
4734
4735	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4736	if (rc)
4737		return rc;
4738
4739	for_each_zone(zone)
4740		zone->min_slab_pages = (zone->present_pages *
4741				sysctl_min_slab_ratio) / 100;
4742	return 0;
4743}
4744#endif
4745
4746/*
4747 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4748 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4749 *	whenever sysctl_lowmem_reserve_ratio changes.
4750 *
4751 * The reserve ratio obviously has absolutely no relation with the
4752 * minimum watermarks; the lowmem reserve ratio only makes sense as
4753 * a function of the boot-time zone sizes.
4754 */
4755int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4756	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4757{
4758	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4759	setup_per_zone_lowmem_reserve();
4760	return 0;
4761}
4762
4763/*
4764 * percpu_pagelist_fraction - changes pcp->high for each zone on each
4765 * cpu. It is the fraction of total pages in each zone that a hot per-cpu
4766 * pagelist can have before it gets flushed back to the buddy allocator.
4767 */
4768
4769int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4770	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4771{
4772	struct zone *zone;
4773	unsigned int cpu;
4774	int ret;
4775
4776	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4777	if (!write || (ret == -EINVAL))
4778		return ret;
4779	for_each_populated_zone(zone) {
4780		for_each_online_cpu(cpu) {
4781			unsigned long  high;
4782			high = zone->present_pages / percpu_pagelist_fraction;
4783			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4784		}
4785	}
4786	return 0;
4787}
4788
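/*
 * Example: "echo 8 > /proc/sys/vm/percpu_pagelist_fraction" sets
 * pcp->high for every populated zone to present_pages / 8 on each
 * online cpu, so a 1048576-page zone would drain its hot pagelist back
 * to the buddy allocator once it grows past 131072 pages.
 */
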
4789int hashdist = HASHDIST_DEFAULT;
4790
4791#ifdef CONFIG_NUMA
4792static int __init set_hashdist(char *str)
4793{
4794	if (!str)
4795		return 0;
4796	hashdist = simple_strtoul(str, &str, 0);
4797	return 1;
4798}
4799__setup("hashdist=", set_hashdist);
4800#endif
4801
4802/*
4803 * allocate a large system hash table from bootmem
4804 * - it is assumed that the hash table must contain an exact power-of-2
4805 *   quantity of entries
4806 * - limit is the number of hash buckets, not the total allocation size
4807 */
4808void *__init alloc_large_system_hash(const char *tablename,
4809				     unsigned long bucketsize,
4810				     unsigned long numentries,
4811				     int scale,
4812				     int flags,
4813				     unsigned int *_hash_shift,
4814				     unsigned int *_hash_mask,
4815				     unsigned long limit)
4816{
4817	unsigned long long max = limit;
4818	unsigned long log2qty, size;
4819	void *table = NULL;
4820
4821	/* allow the kernel cmdline to have a say */
4822	if (!numentries) {
4823		/* round applicable memory size up to nearest megabyte */
4824		numentries = nr_kernel_pages;
4825		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4826		numentries >>= 20 - PAGE_SHIFT;
4827		numentries <<= 20 - PAGE_SHIFT;
4828
4829		/* limit to 1 bucket per 2^scale bytes of low memory */
4830		if (scale > PAGE_SHIFT)
4831			numentries >>= (scale - PAGE_SHIFT);
4832		else
4833			numentries <<= (PAGE_SHIFT - scale);
4834
4835		/* Make sure we've got at least a 0-order allocation. */
4836		if (unlikely(flags & HASH_SMALL)) {
4837			/* Makes no sense without HASH_EARLY */
4838			WARN_ON(!(flags & HASH_EARLY));
4839			if (!(numentries >> *_hash_shift)) {
4840				numentries = 1UL << *_hash_shift;
4841				BUG_ON(!numentries);
4842			}
4843		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4844			numentries = PAGE_SIZE / bucketsize;
4845	}
4846	numentries = roundup_pow_of_two(numentries);
4847
4848	/* limit allocation size to 1/16 total memory by default */
4849	if (max == 0) {
4850		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4851		do_div(max, bucketsize);
4852	}
4853
4854	if (numentries > max)
4855		numentries = max;
4856
4857	log2qty = ilog2(numentries);
4858
4859	do {
4860		size = bucketsize << log2qty;
4861		if (flags & HASH_EARLY)
4862			table = alloc_bootmem_nopanic(size);
4863		else if (hashdist)
4864			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4865		else {
4866			/*
4867			 * If bucketsize is not a power of two, we may free
4868			 * some pages at the end of the hash table, which
4869			 * alloc_pages_exact() does automatically.
4870			 */
4871			if (get_order(size) < MAX_ORDER) {
4872				table = alloc_pages_exact(size, GFP_ATOMIC);
4873				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
4874			}
4875		}
4876	} while (!table && size > PAGE_SIZE && --log2qty);
4877
4878	if (!table)
4879		panic("Failed to allocate %s hash table\n", tablename);
4880
4881	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4882	       tablename,
4883	       (1U << log2qty),
4884	       ilog2(size) - PAGE_SHIFT,
4885	       size);
4886
4887	if (_hash_shift)
4888		*_hash_shift = log2qty;
4889	if (_hash_mask)
4890		*_hash_mask = (1 << log2qty) - 1;
4891
4892	return table;
4893}
4894
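/*
 * A typical boot-time caller, along the lines of the dentry cache setup
 * in fs/dcache.c (a sketch, not a verbatim copy):
 *
 *	dentry_hashtable = alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_head),
 *					dhash_entries, 13, HASH_EARLY,
 *					&d_hash_shift, &d_hash_mask, 0);
 *
 * Here scale=13 limits the table to one bucket per 8K of low memory and
 * limit=0 falls back to the 1/16-of-memory cap above.
 */
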
4895/* Return a pointer to the bitmap storing bits affecting a block of pages */
4896static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4897							unsigned long pfn)
4898{
4899#ifdef CONFIG_SPARSEMEM
4900	return __pfn_to_section(pfn)->pageblock_flags;
4901#else
4902	return zone->pageblock_flags;
4903#endif /* CONFIG_SPARSEMEM */
4904}
4905
4906static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4907{
4908#ifdef CONFIG_SPARSEMEM
4909	pfn &= (PAGES_PER_SECTION-1);
4910	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4911#else
4912	pfn = pfn - zone->zone_start_pfn;
4913	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4914#endif /* CONFIG_SPARSEMEM */
4915}
4916
4917/**
4918 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4919 * @page: The page within the block of interest
4920 * @start_bitidx: The first bit of interest to retrieve
4921 * @end_bitidx: The last bit of interest
4922 * returns pageblock_bits flags
4923 */
4924unsigned long get_pageblock_flags_group(struct page *page,
4925					int start_bitidx, int end_bitidx)
4926{
4927	struct zone *zone;
4928	unsigned long *bitmap;
4929	unsigned long pfn, bitidx;
4930	unsigned long flags = 0;
4931	unsigned long value = 1;
4932
4933	zone = page_zone(page);
4934	pfn = page_to_pfn(page);
4935	bitmap = get_pageblock_bitmap(zone, pfn);
4936	bitidx = pfn_to_bitidx(zone, pfn);
4937
4938	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4939		if (test_bit(bitidx + start_bitidx, bitmap))
4940			flags |= value;
4941
4942	return flags;
4943}
4944
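/*
 * For example, get_pageblock_migratetype() reads the
 * PB_migrate..PB_migrate_end bit group through this helper, and
 * set_pageblock_migratetype() writes it through
 * set_pageblock_flags_group() below.
 */
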
4945/**
4946 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4947 * @page: The page within the block of interest
4948 * @start_bitidx: The first bit of interest
4949 * @end_bitidx: The last bit of interest
4950 * @flags: The flags to set
4951 */
4952void set_pageblock_flags_group(struct page *page, unsigned long flags,
4953					int start_bitidx, int end_bitidx)
4954{
4955	struct zone *zone;
4956	unsigned long *bitmap;
4957	unsigned long pfn, bitidx;
4958	unsigned long value = 1;
4959
4960	zone = page_zone(page);
4961	pfn = page_to_pfn(page);
4962	bitmap = get_pageblock_bitmap(zone, pfn);
4963	bitidx = pfn_to_bitidx(zone, pfn);
4964	VM_BUG_ON(pfn < zone->zone_start_pfn);
4965	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4966
4967	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4968		if (flags & value)
4969			__set_bit(bitidx + start_bitidx, bitmap);
4970		else
4971			__clear_bit(bitidx + start_bitidx, bitmap);
4972}
4973
4974/*
4975 * This is designed as a helper function; please see page_isolation.c as well.
4976 * It sets/clears a pageblock's migratetype to/from ISOLATE.
4977 * The page allocator never allocates memory from an ISOLATE pageblock.
4978 */
4979
4980int set_migratetype_isolate(struct page *page)
4981{
4982	struct zone *zone;
4983	unsigned long flags;
4984	int ret = -EBUSY;
4985	int zone_idx;
4986
4987	zone = page_zone(page);
4988	zone_idx = zone_idx(zone);
4989	spin_lock_irqsave(&zone->lock, flags);
4990	/*
4991	 * In the future, more migrate types will be able to be isolation targets.
4992	 */
4993	if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE &&
4994	    zone_idx != ZONE_MOVABLE)
4995		goto out;
4996	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4997	move_freepages_block(zone, page, MIGRATE_ISOLATE);
4998	ret = 0;
4999out:
5000	spin_unlock_irqrestore(&zone->lock, flags);
5001	if (!ret)
5002		drain_all_pages();
5003	return ret;
5004}
5005
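/*
 * A typical caller is start_isolate_page_range() in mm/page_isolation.c,
 * which walks a PFN range pageblock by pageblock isolating each one and
 * rolls back via unset_migratetype_isolate() below on failure.
 */
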
5006void unset_migratetype_isolate(struct page *page)
5007{
5008	struct zone *zone;
5009	unsigned long flags;
5010	zone = page_zone(page);
5011	spin_lock_irqsave(&zone->lock, flags);
5012	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5013		goto out;
5014	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5015	move_freepages_block(zone, page, MIGRATE_MOVABLE);
5016out:
5017	spin_unlock_irqrestore(&zone->lock, flags);
5018}
5019
5020#ifdef CONFIG_MEMORY_HOTREMOVE
5021/*
5022 * All pages in the range must be isolated before calling this.
5023 */
5024void
5025__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5026{
5027	struct page *page;
5028	struct zone *zone;
5029	int order, i;
5030	unsigned long pfn;
5031	unsigned long flags;
5032	/* find the first valid pfn */
5033	for (pfn = start_pfn; pfn < end_pfn; pfn++)
5034		if (pfn_valid(pfn))
5035			break;
5036	if (pfn == end_pfn)
5037		return;
5038	zone = page_zone(pfn_to_page(pfn));
5039	spin_lock_irqsave(&zone->lock, flags);
5040	pfn = start_pfn;
5041	while (pfn < end_pfn) {
5042		if (!pfn_valid(pfn)) {
5043			pfn++;
5044			continue;
5045		}
5046		page = pfn_to_page(pfn);
5047		BUG_ON(page_count(page));
5048		BUG_ON(!PageBuddy(page));
5049		order = page_order(page);
5050#ifdef CONFIG_DEBUG_VM
5051		printk(KERN_INFO "remove from free list %lx %d %lx\n",
5052		       pfn, 1 << order, end_pfn);
5053#endif
5054		list_del(&page->lru);
5055		rmv_page_order(page);
5056		zone->free_area[order].nr_free--;
5057		__mod_zone_page_state(zone, NR_FREE_PAGES,
5058				      - (1UL << order));
5059		for (i = 0; i < (1 << order); i++)
5060			SetPageReserved((page+i));
5061		pfn += (1 << order);
5062	}
5063	spin_unlock_irqrestore(&zone->lock, flags);
5064}
5065#endif
5066