page_alloc.c revision 84be48d84a53044e13aa8816aab201ab5480815d
1/*
2 *  linux/mm/page_alloc.c
3 *
4 *  Manages the free list, the system allocates free pages here.
5 *  Note that kmalloc() lives in slab.c
6 *
7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8 *  Swap reorganised 29.12.95, Stephen Tweedie
9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
22#include <linux/jiffies.h>
23#include <linux/bootmem.h>
24#include <linux/memblock.h>
25#include <linux/compiler.h>
26#include <linux/kernel.h>
27#include <linux/kmemcheck.h>
28#include <linux/module.h>
29#include <linux/suspend.h>
30#include <linux/pagevec.h>
31#include <linux/blkdev.h>
32#include <linux/slab.h>
33#include <linux/oom.h>
34#include <linux/notifier.h>
35#include <linux/topology.h>
36#include <linux/sysctl.h>
37#include <linux/cpu.h>
38#include <linux/cpuset.h>
39#include <linux/memory_hotplug.h>
40#include <linux/nodemask.h>
41#include <linux/vmalloc.h>
42#include <linux/mempolicy.h>
43#include <linux/stop_machine.h>
44#include <linux/sort.h>
45#include <linux/pfn.h>
46#include <linux/backing-dev.h>
47#include <linux/fault-inject.h>
48#include <linux/page-isolation.h>
49#include <linux/page_cgroup.h>
50#include <linux/debugobjects.h>
51#include <linux/kmemleak.h>
52#include <linux/memory.h>
53#include <linux/compaction.h>
54#include <trace/events/kmem.h>
55#include <linux/ftrace_event.h>
56
57#include <asm/tlbflush.h>
58#include <asm/div64.h>
59#include "internal.h"
60
61#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
62DEFINE_PER_CPU(int, numa_node);
63EXPORT_PER_CPU_SYMBOL(numa_node);
64#endif
65
66#ifdef CONFIG_HAVE_MEMORYLESS_NODES
67/*
68 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
69 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
70 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
71 * defined in <linux/topology.h>.
72 */
73DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
74EXPORT_PER_CPU_SYMBOL(_numa_mem_);
75#endif
76
77/*
78 * Array of node states.
79 */
80nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
81	[N_POSSIBLE] = NODE_MASK_ALL,
82	[N_ONLINE] = { { [0] = 1UL } },
83#ifndef CONFIG_NUMA
84	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
85#ifdef CONFIG_HIGHMEM
86	[N_HIGH_MEMORY] = { { [0] = 1UL } },
87#endif
88	[N_CPU] = { { [0] = 1UL } },
89#endif	/* NUMA */
90};
91EXPORT_SYMBOL(node_states);
92
93unsigned long totalram_pages __read_mostly;
94unsigned long totalreserve_pages __read_mostly;
95int percpu_pagelist_fraction;
96gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
97
98#ifdef CONFIG_PM_SLEEP
99/*
100 * The following functions are used by the suspend/hibernate code to temporarily
101 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
102 * while devices are suspended.  To avoid races with the suspend/hibernate code,
103 * they should always be called with pm_mutex held (gfp_allowed_mask also should
104 * only be modified with pm_mutex held, unless the suspend/hibernate code is
105 * guaranteed not to run in parallel with that modification).
106 */
107
108static gfp_t saved_gfp_mask;
109
110void pm_restore_gfp_mask(void)
111{
112	WARN_ON(!mutex_is_locked(&pm_mutex));
113	if (saved_gfp_mask) {
114		gfp_allowed_mask = saved_gfp_mask;
115		saved_gfp_mask = 0;
116	}
117}
118
119void pm_restrict_gfp_mask(void)
120{
121	WARN_ON(!mutex_is_locked(&pm_mutex));
122	WARN_ON(saved_gfp_mask);
123	saved_gfp_mask = gfp_allowed_mask;
124	gfp_allowed_mask &= ~GFP_IOFS;
125}
126#endif /* CONFIG_PM_SLEEP */
127
128#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
129int pageblock_order __read_mostly;
130#endif
131
132static void __free_pages_ok(struct page *page, unsigned int order);
133
134/*
135 * results with 256, 32 in the lowmem_reserve sysctl:
136 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
137 *	1G machine -> (16M dma, 784M normal, 224M high)
138 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
139 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
140 *	HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
141 *
142 * TBD: should special case ZONE_DMA32 machines here - in those we normally
143 * don't need any ZONE_NORMAL reservation
144 */
145int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
146#ifdef CONFIG_ZONE_DMA
147	 256,
148#endif
149#ifdef CONFIG_ZONE_DMA32
150	 256,
151#endif
152#ifdef CONFIG_HIGHMEM
153	 32,
154#endif
155	 32,
156};
157
158EXPORT_SYMBOL(totalram_pages);
159
160static char * const zone_names[MAX_NR_ZONES] = {
161#ifdef CONFIG_ZONE_DMA
162	 "DMA",
163#endif
164#ifdef CONFIG_ZONE_DMA32
165	 "DMA32",
166#endif
167	 "Normal",
168#ifdef CONFIG_HIGHMEM
169	 "HighMem",
170#endif
171	 "Movable",
172};
173
174int min_free_kbytes = 1024;
175
176static unsigned long __meminitdata nr_kernel_pages;
177static unsigned long __meminitdata nr_all_pages;
178static unsigned long __meminitdata dma_reserve;
179
180#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
181  /*
182   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
183   * ranges of memory (RAM) that may be registered with add_active_range().
184   * Ranges passed to add_active_range() will be merged if possible
185   * so the number of times add_active_range() can be called is
186   * related to the number of nodes and the number of holes
187   */
188  #ifdef CONFIG_MAX_ACTIVE_REGIONS
189    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
190    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
191  #else
192    #if MAX_NUMNODES >= 32
193      /* If there can be many nodes, allow up to 50 holes per node */
194      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
195    #else
196      /* By default, allow up to 256 distinct regions */
197      #define MAX_ACTIVE_REGIONS 256
198    #endif
199  #endif
200
201  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
202  static int __meminitdata nr_nodemap_entries;
203  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
204  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
205  static unsigned long __initdata required_kernelcore;
206  static unsigned long __initdata required_movablecore;
207  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
208
209  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
210  int movable_zone;
211  EXPORT_SYMBOL(movable_zone);
212#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
213
214#if MAX_NUMNODES > 1
215int nr_node_ids __read_mostly = MAX_NUMNODES;
216int nr_online_nodes __read_mostly = 1;
217EXPORT_SYMBOL(nr_node_ids);
218EXPORT_SYMBOL(nr_online_nodes);
219#endif
220
221int page_group_by_mobility_disabled __read_mostly;
222
223static void set_pageblock_migratetype(struct page *page, int migratetype)
224{
225
226	if (unlikely(page_group_by_mobility_disabled))
227		migratetype = MIGRATE_UNMOVABLE;
228
229	set_pageblock_flags_group(page, (unsigned long)migratetype,
230					PB_migrate, PB_migrate_end);
231}
232
233bool oom_killer_disabled __read_mostly;
234
235#ifdef CONFIG_DEBUG_VM
236static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
237{
238	int ret = 0;
239	unsigned seq;
240	unsigned long pfn = page_to_pfn(page);
241
242	do {
243		seq = zone_span_seqbegin(zone);
244		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
245			ret = 1;
246		else if (pfn < zone->zone_start_pfn)
247			ret = 1;
248	} while (zone_span_seqretry(zone, seq));
249
250	return ret;
251}
252
253static int page_is_consistent(struct zone *zone, struct page *page)
254{
255	if (!pfn_valid_within(page_to_pfn(page)))
256		return 0;
257	if (zone != page_zone(page))
258		return 0;
259
260	return 1;
261}
262/*
263 * Temporary debugging check for pages not lying within a given zone.
264 */
265static int bad_range(struct zone *zone, struct page *page)
266{
267	if (page_outside_zone_boundaries(zone, page))
268		return 1;
269	if (!page_is_consistent(zone, page))
270		return 1;
271
272	return 0;
273}
274#else
275static inline int bad_range(struct zone *zone, struct page *page)
276{
277	return 0;
278}
279#endif
280
281static void bad_page(struct page *page)
282{
283	static unsigned long resume;
284	static unsigned long nr_shown;
285	static unsigned long nr_unshown;
286
287	/* Don't complain about poisoned pages */
288	if (PageHWPoison(page)) {
289		reset_page_mapcount(page); /* remove PageBuddy */
290		return;
291	}
292
293	/*
294	 * Allow a burst of 60 reports, then keep quiet for that minute;
295	 * or allow a steady drip of one report per second.
296	 */
297	if (nr_shown == 60) {
298		if (time_before(jiffies, resume)) {
299			nr_unshown++;
300			goto out;
301		}
302		if (nr_unshown) {
303			printk(KERN_ALERT
304			      "BUG: Bad page state: %lu messages suppressed\n",
305				nr_unshown);
306			nr_unshown = 0;
307		}
308		nr_shown = 0;
309	}
310	if (nr_shown++ == 0)
311		resume = jiffies + 60 * HZ;
312
313	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
314		current->comm, page_to_pfn(page));
315	dump_page(page);
316
317	dump_stack();
318out:
319	/* Leave bad fields for debug, except PageBuddy could make trouble */
320	reset_page_mapcount(page); /* remove PageBuddy */
321	add_taint(TAINT_BAD_PAGE);
322}
323
324/*
325 * Higher-order pages are called "compound pages".  They are structured thusly:
326 *
327 * The first PAGE_SIZE page is called the "head page".
328 *
329 * The remaining PAGE_SIZE pages are called "tail pages".
330 *
331 * All pages have PG_compound set.  All pages have their ->private pointing at
332 * the head page (even the head page has this).
333 *
334 * The first tail page's ->lru.next holds the address of the compound page's
335 * put_page() function.  Its ->lru.prev holds the order of allocation.
336 * This usage means that zero-order pages may not be compound.
337 */
338
339static void free_compound_page(struct page *page)
340{
341	__free_pages_ok(page, compound_order(page));
342}
343
344void prep_compound_page(struct page *page, unsigned long order)
345{
346	int i;
347	int nr_pages = 1 << order;
348
349	set_compound_page_dtor(page, free_compound_page);
350	set_compound_order(page, order);
351	__SetPageHead(page);
352	for (i = 1; i < nr_pages; i++) {
353		struct page *p = page + i;
354
355		__SetPageTail(p);
356		p->first_page = page;
357	}
358}
359
360/* update __split_huge_page_refcount if you change this function */
361static int destroy_compound_page(struct page *page, unsigned long order)
362{
363	int i;
364	int nr_pages = 1 << order;
365	int bad = 0;
366
367	if (unlikely(compound_order(page) != order) ||
368	    unlikely(!PageHead(page))) {
369		bad_page(page);
370		bad++;
371	}
372
373	__ClearPageHead(page);
374
375	for (i = 1; i < nr_pages; i++) {
376		struct page *p = page + i;
377
378		if (unlikely(!PageTail(p) || (p->first_page != page))) {
379			bad_page(page);
380			bad++;
381		}
382		__ClearPageTail(p);
383	}
384
385	return bad;
386}
387
388static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
389{
390	int i;
391
392	/*
393	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
394	 * and __GFP_HIGHMEM from hard or soft interrupt context.
395	 */
396	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
397	for (i = 0; i < (1 << order); i++)
398		clear_highpage(page + i);
399}
400
401static inline void set_page_order(struct page *page, int order)
402{
403	set_page_private(page, order);
404	__SetPageBuddy(page);
405}
406
407static inline void rmv_page_order(struct page *page)
408{
409	__ClearPageBuddy(page);
410	set_page_private(page, 0);
411}
412
413/*
414 * Locate the struct page for both the matching buddy in our
415 * pair (buddy1) and the combined O(n+1) page they form (page).
416 *
417 * 1) Any buddy B1 will have an order O twin B2 which satisfies
418 * the following equation:
419 *     B2 = B1 ^ (1 << O)
420 * For example, if the starting buddy (buddy2) is #8 its order
421 * 1 buddy is #10:
422 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
423 *
424 * 2) Any buddy B will have an order O+1 parent P which
425 * satisfies the following equation:
426 *     P = B & ~(1 << O)
427 *
428 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
429 */
430static inline unsigned long
431__find_buddy_index(unsigned long page_idx, unsigned int order)
432{
433	return page_idx ^ (1 << order);
434}
435
436/*
437 * This function checks whether a page is free && is the buddy
438 * we can do coalesce a page and its buddy if
439 * (a) the buddy is not in a hole &&
440 * (b) the buddy is in the buddy system &&
441 * (c) a page and its buddy have the same order &&
442 * (d) a page and its buddy are in the same zone.
443 *
444 * For recording whether a page is in the buddy system, we set ->_mapcount -2.
445 * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
446 *
447 * For recording page's order, we use page_private(page).
448 */
449static inline int page_is_buddy(struct page *page, struct page *buddy,
450								int order)
451{
452	if (!pfn_valid_within(page_to_pfn(buddy)))
453		return 0;
454
455	if (page_zone_id(page) != page_zone_id(buddy))
456		return 0;
457
458	if (PageBuddy(buddy) && page_order(buddy) == order) {
459		VM_BUG_ON(page_count(buddy) != 0);
460		return 1;
461	}
462	return 0;
463}
464
465/*
466 * Freeing function for a buddy system allocator.
467 *
468 * The concept of a buddy system is to maintain direct-mapped table
469 * (containing bit values) for memory blocks of various "orders".
470 * The bottom level table contains the map for the smallest allocatable
471 * units of memory (here, pages), and each level above it describes
472 * pairs of units from the levels below, hence, "buddies".
473 * At a high level, all that happens here is marking the table entry
474 * at the bottom level available, and propagating the changes upward
475 * as necessary, plus some accounting needed to play nicely with other
476 * parts of the VM system.
477 * At each level, we keep a list of pages, which are heads of continuous
478 * free pages of length of (1 << order) and marked with _mapcount -2. Page's
479 * order is recorded in page_private(page) field.
480 * So when we are allocating or freeing one, we can derive the state of the
481 * other.  That is, if we allocate a small block, and both were
482 * free, the remainder of the region must be split into blocks.
483 * If a block is freed, and its buddy is also free, then this
484 * triggers coalescing into a block of larger size.
485 *
486 * -- wli
487 */
488
489static inline void __free_one_page(struct page *page,
490		struct zone *zone, unsigned int order,
491		int migratetype)
492{
493	unsigned long page_idx;
494	unsigned long combined_idx;
495	unsigned long uninitialized_var(buddy_idx);
496	struct page *buddy;
497
498	if (unlikely(PageCompound(page)))
499		if (unlikely(destroy_compound_page(page, order)))
500			return;
501
502	VM_BUG_ON(migratetype == -1);
503
504	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
505
506	VM_BUG_ON(page_idx & ((1 << order) - 1));
507	VM_BUG_ON(bad_range(zone, page));
508
509	while (order < MAX_ORDER-1) {
510		buddy_idx = __find_buddy_index(page_idx, order);
511		buddy = page + (buddy_idx - page_idx);
512		if (!page_is_buddy(page, buddy, order))
513			break;
514
515		/* Our buddy is free, merge with it and move up one order. */
516		list_del(&buddy->lru);
517		zone->free_area[order].nr_free--;
518		rmv_page_order(buddy);
519		combined_idx = buddy_idx & page_idx;
520		page = page + (combined_idx - page_idx);
521		page_idx = combined_idx;
522		order++;
523	}
524	set_page_order(page, order);
525
526	/*
527	 * If this is not the largest possible page, check if the buddy
528	 * of the next-highest order is free. If it is, it's possible
529	 * that pages are being freed that will coalesce soon. In case,
530	 * that is happening, add the free page to the tail of the list
531	 * so it's less likely to be used soon and more likely to be merged
532	 * as a higher order page
533	 */
534	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
535		struct page *higher_page, *higher_buddy;
536		combined_idx = buddy_idx & page_idx;
537		higher_page = page + (combined_idx - page_idx);
538		buddy_idx = __find_buddy_index(combined_idx, order + 1);
539		higher_buddy = page + (buddy_idx - combined_idx);
540		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
541			list_add_tail(&page->lru,
542				&zone->free_area[order].free_list[migratetype]);
543			goto out;
544		}
545	}
546
547	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
548out:
549	zone->free_area[order].nr_free++;
550}
551
552/*
553 * free_page_mlock() -- clean up attempts to free and mlocked() page.
554 * Page should not be on lru, so no need to fix that up.
555 * free_pages_check() will verify...
556 */
557static inline void free_page_mlock(struct page *page)
558{
559	__dec_zone_page_state(page, NR_MLOCK);
560	__count_vm_event(UNEVICTABLE_MLOCKFREED);
561}
562
563static inline int free_pages_check(struct page *page)
564{
565	if (unlikely(page_mapcount(page) |
566		(page->mapping != NULL)  |
567		(atomic_read(&page->_count) != 0) |
568		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
569		bad_page(page);
570		return 1;
571	}
572	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
573		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
574	return 0;
575}
576
577/*
578 * Frees a number of pages from the PCP lists
579 * Assumes all pages on list are in same zone, and of same order.
580 * count is the number of pages to free.
581 *
582 * If the zone was previously in an "all pages pinned" state then look to
583 * see if this freeing clears that state.
584 *
585 * And clear the zone's pages_scanned counter, to hold off the "all pages are
586 * pinned" detection logic.
587 */
588static void free_pcppages_bulk(struct zone *zone, int count,
589					struct per_cpu_pages *pcp)
590{
591	int migratetype = 0;
592	int batch_free = 0;
593	int to_free = count;
594
595	spin_lock(&zone->lock);
596	zone->all_unreclaimable = 0;
597	zone->pages_scanned = 0;
598
599	while (to_free) {
600		struct page *page;
601		struct list_head *list;
602
603		/*
604		 * Remove pages from lists in a round-robin fashion. A
605		 * batch_free count is maintained that is incremented when an
606		 * empty list is encountered.  This is so more pages are freed
607		 * off fuller lists instead of spinning excessively around empty
608		 * lists
609		 */
610		do {
611			batch_free++;
612			if (++migratetype == MIGRATE_PCPTYPES)
613				migratetype = 0;
614			list = &pcp->lists[migratetype];
615		} while (list_empty(list));
616
617		/* This is the only non-empty list. Free them all. */
618		if (batch_free == MIGRATE_PCPTYPES)
619			batch_free = to_free;
620
621		do {
622			page = list_entry(list->prev, struct page, lru);
623			/* must delete as __free_one_page list manipulates */
624			list_del(&page->lru);
625			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
626			__free_one_page(page, zone, 0, page_private(page));
627			trace_mm_page_pcpu_drain(page, 0, page_private(page));
628		} while (--to_free && --batch_free && !list_empty(list));
629	}
630	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
631	spin_unlock(&zone->lock);
632}
633
634static void free_one_page(struct zone *zone, struct page *page, int order,
635				int migratetype)
636{
637	spin_lock(&zone->lock);
638	zone->all_unreclaimable = 0;
639	zone->pages_scanned = 0;
640
641	__free_one_page(page, zone, order, migratetype);
642	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
643	spin_unlock(&zone->lock);
644}
645
646static bool free_pages_prepare(struct page *page, unsigned int order)
647{
648	int i;
649	int bad = 0;
650
651	trace_mm_page_free_direct(page, order);
652	kmemcheck_free_shadow(page, order);
653
654	if (PageAnon(page))
655		page->mapping = NULL;
656	for (i = 0; i < (1 << order); i++)
657		bad += free_pages_check(page + i);
658	if (bad)
659		return false;
660
661	if (!PageHighMem(page)) {
662		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
663		debug_check_no_obj_freed(page_address(page),
664					   PAGE_SIZE << order);
665	}
666	arch_free_page(page, order);
667	kernel_map_pages(page, 1 << order, 0);
668
669	return true;
670}
671
672static void __free_pages_ok(struct page *page, unsigned int order)
673{
674	unsigned long flags;
675	int wasMlocked = __TestClearPageMlocked(page);
676
677	if (!free_pages_prepare(page, order))
678		return;
679
680	local_irq_save(flags);
681	if (unlikely(wasMlocked))
682		free_page_mlock(page);
683	__count_vm_events(PGFREE, 1 << order);
684	free_one_page(page_zone(page), page, order,
685					get_pageblock_migratetype(page));
686	local_irq_restore(flags);
687}
688
689/*
690 * permit the bootmem allocator to evade page validation on high-order frees
691 */
692void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
693{
694	if (order == 0) {
695		__ClearPageReserved(page);
696		set_page_count(page, 0);
697		set_page_refcounted(page);
698		__free_page(page);
699	} else {
700		int loop;
701
702		prefetchw(page);
703		for (loop = 0; loop < BITS_PER_LONG; loop++) {
704			struct page *p = &page[loop];
705
706			if (loop + 1 < BITS_PER_LONG)
707				prefetchw(p + 1);
708			__ClearPageReserved(p);
709			set_page_count(p, 0);
710		}
711
712		set_page_refcounted(page);
713		__free_pages(page, order);
714	}
715}
716
717
718/*
719 * The order of subdivision here is critical for the IO subsystem.
720 * Please do not alter this order without good reasons and regression
721 * testing. Specifically, as large blocks of memory are subdivided,
722 * the order in which smaller blocks are delivered depends on the order
723 * they're subdivided in this function. This is the primary factor
724 * influencing the order in which pages are delivered to the IO
725 * subsystem according to empirical testing, and this is also justified
726 * by considering the behavior of a buddy system containing a single
727 * large block of memory acted on by a series of small allocations.
728 * This behavior is a critical factor in sglist merging's success.
729 *
730 * -- wli
731 */
732static inline void expand(struct zone *zone, struct page *page,
733	int low, int high, struct free_area *area,
734	int migratetype)
735{
736	unsigned long size = 1 << high;
737
738	while (high > low) {
739		area--;
740		high--;
741		size >>= 1;
742		VM_BUG_ON(bad_range(zone, &page[size]));
743		list_add(&page[size].lru, &area->free_list[migratetype]);
744		area->nr_free++;
745		set_page_order(&page[size], high);
746	}
747}
748
749/*
750 * This page is about to be returned from the page allocator
751 */
752static inline int check_new_page(struct page *page)
753{
754	if (unlikely(page_mapcount(page) |
755		(page->mapping != NULL)  |
756		(atomic_read(&page->_count) != 0)  |
757		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
758		bad_page(page);
759		return 1;
760	}
761	return 0;
762}
763
764static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
765{
766	int i;
767
768	for (i = 0; i < (1 << order); i++) {
769		struct page *p = page + i;
770		if (unlikely(check_new_page(p)))
771			return 1;
772	}
773
774	set_page_private(page, 0);
775	set_page_refcounted(page);
776
777	arch_alloc_page(page, order);
778	kernel_map_pages(page, 1 << order, 1);
779
780	if (gfp_flags & __GFP_ZERO)
781		prep_zero_page(page, order, gfp_flags);
782
783	if (order && (gfp_flags & __GFP_COMP))
784		prep_compound_page(page, order);
785
786	return 0;
787}
788
789/*
790 * Go through the free lists for the given migratetype and remove
791 * the smallest available page from the freelists
792 */
793static inline
794struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
795						int migratetype)
796{
797	unsigned int current_order;
798	struct free_area * area;
799	struct page *page;
800
801	/* Find a page of the appropriate size in the preferred list */
802	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
803		area = &(zone->free_area[current_order]);
804		if (list_empty(&area->free_list[migratetype]))
805			continue;
806
807		page = list_entry(area->free_list[migratetype].next,
808							struct page, lru);
809		list_del(&page->lru);
810		rmv_page_order(page);
811		area->nr_free--;
812		expand(zone, page, order, current_order, area, migratetype);
813		return page;
814	}
815
816	return NULL;
817}
818
819
820/*
821 * This array describes the order lists are fallen back to when
822 * the free lists for the desirable migrate type are depleted
823 */
824static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
825	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
826	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
827	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
828	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
829};
830
831/*
832 * Move the free pages in a range to the free lists of the requested type.
833 * Note that start_page and end_pages are not aligned on a pageblock
834 * boundary. If alignment is required, use move_freepages_block()
835 */
836static int move_freepages(struct zone *zone,
837			  struct page *start_page, struct page *end_page,
838			  int migratetype)
839{
840	struct page *page;
841	unsigned long order;
842	int pages_moved = 0;
843
844#ifndef CONFIG_HOLES_IN_ZONE
845	/*
846	 * page_zone is not safe to call in this context when
847	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
848	 * anyway as we check zone boundaries in move_freepages_block().
849	 * Remove at a later date when no bug reports exist related to
850	 * grouping pages by mobility
851	 */
852	BUG_ON(page_zone(start_page) != page_zone(end_page));
853#endif
854
855	for (page = start_page; page <= end_page;) {
856		/* Make sure we are not inadvertently changing nodes */
857		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
858
859		if (!pfn_valid_within(page_to_pfn(page))) {
860			page++;
861			continue;
862		}
863
864		if (!PageBuddy(page)) {
865			page++;
866			continue;
867		}
868
869		order = page_order(page);
870		list_move(&page->lru,
871			  &zone->free_area[order].free_list[migratetype]);
872		page += 1 << order;
873		pages_moved += 1 << order;
874	}
875
876	return pages_moved;
877}
878
879static int move_freepages_block(struct zone *zone, struct page *page,
880				int migratetype)
881{
882	unsigned long start_pfn, end_pfn;
883	struct page *start_page, *end_page;
884
885	start_pfn = page_to_pfn(page);
886	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
887	start_page = pfn_to_page(start_pfn);
888	end_page = start_page + pageblock_nr_pages - 1;
889	end_pfn = start_pfn + pageblock_nr_pages - 1;
890
891	/* Do not cross zone boundaries */
892	if (start_pfn < zone->zone_start_pfn)
893		start_page = page;
894	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
895		return 0;
896
897	return move_freepages(zone, start_page, end_page, migratetype);
898}
899
900static void change_pageblock_range(struct page *pageblock_page,
901					int start_order, int migratetype)
902{
903	int nr_pageblocks = 1 << (start_order - pageblock_order);
904
905	while (nr_pageblocks--) {
906		set_pageblock_migratetype(pageblock_page, migratetype);
907		pageblock_page += pageblock_nr_pages;
908	}
909}
910
911/* Remove an element from the buddy allocator from the fallback list */
912static inline struct page *
913__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
914{
915	struct free_area * area;
916	int current_order;
917	struct page *page;
918	int migratetype, i;
919
920	/* Find the largest possible block of pages in the other list */
921	for (current_order = MAX_ORDER-1; current_order >= order;
922						--current_order) {
923		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
924			migratetype = fallbacks[start_migratetype][i];
925
926			/* MIGRATE_RESERVE handled later if necessary */
927			if (migratetype == MIGRATE_RESERVE)
928				continue;
929
930			area = &(zone->free_area[current_order]);
931			if (list_empty(&area->free_list[migratetype]))
932				continue;
933
934			page = list_entry(area->free_list[migratetype].next,
935					struct page, lru);
936			area->nr_free--;
937
938			/*
939			 * If breaking a large block of pages, move all free
940			 * pages to the preferred allocation list. If falling
941			 * back for a reclaimable kernel allocation, be more
942			 * agressive about taking ownership of free pages
943			 */
944			if (unlikely(current_order >= (pageblock_order >> 1)) ||
945					start_migratetype == MIGRATE_RECLAIMABLE ||
946					page_group_by_mobility_disabled) {
947				unsigned long pages;
948				pages = move_freepages_block(zone, page,
949								start_migratetype);
950
951				/* Claim the whole block if over half of it is free */
952				if (pages >= (1 << (pageblock_order-1)) ||
953						page_group_by_mobility_disabled)
954					set_pageblock_migratetype(page,
955								start_migratetype);
956
957				migratetype = start_migratetype;
958			}
959
960			/* Remove the page from the freelists */
961			list_del(&page->lru);
962			rmv_page_order(page);
963
964			/* Take ownership for orders >= pageblock_order */
965			if (current_order >= pageblock_order)
966				change_pageblock_range(page, current_order,
967							start_migratetype);
968
969			expand(zone, page, order, current_order, area, migratetype);
970
971			trace_mm_page_alloc_extfrag(page, order, current_order,
972				start_migratetype, migratetype);
973
974			return page;
975		}
976	}
977
978	return NULL;
979}
980
981/*
982 * Do the hard work of removing an element from the buddy allocator.
983 * Call me with the zone->lock already held.
984 */
985static struct page *__rmqueue(struct zone *zone, unsigned int order,
986						int migratetype)
987{
988	struct page *page;
989
990retry_reserve:
991	page = __rmqueue_smallest(zone, order, migratetype);
992
993	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
994		page = __rmqueue_fallback(zone, order, migratetype);
995
996		/*
997		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
998		 * is used because __rmqueue_smallest is an inline function
999		 * and we want just one call site
1000		 */
1001		if (!page) {
1002			migratetype = MIGRATE_RESERVE;
1003			goto retry_reserve;
1004		}
1005	}
1006
1007	trace_mm_page_alloc_zone_locked(page, order, migratetype);
1008	return page;
1009}
1010
1011/*
1012 * Obtain a specified number of elements from the buddy allocator, all under
1013 * a single hold of the lock, for efficiency.  Add them to the supplied list.
1014 * Returns the number of new pages which were placed at *list.
1015 */
1016static int rmqueue_bulk(struct zone *zone, unsigned int order,
1017			unsigned long count, struct list_head *list,
1018			int migratetype, int cold)
1019{
1020	int i;
1021
1022	spin_lock(&zone->lock);
1023	for (i = 0; i < count; ++i) {
1024		struct page *page = __rmqueue(zone, order, migratetype);
1025		if (unlikely(page == NULL))
1026			break;
1027
1028		/*
1029		 * Split buddy pages returned by expand() are received here
1030		 * in physical page order. The page is added to the callers and
1031		 * list and the list head then moves forward. From the callers
1032		 * perspective, the linked list is ordered by page number in
1033		 * some conditions. This is useful for IO devices that can
1034		 * merge IO requests if the physical pages are ordered
1035		 * properly.
1036		 */
1037		if (likely(cold == 0))
1038			list_add(&page->lru, list);
1039		else
1040			list_add_tail(&page->lru, list);
1041		set_page_private(page, migratetype);
1042		list = &page->lru;
1043	}
1044	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1045	spin_unlock(&zone->lock);
1046	return i;
1047}
1048
1049#ifdef CONFIG_NUMA
1050/*
1051 * Called from the vmstat counter updater to drain pagesets of this
1052 * currently executing processor on remote nodes after they have
1053 * expired.
1054 *
1055 * Note that this function must be called with the thread pinned to
1056 * a single processor.
1057 */
1058void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1059{
1060	unsigned long flags;
1061	int to_drain;
1062
1063	local_irq_save(flags);
1064	if (pcp->count >= pcp->batch)
1065		to_drain = pcp->batch;
1066	else
1067		to_drain = pcp->count;
1068	free_pcppages_bulk(zone, to_drain, pcp);
1069	pcp->count -= to_drain;
1070	local_irq_restore(flags);
1071}
1072#endif
1073
1074/*
1075 * Drain pages of the indicated processor.
1076 *
1077 * The processor must either be the current processor and the
1078 * thread pinned to the current processor or a processor that
1079 * is not online.
1080 */
1081static void drain_pages(unsigned int cpu)
1082{
1083	unsigned long flags;
1084	struct zone *zone;
1085
1086	for_each_populated_zone(zone) {
1087		struct per_cpu_pageset *pset;
1088		struct per_cpu_pages *pcp;
1089
1090		local_irq_save(flags);
1091		pset = per_cpu_ptr(zone->pageset, cpu);
1092
1093		pcp = &pset->pcp;
1094		if (pcp->count) {
1095			free_pcppages_bulk(zone, pcp->count, pcp);
1096			pcp->count = 0;
1097		}
1098		local_irq_restore(flags);
1099	}
1100}
1101
1102/*
1103 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1104 */
1105void drain_local_pages(void *arg)
1106{
1107	drain_pages(smp_processor_id());
1108}
1109
1110/*
1111 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
1112 */
1113void drain_all_pages(void)
1114{
1115	on_each_cpu(drain_local_pages, NULL, 1);
1116}
1117
1118#ifdef CONFIG_HIBERNATION
1119
1120void mark_free_pages(struct zone *zone)
1121{
1122	unsigned long pfn, max_zone_pfn;
1123	unsigned long flags;
1124	int order, t;
1125	struct list_head *curr;
1126
1127	if (!zone->spanned_pages)
1128		return;
1129
1130	spin_lock_irqsave(&zone->lock, flags);
1131
1132	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1133	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1134		if (pfn_valid(pfn)) {
1135			struct page *page = pfn_to_page(pfn);
1136
1137			if (!swsusp_page_is_forbidden(page))
1138				swsusp_unset_page_free(page);
1139		}
1140
1141	for_each_migratetype_order(order, t) {
1142		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1143			unsigned long i;
1144
1145			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1146			for (i = 0; i < (1UL << order); i++)
1147				swsusp_set_page_free(pfn_to_page(pfn + i));
1148		}
1149	}
1150	spin_unlock_irqrestore(&zone->lock, flags);
1151}
1152#endif /* CONFIG_PM */
1153
1154/*
1155 * Free a 0-order page
1156 * cold == 1 ? free a cold page : free a hot page
1157 */
1158void free_hot_cold_page(struct page *page, int cold)
1159{
1160	struct zone *zone = page_zone(page);
1161	struct per_cpu_pages *pcp;
1162	unsigned long flags;
1163	int migratetype;
1164	int wasMlocked = __TestClearPageMlocked(page);
1165
1166	if (!free_pages_prepare(page, 0))
1167		return;
1168
1169	migratetype = get_pageblock_migratetype(page);
1170	set_page_private(page, migratetype);
1171	local_irq_save(flags);
1172	if (unlikely(wasMlocked))
1173		free_page_mlock(page);
1174	__count_vm_event(PGFREE);
1175
1176	/*
1177	 * We only track unmovable, reclaimable and movable on pcp lists.
1178	 * Free ISOLATE pages back to the allocator because they are being
1179	 * offlined but treat RESERVE as movable pages so we can get those
1180	 * areas back if necessary. Otherwise, we may have to free
1181	 * excessively into the page allocator
1182	 */
1183	if (migratetype >= MIGRATE_PCPTYPES) {
1184		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1185			free_one_page(zone, page, 0, migratetype);
1186			goto out;
1187		}
1188		migratetype = MIGRATE_MOVABLE;
1189	}
1190
1191	pcp = &this_cpu_ptr(zone->pageset)->pcp;
1192	if (cold)
1193		list_add_tail(&page->lru, &pcp->lists[migratetype]);
1194	else
1195		list_add(&page->lru, &pcp->lists[migratetype]);
1196	pcp->count++;
1197	if (pcp->count >= pcp->high) {
1198		free_pcppages_bulk(zone, pcp->batch, pcp);
1199		pcp->count -= pcp->batch;
1200	}
1201
1202out:
1203	local_irq_restore(flags);
1204}
1205
1206/*
1207 * split_page takes a non-compound higher-order page, and splits it into
1208 * n (1<<order) sub-pages: page[0..n]
1209 * Each sub-page must be freed individually.
1210 *
1211 * Note: this is probably too low level an operation for use in drivers.
1212 * Please consult with lkml before using this in your driver.
1213 */
1214void split_page(struct page *page, unsigned int order)
1215{
1216	int i;
1217
1218	VM_BUG_ON(PageCompound(page));
1219	VM_BUG_ON(!page_count(page));
1220
1221#ifdef CONFIG_KMEMCHECK
1222	/*
1223	 * Split shadow pages too, because free(page[0]) would
1224	 * otherwise free the whole shadow.
1225	 */
1226	if (kmemcheck_page_is_tracked(page))
1227		split_page(virt_to_page(page[0].shadow), order);
1228#endif
1229
1230	for (i = 1; i < (1 << order); i++)
1231		set_page_refcounted(page + i);
1232}
1233
1234/*
1235 * Similar to split_page except the page is already free. As this is only
1236 * being used for migration, the migratetype of the block also changes.
1237 * As this is called with interrupts disabled, the caller is responsible
1238 * for calling arch_alloc_page() and kernel_map_page() after interrupts
1239 * are enabled.
1240 *
1241 * Note: this is probably too low level an operation for use in drivers.
1242 * Please consult with lkml before using this in your driver.
1243 */
1244int split_free_page(struct page *page)
1245{
1246	unsigned int order;
1247	unsigned long watermark;
1248	struct zone *zone;
1249
1250	BUG_ON(!PageBuddy(page));
1251
1252	zone = page_zone(page);
1253	order = page_order(page);
1254
1255	/* Obey watermarks as if the page was being allocated */
1256	watermark = low_wmark_pages(zone) + (1 << order);
1257	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1258		return 0;
1259
1260	/* Remove page from free list */
1261	list_del(&page->lru);
1262	zone->free_area[order].nr_free--;
1263	rmv_page_order(page);
1264	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
1265
1266	/* Split into individual pages */
1267	set_page_refcounted(page);
1268	split_page(page, order);
1269
1270	if (order >= pageblock_order - 1) {
1271		struct page *endpage = page + (1 << order) - 1;
1272		for (; page < endpage; page += pageblock_nr_pages)
1273			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1274	}
1275
1276	return 1 << order;
1277}
1278
1279/*
1280 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1281 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1282 * or two.
1283 */
1284static inline
1285struct page *buffered_rmqueue(struct zone *preferred_zone,
1286			struct zone *zone, int order, gfp_t gfp_flags,
1287			int migratetype)
1288{
1289	unsigned long flags;
1290	struct page *page;
1291	int cold = !!(gfp_flags & __GFP_COLD);
1292
1293again:
1294	if (likely(order == 0)) {
1295		struct per_cpu_pages *pcp;
1296		struct list_head *list;
1297
1298		local_irq_save(flags);
1299		pcp = &this_cpu_ptr(zone->pageset)->pcp;
1300		list = &pcp->lists[migratetype];
1301		if (list_empty(list)) {
1302			pcp->count += rmqueue_bulk(zone, 0,
1303					pcp->batch, list,
1304					migratetype, cold);
1305			if (unlikely(list_empty(list)))
1306				goto failed;
1307		}
1308
1309		if (cold)
1310			page = list_entry(list->prev, struct page, lru);
1311		else
1312			page = list_entry(list->next, struct page, lru);
1313
1314		list_del(&page->lru);
1315		pcp->count--;
1316	} else {
1317		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1318			/*
1319			 * __GFP_NOFAIL is not to be used in new code.
1320			 *
1321			 * All __GFP_NOFAIL callers should be fixed so that they
1322			 * properly detect and handle allocation failures.
1323			 *
1324			 * We most definitely don't want callers attempting to
1325			 * allocate greater than order-1 page units with
1326			 * __GFP_NOFAIL.
1327			 */
1328			WARN_ON_ONCE(order > 1);
1329		}
1330		spin_lock_irqsave(&zone->lock, flags);
1331		page = __rmqueue(zone, order, migratetype);
1332		spin_unlock(&zone->lock);
1333		if (!page)
1334			goto failed;
1335		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1336	}
1337
1338	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1339	zone_statistics(preferred_zone, zone, gfp_flags);
1340	local_irq_restore(flags);
1341
1342	VM_BUG_ON(bad_range(zone, page));
1343	if (prep_new_page(page, order, gfp_flags))
1344		goto again;
1345	return page;
1346
1347failed:
1348	local_irq_restore(flags);
1349	return NULL;
1350}
1351
1352/* The ALLOC_WMARK bits are used as an index to zone->watermark */
1353#define ALLOC_WMARK_MIN		WMARK_MIN
1354#define ALLOC_WMARK_LOW		WMARK_LOW
1355#define ALLOC_WMARK_HIGH	WMARK_HIGH
1356#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1357
1358/* Mask to get the watermark bits */
1359#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1360
1361#define ALLOC_HARDER		0x10 /* try to alloc harder */
1362#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1363#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1364
1365#ifdef CONFIG_FAIL_PAGE_ALLOC
1366
1367static struct fail_page_alloc_attr {
1368	struct fault_attr attr;
1369
1370	u32 ignore_gfp_highmem;
1371	u32 ignore_gfp_wait;
1372	u32 min_order;
1373
1374#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1375
1376	struct dentry *ignore_gfp_highmem_file;
1377	struct dentry *ignore_gfp_wait_file;
1378	struct dentry *min_order_file;
1379
1380#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1381
1382} fail_page_alloc = {
1383	.attr = FAULT_ATTR_INITIALIZER,
1384	.ignore_gfp_wait = 1,
1385	.ignore_gfp_highmem = 1,
1386	.min_order = 1,
1387};
1388
1389static int __init setup_fail_page_alloc(char *str)
1390{
1391	return setup_fault_attr(&fail_page_alloc.attr, str);
1392}
1393__setup("fail_page_alloc=", setup_fail_page_alloc);
1394
1395static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1396{
1397	if (order < fail_page_alloc.min_order)
1398		return 0;
1399	if (gfp_mask & __GFP_NOFAIL)
1400		return 0;
1401	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1402		return 0;
1403	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1404		return 0;
1405
1406	return should_fail(&fail_page_alloc.attr, 1 << order);
1407}
1408
1409#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1410
1411static int __init fail_page_alloc_debugfs(void)
1412{
1413	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1414	struct dentry *dir;
1415	int err;
1416
1417	err = init_fault_attr_dentries(&fail_page_alloc.attr,
1418				       "fail_page_alloc");
1419	if (err)
1420		return err;
1421	dir = fail_page_alloc.attr.dentries.dir;
1422
1423	fail_page_alloc.ignore_gfp_wait_file =
1424		debugfs_create_bool("ignore-gfp-wait", mode, dir,
1425				      &fail_page_alloc.ignore_gfp_wait);
1426
1427	fail_page_alloc.ignore_gfp_highmem_file =
1428		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1429				      &fail_page_alloc.ignore_gfp_highmem);
1430	fail_page_alloc.min_order_file =
1431		debugfs_create_u32("min-order", mode, dir,
1432				   &fail_page_alloc.min_order);
1433
1434	if (!fail_page_alloc.ignore_gfp_wait_file ||
1435            !fail_page_alloc.ignore_gfp_highmem_file ||
1436            !fail_page_alloc.min_order_file) {
1437		err = -ENOMEM;
1438		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1439		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1440		debugfs_remove(fail_page_alloc.min_order_file);
1441		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1442	}
1443
1444	return err;
1445}
1446
1447late_initcall(fail_page_alloc_debugfs);
1448
1449#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1450
1451#else /* CONFIG_FAIL_PAGE_ALLOC */
1452
1453static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1454{
1455	return 0;
1456}
1457
1458#endif /* CONFIG_FAIL_PAGE_ALLOC */
1459
1460/*
1461 * Return true if free pages are above 'mark'. This takes into account the order
1462 * of the allocation.
1463 */
1464static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1465		      int classzone_idx, int alloc_flags, long free_pages)
1466{
1467	/* free_pages my go negative - that's OK */
1468	long min = mark;
1469	int o;
1470
1471	free_pages -= (1 << order) + 1;
1472	if (alloc_flags & ALLOC_HIGH)
1473		min -= min / 2;
1474	if (alloc_flags & ALLOC_HARDER)
1475		min -= min / 4;
1476
1477	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1478		return false;
1479	for (o = 0; o < order; o++) {
1480		/* At the next order, this order's pages become unavailable */
1481		free_pages -= z->free_area[o].nr_free << o;
1482
1483		/* Require fewer higher order pages to be free */
1484		min >>= 1;
1485
1486		if (free_pages <= min)
1487			return false;
1488	}
1489	return true;
1490}
1491
1492bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1493		      int classzone_idx, int alloc_flags)
1494{
1495	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1496					zone_page_state(z, NR_FREE_PAGES));
1497}
1498
1499bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1500		      int classzone_idx, int alloc_flags)
1501{
1502	long free_pages = zone_page_state(z, NR_FREE_PAGES);
1503
1504	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1505		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1506
1507	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1508								free_pages);
1509}
1510
1511#ifdef CONFIG_NUMA
1512/*
1513 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1514 * skip over zones that are not allowed by the cpuset, or that have
1515 * been recently (in last second) found to be nearly full.  See further
1516 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1517 * that have to skip over a lot of full or unallowed zones.
1518 *
1519 * If the zonelist cache is present in the passed in zonelist, then
1520 * returns a pointer to the allowed node mask (either the current
1521 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
1522 *
1523 * If the zonelist cache is not available for this zonelist, does
1524 * nothing and returns NULL.
1525 *
1526 * If the fullzones BITMAP in the zonelist cache is stale (more than
1527 * a second since last zap'd) then we zap it out (clear its bits.)
1528 *
1529 * We hold off even calling zlc_setup, until after we've checked the
1530 * first zone in the zonelist, on the theory that most allocations will
1531 * be satisfied from that first zone, so best to examine that zone as
1532 * quickly as we can.
1533 */
1534static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1535{
1536	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1537	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1538
1539	zlc = zonelist->zlcache_ptr;
1540	if (!zlc)
1541		return NULL;
1542
1543	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1544		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1545		zlc->last_full_zap = jiffies;
1546	}
1547
1548	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1549					&cpuset_current_mems_allowed :
1550					&node_states[N_HIGH_MEMORY];
1551	return allowednodes;
1552}
1553
1554/*
1555 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1556 * if it is worth looking at further for free memory:
1557 *  1) Check that the zone isn't thought to be full (doesn't have its
1558 *     bit set in the zonelist_cache fullzones BITMAP).
1559 *  2) Check that the zones node (obtained from the zonelist_cache
1560 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1561 * Return true (non-zero) if zone is worth looking at further, or
1562 * else return false (zero) if it is not.
1563 *
1564 * This check -ignores- the distinction between various watermarks,
1565 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1566 * found to be full for any variation of these watermarks, it will
1567 * be considered full for up to one second by all requests, unless
1568 * we are so low on memory on all allowed nodes that we are forced
1569 * into the second scan of the zonelist.
1570 *
1571 * In the second scan we ignore this zonelist cache and exactly
1572 * apply the watermarks to all zones, even it is slower to do so.
1573 * We are low on memory in the second scan, and should leave no stone
1574 * unturned looking for a free page.
1575 */
1576static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1577						nodemask_t *allowednodes)
1578{
1579	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1580	int i;				/* index of *z in zonelist zones */
1581	int n;				/* node that zone *z is on */
1582
1583	zlc = zonelist->zlcache_ptr;
1584	if (!zlc)
1585		return 1;
1586
1587	i = z - zonelist->_zonerefs;
1588	n = zlc->z_to_n[i];
1589
1590	/* This zone is worth trying if it is allowed but not full */
1591	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1592}
1593
1594/*
1595 * Given 'z' scanning a zonelist, set the corresponding bit in
1596 * zlc->fullzones, so that subsequent attempts to allocate a page
1597 * from that zone don't waste time re-examining it.
1598 */
1599static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1600{
1601	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1602	int i;				/* index of *z in zonelist zones */
1603
1604	zlc = zonelist->zlcache_ptr;
1605	if (!zlc)
1606		return;
1607
1608	i = z - zonelist->_zonerefs;
1609
1610	set_bit(i, zlc->fullzones);
1611}
1612
1613#else	/* CONFIG_NUMA */
1614
1615static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1616{
1617	return NULL;
1618}
1619
1620static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1621				nodemask_t *allowednodes)
1622{
1623	return 1;
1624}
1625
1626static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1627{
1628}
1629#endif	/* CONFIG_NUMA */
1630
1631/*
1632 * get_page_from_freelist goes through the zonelist trying to allocate
1633 * a page.
1634 */
1635static struct page *
1636get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1637		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1638		struct zone *preferred_zone, int migratetype)
1639{
1640	struct zoneref *z;
1641	struct page *page = NULL;
1642	int classzone_idx;
1643	struct zone *zone;
1644	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1645	int zlc_active = 0;		/* set if using zonelist_cache */
1646	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1647
1648	classzone_idx = zone_idx(preferred_zone);
1649zonelist_scan:
1650	/*
1651	 * Scan zonelist, looking for a zone with enough free.
1652	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1653	 */
1654	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1655						high_zoneidx, nodemask) {
1656		if (NUMA_BUILD && zlc_active &&
1657			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1658				continue;
1659		if ((alloc_flags & ALLOC_CPUSET) &&
1660			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1661				goto try_next_zone;
1662
1663		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1664		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1665			unsigned long mark;
1666			int ret;
1667
1668			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1669			if (zone_watermark_ok(zone, order, mark,
1670				    classzone_idx, alloc_flags))
1671				goto try_this_zone;
1672
1673			if (zone_reclaim_mode == 0)
1674				goto this_zone_full;
1675
1676			ret = zone_reclaim(zone, gfp_mask, order);
1677			switch (ret) {
1678			case ZONE_RECLAIM_NOSCAN:
1679				/* did not scan */
1680				goto try_next_zone;
1681			case ZONE_RECLAIM_FULL:
1682				/* scanned but unreclaimable */
1683				goto this_zone_full;
1684			default:
1685				/* did we reclaim enough */
1686				if (!zone_watermark_ok(zone, order, mark,
1687						classzone_idx, alloc_flags))
1688					goto this_zone_full;
1689			}
1690		}
1691
1692try_this_zone:
1693		page = buffered_rmqueue(preferred_zone, zone, order,
1694						gfp_mask, migratetype);
1695		if (page)
1696			break;
1697this_zone_full:
1698		if (NUMA_BUILD)
1699			zlc_mark_zone_full(zonelist, z);
1700try_next_zone:
1701		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1702			/*
1703			 * we do zlc_setup after the first zone is tried but only
1704			 * if there are multiple nodes make it worthwhile
1705			 */
1706			allowednodes = zlc_setup(zonelist, alloc_flags);
1707			zlc_active = 1;
1708			did_zlc_setup = 1;
1709		}
1710	}
1711
1712	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1713		/* Disable zlc cache for second zonelist scan */
1714		zlc_active = 0;
1715		goto zonelist_scan;
1716	}
1717	return page;
1718}
1719
1720/*
1721 * Large machines with many possible nodes should not always dump per-node
1722 * meminfo in irq context.
1723 */
1724static inline bool should_suppress_show_mem(void)
1725{
1726	bool ret = false;
1727
1728#if NODES_SHIFT > 8
1729	ret = in_interrupt();
1730#endif
1731	return ret;
1732}
1733
1734static inline int
1735should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1736				unsigned long pages_reclaimed)
1737{
1738	/* Do not loop if specifically requested */
1739	if (gfp_mask & __GFP_NORETRY)
1740		return 0;
1741
1742	/*
1743	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1744	 * means __GFP_NOFAIL, but that may not be true in other
1745	 * implementations.
1746	 */
1747	if (order <= PAGE_ALLOC_COSTLY_ORDER)
1748		return 1;
1749
1750	/*
1751	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1752	 * specified, then we retry until we no longer reclaim any pages
1753	 * (above), or we've reclaimed an order of pages at least as
1754	 * large as the allocation's order. In both cases, if the
1755	 * allocation still fails, we stop retrying.
1756	 */
1757	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1758		return 1;
1759
1760	/*
1761	 * Don't let big-order allocations loop unless the caller
1762	 * explicitly requests that.
1763	 */
1764	if (gfp_mask & __GFP_NOFAIL)
1765		return 1;
1766
1767	return 0;
1768}
1769
1770static inline struct page *
1771__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1772	struct zonelist *zonelist, enum zone_type high_zoneidx,
1773	nodemask_t *nodemask, struct zone *preferred_zone,
1774	int migratetype)
1775{
1776	struct page *page;
1777
1778	/* Acquire the OOM killer lock for the zones in zonelist */
1779	if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
1780		schedule_timeout_uninterruptible(1);
1781		return NULL;
1782	}
1783
1784	/*
1785	 * Go through the zonelist yet one more time, keep very high watermark
1786	 * here, this is only to catch a parallel oom killing, we must fail if
1787	 * we're still under heavy pressure.
1788	 */
1789	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1790		order, zonelist, high_zoneidx,
1791		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1792		preferred_zone, migratetype);
1793	if (page)
1794		goto out;
1795
1796	if (!(gfp_mask & __GFP_NOFAIL)) {
1797		/* The OOM killer will not help higher order allocs */
1798		if (order > PAGE_ALLOC_COSTLY_ORDER)
1799			goto out;
1800		/* The OOM killer does not needlessly kill tasks for lowmem */
1801		if (high_zoneidx < ZONE_NORMAL)
1802			goto out;
1803		/*
1804		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
1805		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
1806		 * The caller should handle page allocation failure by itself if
1807		 * it specifies __GFP_THISNODE.
1808		 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
1809		 */
1810		if (gfp_mask & __GFP_THISNODE)
1811			goto out;
1812	}
1813	/* Exhausted what can be done so it's blamo time */
1814	out_of_memory(zonelist, gfp_mask, order, nodemask);
1815
1816out:
1817	clear_zonelist_oom(zonelist, gfp_mask);
1818	return page;
1819}
1820
1821#ifdef CONFIG_COMPACTION
1822/* Try memory compaction for high-order allocations before reclaim */
1823static struct page *
1824__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1825	struct zonelist *zonelist, enum zone_type high_zoneidx,
1826	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1827	int migratetype, unsigned long *did_some_progress,
1828	bool sync_migration)
1829{
1830	struct page *page;
1831
1832	if (!order || compaction_deferred(preferred_zone))
1833		return NULL;
1834
1835	current->flags |= PF_MEMALLOC;
1836	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
1837						nodemask, sync_migration);
1838	current->flags &= ~PF_MEMALLOC;
1839	if (*did_some_progress != COMPACT_SKIPPED) {
1840
1841		/* Page migration frees to the PCP lists but we want merging */
1842		drain_pages(get_cpu());
1843		put_cpu();
1844
1845		page = get_page_from_freelist(gfp_mask, nodemask,
1846				order, zonelist, high_zoneidx,
1847				alloc_flags, preferred_zone,
1848				migratetype);
1849		if (page) {
1850			preferred_zone->compact_considered = 0;
1851			preferred_zone->compact_defer_shift = 0;
1852			count_vm_event(COMPACTSUCCESS);
1853			return page;
1854		}
1855
1856		/*
1857		 * It's bad if compaction run occurs and fails.
1858		 * The most likely reason is that pages exist,
1859		 * but not enough to satisfy watermarks.
1860		 */
1861		count_vm_event(COMPACTFAIL);
1862		defer_compaction(preferred_zone);
1863
1864		cond_resched();
1865	}
1866
1867	return NULL;
1868}
1869#else
1870static inline struct page *
1871__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1872	struct zonelist *zonelist, enum zone_type high_zoneidx,
1873	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1874	int migratetype, unsigned long *did_some_progress,
1875	bool sync_migration)
1876{
1877	return NULL;
1878}
1879#endif /* CONFIG_COMPACTION */
1880
1881/* The really slow allocator path where we enter direct reclaim */
1882static inline struct page *
1883__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1884	struct zonelist *zonelist, enum zone_type high_zoneidx,
1885	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1886	int migratetype, unsigned long *did_some_progress)
1887{
1888	struct page *page = NULL;
1889	struct reclaim_state reclaim_state;
1890	bool drained = false;
1891
1892	cond_resched();
1893
1894	/* We now go into synchronous reclaim */
1895	cpuset_memory_pressure_bump();
1896	current->flags |= PF_MEMALLOC;
1897	lockdep_set_current_reclaim_state(gfp_mask);
1898	reclaim_state.reclaimed_slab = 0;
1899	current->reclaim_state = &reclaim_state;
1900
1901	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1902
1903	current->reclaim_state = NULL;
1904	lockdep_clear_current_reclaim_state();
1905	current->flags &= ~PF_MEMALLOC;
1906
1907	cond_resched();
1908
1909	if (unlikely(!(*did_some_progress)))
1910		return NULL;
1911
1912retry:
1913	page = get_page_from_freelist(gfp_mask, nodemask, order,
1914					zonelist, high_zoneidx,
1915					alloc_flags, preferred_zone,
1916					migratetype);
1917
1918	/*
1919	 * If an allocation failed after direct reclaim, it could be because
1920	 * pages are pinned on the per-cpu lists. Drain them and try again
1921	 */
1922	if (!page && !drained) {
1923		drain_all_pages();
1924		drained = true;
1925		goto retry;
1926	}
1927
1928	return page;
1929}
1930
1931/*
1932 * This is called in the allocator slow-path if the allocation request is of
1933 * sufficient urgency to ignore watermarks and take other desperate measures
1934 */
1935static inline struct page *
1936__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1937	struct zonelist *zonelist, enum zone_type high_zoneidx,
1938	nodemask_t *nodemask, struct zone *preferred_zone,
1939	int migratetype)
1940{
1941	struct page *page;
1942
1943	do {
1944		page = get_page_from_freelist(gfp_mask, nodemask, order,
1945			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
1946			preferred_zone, migratetype);
1947
1948		if (!page && gfp_mask & __GFP_NOFAIL)
1949			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
1950	} while (!page && (gfp_mask & __GFP_NOFAIL));
1951
1952	return page;
1953}
1954
1955static inline
1956void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1957						enum zone_type high_zoneidx,
1958						enum zone_type classzone_idx)
1959{
1960	struct zoneref *z;
1961	struct zone *zone;
1962
1963	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1964		wakeup_kswapd(zone, order, classzone_idx);
1965}
1966
1967static inline int
1968gfp_to_alloc_flags(gfp_t gfp_mask)
1969{
1970	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1971	const gfp_t wait = gfp_mask & __GFP_WAIT;
1972
1973	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1974	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
1975
1976	/*
1977	 * The caller may dip into page reserves a bit more if the caller
1978	 * cannot run direct reclaim, or if the caller has realtime scheduling
1979	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1980	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1981	 */
1982	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
1983
1984	if (!wait) {
1985		/*
1986		 * Not worth trying to allocate harder for
1987		 * __GFP_NOMEMALLOC even if it can't schedule.
1988		 */
1989		if  (!(gfp_mask & __GFP_NOMEMALLOC))
1990			alloc_flags |= ALLOC_HARDER;
1991		/*
1992		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1993		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1994		 */
1995		alloc_flags &= ~ALLOC_CPUSET;
1996	} else if (unlikely(rt_task(current)) && !in_interrupt())
1997		alloc_flags |= ALLOC_HARDER;
1998
1999	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2000		if (!in_interrupt() &&
2001		    ((current->flags & PF_MEMALLOC) ||
2002		     unlikely(test_thread_flag(TIF_MEMDIE))))
2003			alloc_flags |= ALLOC_NO_WATERMARKS;
2004	}
2005
2006	return alloc_flags;
2007}
2008
2009static inline struct page *
2010__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2011	struct zonelist *zonelist, enum zone_type high_zoneidx,
2012	nodemask_t *nodemask, struct zone *preferred_zone,
2013	int migratetype)
2014{
2015	const gfp_t wait = gfp_mask & __GFP_WAIT;
2016	struct page *page = NULL;
2017	int alloc_flags;
2018	unsigned long pages_reclaimed = 0;
2019	unsigned long did_some_progress;
2020	bool sync_migration = false;
2021
2022	/*
2023	 * In the slowpath, we sanity check order to avoid ever trying to
2024	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2025	 * be using allocators in order of preference for an area that is
2026	 * too large.
2027	 */
2028	if (order >= MAX_ORDER) {
2029		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2030		return NULL;
2031	}
2032
2033	/*
2034	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2035	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
2036	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
2037	 * using a larger set of nodes after it has established that the
2038	 * allowed per node queues are empty and that nodes are
2039	 * over allocated.
2040	 */
2041	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2042		goto nopage;
2043
2044restart:
2045	if (!(gfp_mask & __GFP_NO_KSWAPD))
2046		wake_all_kswapd(order, zonelist, high_zoneidx,
2047						zone_idx(preferred_zone));
2048
2049	/*
2050	 * OK, we're below the kswapd watermark and have kicked background
2051	 * reclaim. Now things get more complex, so set up alloc_flags according
2052	 * to how we want to proceed.
2053	 */
2054	alloc_flags = gfp_to_alloc_flags(gfp_mask);
2055
2056	/*
2057	 * Find the true preferred zone if the allocation is unconstrained by
2058	 * cpusets.
2059	 */
2060	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2061		first_zones_zonelist(zonelist, high_zoneidx, NULL,
2062					&preferred_zone);
2063
2064	/* This is the last chance, in general, before the goto nopage. */
2065	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2066			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2067			preferred_zone, migratetype);
2068	if (page)
2069		goto got_pg;
2070
2071rebalance:
2072	/* Allocate without watermarks if the context allows */
2073	if (alloc_flags & ALLOC_NO_WATERMARKS) {
2074		page = __alloc_pages_high_priority(gfp_mask, order,
2075				zonelist, high_zoneidx, nodemask,
2076				preferred_zone, migratetype);
2077		if (page)
2078			goto got_pg;
2079	}
2080
2081	/* Atomic allocations - we can't balance anything */
2082	if (!wait)
2083		goto nopage;
2084
2085	/* Avoid recursion of direct reclaim */
2086	if (current->flags & PF_MEMALLOC)
2087		goto nopage;
2088
2089	/* Avoid allocations with no watermarks from looping endlessly */
2090	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2091		goto nopage;
2092
2093	/*
2094	 * Try direct compaction. The first pass is asynchronous. Subsequent
2095	 * attempts after direct reclaim are synchronous
2096	 */
2097	page = __alloc_pages_direct_compact(gfp_mask, order,
2098					zonelist, high_zoneidx,
2099					nodemask,
2100					alloc_flags, preferred_zone,
2101					migratetype, &did_some_progress,
2102					sync_migration);
2103	if (page)
2104		goto got_pg;
2105	sync_migration = !(gfp_mask & __GFP_NO_KSWAPD);
2106
2107	/* Try direct reclaim and then allocating */
2108	page = __alloc_pages_direct_reclaim(gfp_mask, order,
2109					zonelist, high_zoneidx,
2110					nodemask,
2111					alloc_flags, preferred_zone,
2112					migratetype, &did_some_progress);
2113	if (page)
2114		goto got_pg;
2115
2116	/*
2117	 * If we failed to make any progress reclaiming, then we are
2118	 * running out of options and have to consider going OOM
2119	 */
2120	if (!did_some_progress) {
2121		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2122			if (oom_killer_disabled)
2123				goto nopage;
2124			page = __alloc_pages_may_oom(gfp_mask, order,
2125					zonelist, high_zoneidx,
2126					nodemask, preferred_zone,
2127					migratetype);
2128			if (page)
2129				goto got_pg;
2130
2131			if (!(gfp_mask & __GFP_NOFAIL)) {
2132				/*
2133				 * The oom killer is not called for high-order
2134				 * allocations that may fail, so if no progress
2135				 * is being made, there are no other options and
2136				 * retrying is unlikely to help.
2137				 */
2138				if (order > PAGE_ALLOC_COSTLY_ORDER)
2139					goto nopage;
2140				/*
2141				 * The oom killer is not called for lowmem
2142				 * allocations to prevent needlessly killing
2143				 * innocent tasks.
2144				 */
2145				if (high_zoneidx < ZONE_NORMAL)
2146					goto nopage;
2147			}
2148
2149			goto restart;
2150		}
2151	}
2152
2153	/* Check if we should retry the allocation */
2154	pages_reclaimed += did_some_progress;
2155	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
2156		/* Wait for some write requests to complete then retry */
2157		wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2158		goto rebalance;
2159	} else {
2160		/*
2161		 * High-order allocations do not necessarily loop after
2162		 * direct reclaim and reclaim/compaction depends on compaction
2163		 * being called after reclaim so call directly if necessary
2164		 */
2165		page = __alloc_pages_direct_compact(gfp_mask, order,
2166					zonelist, high_zoneidx,
2167					nodemask,
2168					alloc_flags, preferred_zone,
2169					migratetype, &did_some_progress,
2170					sync_migration);
2171		if (page)
2172			goto got_pg;
2173	}
2174
2175nopage:
2176	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
2177		unsigned int filter = SHOW_MEM_FILTER_NODES;
2178
2179		/*
2180		 * This documents exceptions given to allocations in certain
2181		 * contexts that are allowed to allocate outside current's set
2182		 * of allowed nodes.
2183		 */
2184		if (!(gfp_mask & __GFP_NOMEMALLOC))
2185			if (test_thread_flag(TIF_MEMDIE) ||
2186			    (current->flags & (PF_MEMALLOC | PF_EXITING)))
2187				filter &= ~SHOW_MEM_FILTER_NODES;
2188		if (in_interrupt() || !wait)
2189			filter &= ~SHOW_MEM_FILTER_NODES;
2190
2191		pr_warning("%s: page allocation failure. order:%d, mode:0x%x\n",
2192			current->comm, order, gfp_mask);
2193		dump_stack();
2194		if (!should_suppress_show_mem())
2195			__show_mem(filter);
2196	}
2197	return page;
2198got_pg:
2199	if (kmemcheck_enabled)
2200		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2201	return page;
2202
2203}
2204
2205/*
2206 * This is the 'heart' of the zoned buddy allocator.
2207 */
2208struct page *
2209__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2210			struct zonelist *zonelist, nodemask_t *nodemask)
2211{
2212	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2213	struct zone *preferred_zone;
2214	struct page *page;
2215	int migratetype = allocflags_to_migratetype(gfp_mask);
2216
2217	gfp_mask &= gfp_allowed_mask;
2218
2219	lockdep_trace_alloc(gfp_mask);
2220
2221	might_sleep_if(gfp_mask & __GFP_WAIT);
2222
2223	if (should_fail_alloc_page(gfp_mask, order))
2224		return NULL;
2225
2226	/*
2227	 * Check the zones suitable for the gfp_mask contain at least one
2228	 * valid zone. It's possible to have an empty zonelist as a result
2229	 * of GFP_THISNODE and a memoryless node
2230	 */
2231	if (unlikely(!zonelist->_zonerefs->zone))
2232		return NULL;
2233
2234	get_mems_allowed();
2235	/* The preferred zone is used for statistics later */
2236	first_zones_zonelist(zonelist, high_zoneidx,
2237				nodemask ? : &cpuset_current_mems_allowed,
2238				&preferred_zone);
2239	if (!preferred_zone) {
2240		put_mems_allowed();
2241		return NULL;
2242	}
2243
2244	/* First allocation attempt */
2245	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2246			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
2247			preferred_zone, migratetype);
2248	if (unlikely(!page))
2249		page = __alloc_pages_slowpath(gfp_mask, order,
2250				zonelist, high_zoneidx, nodemask,
2251				preferred_zone, migratetype);
2252	put_mems_allowed();
2253
2254	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2255	return page;
2256}
2257EXPORT_SYMBOL(__alloc_pages_nodemask);
2258
2259/*
2260 * Common helper functions.
2261 */
2262unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2263{
2264	struct page *page;
2265
2266	/*
2267	 * __get_free_pages() returns a 32-bit address, which cannot represent
2268	 * a highmem page
2269	 */
2270	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2271
2272	page = alloc_pages(gfp_mask, order);
2273	if (!page)
2274		return 0;
2275	return (unsigned long) page_address(page);
2276}
2277EXPORT_SYMBOL(__get_free_pages);
2278
2279unsigned long get_zeroed_page(gfp_t gfp_mask)
2280{
2281	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2282}
2283EXPORT_SYMBOL(get_zeroed_page);
2284
2285void __pagevec_free(struct pagevec *pvec)
2286{
2287	int i = pagevec_count(pvec);
2288
2289	while (--i >= 0) {
2290		trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
2291		free_hot_cold_page(pvec->pages[i], pvec->cold);
2292	}
2293}
2294
2295void __free_pages(struct page *page, unsigned int order)
2296{
2297	if (put_page_testzero(page)) {
2298		if (order == 0)
2299			free_hot_cold_page(page, 0);
2300		else
2301			__free_pages_ok(page, order);
2302	}
2303}
2304
2305EXPORT_SYMBOL(__free_pages);
2306
2307void free_pages(unsigned long addr, unsigned int order)
2308{
2309	if (addr != 0) {
2310		VM_BUG_ON(!virt_addr_valid((void *)addr));
2311		__free_pages(virt_to_page((void *)addr), order);
2312	}
2313}
2314
2315EXPORT_SYMBOL(free_pages);
2316
2317/**
2318 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
2319 * @size: the number of bytes to allocate
2320 * @gfp_mask: GFP flags for the allocation
2321 *
2322 * This function is similar to alloc_pages(), except that it allocates the
2323 * minimum number of pages to satisfy the request.  alloc_pages() can only
2324 * allocate memory in power-of-two pages.
2325 *
2326 * This function is also limited by MAX_ORDER.
2327 *
2328 * Memory allocated by this function must be released by free_pages_exact().
2329 */
2330void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2331{
2332	unsigned int order = get_order(size);
2333	unsigned long addr;
2334
2335	addr = __get_free_pages(gfp_mask, order);
2336	if (addr) {
2337		unsigned long alloc_end = addr + (PAGE_SIZE << order);
2338		unsigned long used = addr + PAGE_ALIGN(size);
2339
2340		split_page(virt_to_page((void *)addr), order);
2341		while (used < alloc_end) {
2342			free_page(used);
2343			used += PAGE_SIZE;
2344		}
2345	}
2346
2347	return (void *)addr;
2348}
2349EXPORT_SYMBOL(alloc_pages_exact);
2350
2351/**
2352 * free_pages_exact - release memory allocated via alloc_pages_exact()
2353 * @virt: the value returned by alloc_pages_exact.
2354 * @size: size of allocation, same value as passed to alloc_pages_exact().
2355 *
2356 * Release the memory allocated by a previous call to alloc_pages_exact.
2357 */
2358void free_pages_exact(void *virt, size_t size)
2359{
2360	unsigned long addr = (unsigned long)virt;
2361	unsigned long end = addr + PAGE_ALIGN(size);
2362
2363	while (addr < end) {
2364		free_page(addr);
2365		addr += PAGE_SIZE;
2366	}
2367}
2368EXPORT_SYMBOL(free_pages_exact);
2369
2370static unsigned int nr_free_zone_pages(int offset)
2371{
2372	struct zoneref *z;
2373	struct zone *zone;
2374
2375	/* Just pick one node, since fallback list is circular */
2376	unsigned int sum = 0;
2377
2378	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2379
2380	for_each_zone_zonelist(zone, z, zonelist, offset) {
2381		unsigned long size = zone->present_pages;
2382		unsigned long high = high_wmark_pages(zone);
2383		if (size > high)
2384			sum += size - high;
2385	}
2386
2387	return sum;
2388}
2389
2390/*
2391 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2392 */
2393unsigned int nr_free_buffer_pages(void)
2394{
2395	return nr_free_zone_pages(gfp_zone(GFP_USER));
2396}
2397EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2398
2399/*
2400 * Amount of free RAM allocatable within all zones
2401 */
2402unsigned int nr_free_pagecache_pages(void)
2403{
2404	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2405}
2406
2407static inline void show_node(struct zone *zone)
2408{
2409	if (NUMA_BUILD)
2410		printk("Node %d ", zone_to_nid(zone));
2411}
2412
2413void si_meminfo(struct sysinfo *val)
2414{
2415	val->totalram = totalram_pages;
2416	val->sharedram = 0;
2417	val->freeram = global_page_state(NR_FREE_PAGES);
2418	val->bufferram = nr_blockdev_pages();
2419	val->totalhigh = totalhigh_pages;
2420	val->freehigh = nr_free_highpages();
2421	val->mem_unit = PAGE_SIZE;
2422}
2423
2424EXPORT_SYMBOL(si_meminfo);
2425
2426#ifdef CONFIG_NUMA
2427void si_meminfo_node(struct sysinfo *val, int nid)
2428{
2429	pg_data_t *pgdat = NODE_DATA(nid);
2430
2431	val->totalram = pgdat->node_present_pages;
2432	val->freeram = node_page_state(nid, NR_FREE_PAGES);
2433#ifdef CONFIG_HIGHMEM
2434	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2435	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2436			NR_FREE_PAGES);
2437#else
2438	val->totalhigh = 0;
2439	val->freehigh = 0;
2440#endif
2441	val->mem_unit = PAGE_SIZE;
2442}
2443#endif
2444
2445/*
2446 * Determine whether the zone's node should be displayed or not, depending on
2447 * whether SHOW_MEM_FILTER_NODES was passed to __show_free_areas().
2448 */
2449static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone)
2450{
2451	bool ret = false;
2452
2453	if (!(flags & SHOW_MEM_FILTER_NODES))
2454		goto out;
2455
2456	get_mems_allowed();
2457	ret = !node_isset(zone->zone_pgdat->node_id,
2458				cpuset_current_mems_allowed);
2459	put_mems_allowed();
2460out:
2461	return ret;
2462}
2463
2464#define K(x) ((x) << (PAGE_SHIFT-10))
2465
2466/*
2467 * Show free area list (used inside shift_scroll-lock stuff)
2468 * We also calculate the percentage fragmentation. We do this by counting the
2469 * memory on each free list with the exception of the first item on the list.
2470 * Suppresses nodes that are not allowed by current's cpuset if
2471 * SHOW_MEM_FILTER_NODES is passed.
2472 */
2473void __show_free_areas(unsigned int filter)
2474{
2475	int cpu;
2476	struct zone *zone;
2477
2478	for_each_populated_zone(zone) {
2479		if (skip_free_areas_zone(filter, zone))
2480			continue;
2481		show_node(zone);
2482		printk("%s per-cpu:\n", zone->name);
2483
2484		for_each_online_cpu(cpu) {
2485			struct per_cpu_pageset *pageset;
2486
2487			pageset = per_cpu_ptr(zone->pageset, cpu);
2488
2489			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2490			       cpu, pageset->pcp.high,
2491			       pageset->pcp.batch, pageset->pcp.count);
2492		}
2493	}
2494
2495	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2496		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2497		" unevictable:%lu"
2498		" dirty:%lu writeback:%lu unstable:%lu\n"
2499		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2500		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2501		global_page_state(NR_ACTIVE_ANON),
2502		global_page_state(NR_INACTIVE_ANON),
2503		global_page_state(NR_ISOLATED_ANON),
2504		global_page_state(NR_ACTIVE_FILE),
2505		global_page_state(NR_INACTIVE_FILE),
2506		global_page_state(NR_ISOLATED_FILE),
2507		global_page_state(NR_UNEVICTABLE),
2508		global_page_state(NR_FILE_DIRTY),
2509		global_page_state(NR_WRITEBACK),
2510		global_page_state(NR_UNSTABLE_NFS),
2511		global_page_state(NR_FREE_PAGES),
2512		global_page_state(NR_SLAB_RECLAIMABLE),
2513		global_page_state(NR_SLAB_UNRECLAIMABLE),
2514		global_page_state(NR_FILE_MAPPED),
2515		global_page_state(NR_SHMEM),
2516		global_page_state(NR_PAGETABLE),
2517		global_page_state(NR_BOUNCE));
2518
2519	for_each_populated_zone(zone) {
2520		int i;
2521
2522		if (skip_free_areas_zone(filter, zone))
2523			continue;
2524		show_node(zone);
2525		printk("%s"
2526			" free:%lukB"
2527			" min:%lukB"
2528			" low:%lukB"
2529			" high:%lukB"
2530			" active_anon:%lukB"
2531			" inactive_anon:%lukB"
2532			" active_file:%lukB"
2533			" inactive_file:%lukB"
2534			" unevictable:%lukB"
2535			" isolated(anon):%lukB"
2536			" isolated(file):%lukB"
2537			" present:%lukB"
2538			" mlocked:%lukB"
2539			" dirty:%lukB"
2540			" writeback:%lukB"
2541			" mapped:%lukB"
2542			" shmem:%lukB"
2543			" slab_reclaimable:%lukB"
2544			" slab_unreclaimable:%lukB"
2545			" kernel_stack:%lukB"
2546			" pagetables:%lukB"
2547			" unstable:%lukB"
2548			" bounce:%lukB"
2549			" writeback_tmp:%lukB"
2550			" pages_scanned:%lu"
2551			" all_unreclaimable? %s"
2552			"\n",
2553			zone->name,
2554			K(zone_page_state(zone, NR_FREE_PAGES)),
2555			K(min_wmark_pages(zone)),
2556			K(low_wmark_pages(zone)),
2557			K(high_wmark_pages(zone)),
2558			K(zone_page_state(zone, NR_ACTIVE_ANON)),
2559			K(zone_page_state(zone, NR_INACTIVE_ANON)),
2560			K(zone_page_state(zone, NR_ACTIVE_FILE)),
2561			K(zone_page_state(zone, NR_INACTIVE_FILE)),
2562			K(zone_page_state(zone, NR_UNEVICTABLE)),
2563			K(zone_page_state(zone, NR_ISOLATED_ANON)),
2564			K(zone_page_state(zone, NR_ISOLATED_FILE)),
2565			K(zone->present_pages),
2566			K(zone_page_state(zone, NR_MLOCK)),
2567			K(zone_page_state(zone, NR_FILE_DIRTY)),
2568			K(zone_page_state(zone, NR_WRITEBACK)),
2569			K(zone_page_state(zone, NR_FILE_MAPPED)),
2570			K(zone_page_state(zone, NR_SHMEM)),
2571			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2572			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2573			zone_page_state(zone, NR_KERNEL_STACK) *
2574				THREAD_SIZE / 1024,
2575			K(zone_page_state(zone, NR_PAGETABLE)),
2576			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2577			K(zone_page_state(zone, NR_BOUNCE)),
2578			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2579			zone->pages_scanned,
2580			(zone->all_unreclaimable ? "yes" : "no")
2581			);
2582		printk("lowmem_reserve[]:");
2583		for (i = 0; i < MAX_NR_ZONES; i++)
2584			printk(" %lu", zone->lowmem_reserve[i]);
2585		printk("\n");
2586	}
2587
2588	for_each_populated_zone(zone) {
2589 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
2590
2591		if (skip_free_areas_zone(filter, zone))
2592			continue;
2593		show_node(zone);
2594		printk("%s: ", zone->name);
2595
2596		spin_lock_irqsave(&zone->lock, flags);
2597		for (order = 0; order < MAX_ORDER; order++) {
2598			nr[order] = zone->free_area[order].nr_free;
2599			total += nr[order] << order;
2600		}
2601		spin_unlock_irqrestore(&zone->lock, flags);
2602		for (order = 0; order < MAX_ORDER; order++)
2603			printk("%lu*%lukB ", nr[order], K(1UL) << order);
2604		printk("= %lukB\n", K(total));
2605	}
2606
2607	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2608
2609	show_swap_cache_info();
2610}
2611
2612void show_free_areas(void)
2613{
2614	__show_free_areas(0);
2615}
2616
2617static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2618{
2619	zoneref->zone = zone;
2620	zoneref->zone_idx = zone_idx(zone);
2621}
2622
2623/*
2624 * Builds allocation fallback zone lists.
2625 *
2626 * Add all populated zones of a node to the zonelist.
2627 */
2628static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2629				int nr_zones, enum zone_type zone_type)
2630{
2631	struct zone *zone;
2632
2633	BUG_ON(zone_type >= MAX_NR_ZONES);
2634	zone_type++;
2635
2636	do {
2637		zone_type--;
2638		zone = pgdat->node_zones + zone_type;
2639		if (populated_zone(zone)) {
2640			zoneref_set_zone(zone,
2641				&zonelist->_zonerefs[nr_zones++]);
2642			check_highest_zone(zone_type);
2643		}
2644
2645	} while (zone_type);
2646	return nr_zones;
2647}
2648
2649
2650/*
2651 *  zonelist_order:
2652 *  0 = automatic detection of better ordering.
2653 *  1 = order by ([node] distance, -zonetype)
2654 *  2 = order by (-zonetype, [node] distance)
2655 *
2656 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2657 *  the same zonelist. So only NUMA can configure this param.
2658 */
2659#define ZONELIST_ORDER_DEFAULT  0
2660#define ZONELIST_ORDER_NODE     1
2661#define ZONELIST_ORDER_ZONE     2
2662
2663/* zonelist order in the kernel.
2664 * set_zonelist_order() will set this to NODE or ZONE.
2665 */
2666static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2667static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2668
2669
2670#ifdef CONFIG_NUMA
2671/* The value user specified ....changed by config */
2672static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2673/* string for sysctl */
2674#define NUMA_ZONELIST_ORDER_LEN	16
2675char numa_zonelist_order[16] = "default";
2676
2677/*
2678 * interface for configure zonelist ordering.
2679 * command line option "numa_zonelist_order"
2680 *	= "[dD]efault	- default, automatic configuration.
2681 *	= "[nN]ode 	- order by node locality, then by zone within node
2682 *	= "[zZ]one      - order by zone, then by locality within zone
2683 */
2684
2685static int __parse_numa_zonelist_order(char *s)
2686{
2687	if (*s == 'd' || *s == 'D') {
2688		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2689	} else if (*s == 'n' || *s == 'N') {
2690		user_zonelist_order = ZONELIST_ORDER_NODE;
2691	} else if (*s == 'z' || *s == 'Z') {
2692		user_zonelist_order = ZONELIST_ORDER_ZONE;
2693	} else {
2694		printk(KERN_WARNING
2695			"Ignoring invalid numa_zonelist_order value:  "
2696			"%s\n", s);
2697		return -EINVAL;
2698	}
2699	return 0;
2700}
2701
2702static __init int setup_numa_zonelist_order(char *s)
2703{
2704	int ret;
2705
2706	if (!s)
2707		return 0;
2708
2709	ret = __parse_numa_zonelist_order(s);
2710	if (ret == 0)
2711		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
2712
2713	return ret;
2714}
2715early_param("numa_zonelist_order", setup_numa_zonelist_order);
2716
2717/*
2718 * sysctl handler for numa_zonelist_order
2719 */
2720int numa_zonelist_order_handler(ctl_table *table, int write,
2721		void __user *buffer, size_t *length,
2722		loff_t *ppos)
2723{
2724	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2725	int ret;
2726	static DEFINE_MUTEX(zl_order_mutex);
2727
2728	mutex_lock(&zl_order_mutex);
2729	if (write)
2730		strcpy(saved_string, (char*)table->data);
2731	ret = proc_dostring(table, write, buffer, length, ppos);
2732	if (ret)
2733		goto out;
2734	if (write) {
2735		int oldval = user_zonelist_order;
2736		if (__parse_numa_zonelist_order((char*)table->data)) {
2737			/*
2738			 * bogus value.  restore saved string
2739			 */
2740			strncpy((char*)table->data, saved_string,
2741				NUMA_ZONELIST_ORDER_LEN);
2742			user_zonelist_order = oldval;
2743		} else if (oldval != user_zonelist_order) {
2744			mutex_lock(&zonelists_mutex);
2745			build_all_zonelists(NULL);
2746			mutex_unlock(&zonelists_mutex);
2747		}
2748	}
2749out:
2750	mutex_unlock(&zl_order_mutex);
2751	return ret;
2752}
2753
2754
2755#define MAX_NODE_LOAD (nr_online_nodes)
2756static int node_load[MAX_NUMNODES];
2757
2758/**
2759 * find_next_best_node - find the next node that should appear in a given node's fallback list
2760 * @node: node whose fallback list we're appending
2761 * @used_node_mask: nodemask_t of already used nodes
2762 *
2763 * We use a number of factors to determine which is the next node that should
2764 * appear on a given node's fallback list.  The node should not have appeared
2765 * already in @node's fallback list, and it should be the next closest node
2766 * according to the distance array (which contains arbitrary distance values
2767 * from each node to each node in the system), and should also prefer nodes
2768 * with no CPUs, since presumably they'll have very little allocation pressure
2769 * on them otherwise.
2770 * It returns -1 if no node is found.
2771 */
2772static int find_next_best_node(int node, nodemask_t *used_node_mask)
2773{
2774	int n, val;
2775	int min_val = INT_MAX;
2776	int best_node = -1;
2777	const struct cpumask *tmp = cpumask_of_node(0);
2778
2779	/* Use the local node if we haven't already */
2780	if (!node_isset(node, *used_node_mask)) {
2781		node_set(node, *used_node_mask);
2782		return node;
2783	}
2784
2785	for_each_node_state(n, N_HIGH_MEMORY) {
2786
2787		/* Don't want a node to appear more than once */
2788		if (node_isset(n, *used_node_mask))
2789			continue;
2790
2791		/* Use the distance array to find the distance */
2792		val = node_distance(node, n);
2793
2794		/* Penalize nodes under us ("prefer the next node") */
2795		val += (n < node);
2796
2797		/* Give preference to headless and unused nodes */
2798		tmp = cpumask_of_node(n);
2799		if (!cpumask_empty(tmp))
2800			val += PENALTY_FOR_NODE_WITH_CPUS;
2801
2802		/* Slight preference for less loaded node */
2803		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2804		val += node_load[n];
2805
2806		if (val < min_val) {
2807			min_val = val;
2808			best_node = n;
2809		}
2810	}
2811
2812	if (best_node >= 0)
2813		node_set(best_node, *used_node_mask);
2814
2815	return best_node;
2816}
2817
2818
2819/*
2820 * Build zonelists ordered by node and zones within node.
2821 * This results in maximum locality--normal zone overflows into local
2822 * DMA zone, if any--but risks exhausting DMA zone.
2823 */
2824static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2825{
2826	int j;
2827	struct zonelist *zonelist;
2828
2829	zonelist = &pgdat->node_zonelists[0];
2830	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2831		;
2832	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2833							MAX_NR_ZONES - 1);
2834	zonelist->_zonerefs[j].zone = NULL;
2835	zonelist->_zonerefs[j].zone_idx = 0;
2836}
2837
2838/*
2839 * Build gfp_thisnode zonelists
2840 */
2841static void build_thisnode_zonelists(pg_data_t *pgdat)
2842{
2843	int j;
2844	struct zonelist *zonelist;
2845
2846	zonelist = &pgdat->node_zonelists[1];
2847	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2848	zonelist->_zonerefs[j].zone = NULL;
2849	zonelist->_zonerefs[j].zone_idx = 0;
2850}
2851
2852/*
2853 * Build zonelists ordered by zone and nodes within zones.
2854 * This results in conserving DMA zone[s] until all Normal memory is
2855 * exhausted, but results in overflowing to remote node while memory
2856 * may still exist in local DMA zone.
2857 */
2858static int node_order[MAX_NUMNODES];
2859
2860static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2861{
2862	int pos, j, node;
2863	int zone_type;		/* needs to be signed */
2864	struct zone *z;
2865	struct zonelist *zonelist;
2866
2867	zonelist = &pgdat->node_zonelists[0];
2868	pos = 0;
2869	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2870		for (j = 0; j < nr_nodes; j++) {
2871			node = node_order[j];
2872			z = &NODE_DATA(node)->node_zones[zone_type];
2873			if (populated_zone(z)) {
2874				zoneref_set_zone(z,
2875					&zonelist->_zonerefs[pos++]);
2876				check_highest_zone(zone_type);
2877			}
2878		}
2879	}
2880	zonelist->_zonerefs[pos].zone = NULL;
2881	zonelist->_zonerefs[pos].zone_idx = 0;
2882}
2883
2884static int default_zonelist_order(void)
2885{
2886	int nid, zone_type;
2887	unsigned long low_kmem_size,total_size;
2888	struct zone *z;
2889	int average_size;
2890	/*
2891         * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
2892	 * If they are really small and used heavily, the system can fall
2893	 * into OOM very easily.
2894	 * This function detect ZONE_DMA/DMA32 size and configures zone order.
2895	 */
2896	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
2897	low_kmem_size = 0;
2898	total_size = 0;
2899	for_each_online_node(nid) {
2900		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2901			z = &NODE_DATA(nid)->node_zones[zone_type];
2902			if (populated_zone(z)) {
2903				if (zone_type < ZONE_NORMAL)
2904					low_kmem_size += z->present_pages;
2905				total_size += z->present_pages;
2906			} else if (zone_type == ZONE_NORMAL) {
2907				/*
2908				 * If any node has only lowmem, then node order
2909				 * is preferred to allow kernel allocations
2910				 * locally; otherwise, they can easily infringe
2911				 * on other nodes when there is an abundance of
2912				 * lowmem available to allocate from.
2913				 */
2914				return ZONELIST_ORDER_NODE;
2915			}
2916		}
2917	}
2918	if (!low_kmem_size ||  /* there are no DMA area. */
2919	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2920		return ZONELIST_ORDER_NODE;
2921	/*
2922	 * look into each node's config.
2923  	 * If there is a node whose DMA/DMA32 memory is very big area on
2924 	 * local memory, NODE_ORDER may be suitable.
2925         */
2926	average_size = total_size /
2927				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2928	for_each_online_node(nid) {
2929		low_kmem_size = 0;
2930		total_size = 0;
2931		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2932			z = &NODE_DATA(nid)->node_zones[zone_type];
2933			if (populated_zone(z)) {
2934				if (zone_type < ZONE_NORMAL)
2935					low_kmem_size += z->present_pages;
2936				total_size += z->present_pages;
2937			}
2938		}
2939		if (low_kmem_size &&
2940		    total_size > average_size && /* ignore small node */
2941		    low_kmem_size > total_size * 70/100)
2942			return ZONELIST_ORDER_NODE;
2943	}
2944	return ZONELIST_ORDER_ZONE;
2945}
2946
2947static void set_zonelist_order(void)
2948{
2949	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2950		current_zonelist_order = default_zonelist_order();
2951	else
2952		current_zonelist_order = user_zonelist_order;
2953}
2954
2955static void build_zonelists(pg_data_t *pgdat)
2956{
2957	int j, node, load;
2958	enum zone_type i;
2959	nodemask_t used_mask;
2960	int local_node, prev_node;
2961	struct zonelist *zonelist;
2962	int order = current_zonelist_order;
2963
2964	/* initialize zonelists */
2965	for (i = 0; i < MAX_ZONELISTS; i++) {
2966		zonelist = pgdat->node_zonelists + i;
2967		zonelist->_zonerefs[0].zone = NULL;
2968		zonelist->_zonerefs[0].zone_idx = 0;
2969	}
2970
2971	/* NUMA-aware ordering of nodes */
2972	local_node = pgdat->node_id;
2973	load = nr_online_nodes;
2974	prev_node = local_node;
2975	nodes_clear(used_mask);
2976
2977	memset(node_order, 0, sizeof(node_order));
2978	j = 0;
2979
2980	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2981		int distance = node_distance(local_node, node);
2982
2983		/*
2984		 * If another node is sufficiently far away then it is better
2985		 * to reclaim pages in a zone before going off node.
2986		 */
2987		if (distance > RECLAIM_DISTANCE)
2988			zone_reclaim_mode = 1;
2989
2990		/*
2991		 * We don't want to pressure a particular node.
2992		 * So adding penalty to the first node in same
2993		 * distance group to make it round-robin.
2994		 */
2995		if (distance != node_distance(local_node, prev_node))
2996			node_load[node] = load;
2997
2998		prev_node = node;
2999		load--;
3000		if (order == ZONELIST_ORDER_NODE)
3001			build_zonelists_in_node_order(pgdat, node);
3002		else
3003			node_order[j++] = node;	/* remember order */
3004	}
3005
3006	if (order == ZONELIST_ORDER_ZONE) {
3007		/* calculate node order -- i.e., DMA last! */
3008		build_zonelists_in_zone_order(pgdat, j);
3009	}
3010
3011	build_thisnode_zonelists(pgdat);
3012}
3013
3014/* Construct the zonelist performance cache - see further mmzone.h */
3015static void build_zonelist_cache(pg_data_t *pgdat)
3016{
3017	struct zonelist *zonelist;
3018	struct zonelist_cache *zlc;
3019	struct zoneref *z;
3020
3021	zonelist = &pgdat->node_zonelists[0];
3022	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3023	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
3024	for (z = zonelist->_zonerefs; z->zone; z++)
3025		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
3026}
3027
3028#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3029/*
3030 * Return node id of node used for "local" allocations.
3031 * I.e., first node id of first zone in arg node's generic zonelist.
3032 * Used for initializing percpu 'numa_mem', which is used primarily
3033 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3034 */
3035int local_memory_node(int node)
3036{
3037	struct zone *zone;
3038
3039	(void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3040				   gfp_zone(GFP_KERNEL),
3041				   NULL,
3042				   &zone);
3043	return zone->node;
3044}
3045#endif
3046
3047#else	/* CONFIG_NUMA */
3048
3049static void set_zonelist_order(void)
3050{
3051	current_zonelist_order = ZONELIST_ORDER_ZONE;
3052}
3053
3054static void build_zonelists(pg_data_t *pgdat)
3055{
3056	int node, local_node;
3057	enum zone_type j;
3058	struct zonelist *zonelist;
3059
3060	local_node = pgdat->node_id;
3061
3062	zonelist = &pgdat->node_zonelists[0];
3063	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3064
3065	/*
3066	 * Now we build the zonelist so that it contains the zones
3067	 * of all the other nodes.
3068	 * We don't want to pressure a particular node, so when
3069	 * building the zones for node N, we make sure that the
3070	 * zones coming right after the local ones are those from
3071	 * node N+1 (modulo N)
3072	 */
3073	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3074		if (!node_online(node))
3075			continue;
3076		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3077							MAX_NR_ZONES - 1);
3078	}
3079	for (node = 0; node < local_node; node++) {
3080		if (!node_online(node))
3081			continue;
3082		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3083							MAX_NR_ZONES - 1);
3084	}
3085
3086	zonelist->_zonerefs[j].zone = NULL;
3087	zonelist->_zonerefs[j].zone_idx = 0;
3088}
3089
3090/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
3091static void build_zonelist_cache(pg_data_t *pgdat)
3092{
3093	pgdat->node_zonelists[0].zlcache_ptr = NULL;
3094}
3095
3096#endif	/* CONFIG_NUMA */
3097
3098/*
3099 * Boot pageset table. One per cpu which is going to be used for all
3100 * zones and all nodes. The parameters will be set in such a way
3101 * that an item put on a list will immediately be handed over to
3102 * the buddy list. This is safe since pageset manipulation is done
3103 * with interrupts disabled.
3104 *
3105 * The boot_pagesets must be kept even after bootup is complete for
3106 * unused processors and/or zones. They do play a role for bootstrapping
3107 * hotplugged processors.
3108 *
3109 * zoneinfo_show() and maybe other functions do
3110 * not check if the processor is online before following the pageset pointer.
3111 * Other parts of the kernel may not check if the zone is available.
3112 */
3113static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3114static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3115static void setup_zone_pageset(struct zone *zone);
3116
3117/*
3118 * Global mutex to protect against size modification of zonelists
3119 * as well as to serialize pageset setup for the new populated zone.
3120 */
3121DEFINE_MUTEX(zonelists_mutex);
3122
3123/* return values int ....just for stop_machine() */
3124static __init_refok int __build_all_zonelists(void *data)
3125{
3126	int nid;
3127	int cpu;
3128
3129#ifdef CONFIG_NUMA
3130	memset(node_load, 0, sizeof(node_load));
3131#endif
3132	for_each_online_node(nid) {
3133		pg_data_t *pgdat = NODE_DATA(nid);
3134
3135		build_zonelists(pgdat);
3136		build_zonelist_cache(pgdat);
3137	}
3138
3139	/*
3140	 * Initialize the boot_pagesets that are going to be used
3141	 * for bootstrapping processors. The real pagesets for
3142	 * each zone will be allocated later when the per cpu
3143	 * allocator is available.
3144	 *
3145	 * boot_pagesets are used also for bootstrapping offline
3146	 * cpus if the system is already booted because the pagesets
3147	 * are needed to initialize allocators on a specific cpu too.
3148	 * F.e. the percpu allocator needs the page allocator which
3149	 * needs the percpu allocator in order to allocate its pagesets
3150	 * (a chicken-egg dilemma).
3151	 */
3152	for_each_possible_cpu(cpu) {
3153		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3154
3155#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3156		/*
3157		 * We now know the "local memory node" for each node--
3158		 * i.e., the node of the first zone in the generic zonelist.
3159		 * Set up numa_mem percpu variable for on-line cpus.  During
3160		 * boot, only the boot cpu should be on-line;  we'll init the
3161		 * secondary cpus' numa_mem as they come on-line.  During
3162		 * node/memory hotplug, we'll fixup all on-line cpus.
3163		 */
3164		if (cpu_online(cpu))
3165			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3166#endif
3167	}
3168
3169	return 0;
3170}
3171
3172/*
3173 * Called with zonelists_mutex held always
3174 * unless system_state == SYSTEM_BOOTING.
3175 */
3176void build_all_zonelists(void *data)
3177{
3178	set_zonelist_order();
3179
3180	if (system_state == SYSTEM_BOOTING) {
3181		__build_all_zonelists(NULL);
3182		mminit_verify_zonelist();
3183		cpuset_init_current_mems_allowed();
3184	} else {
3185		/* we have to stop all cpus to guarantee there is no user
3186		   of zonelist */
3187#ifdef CONFIG_MEMORY_HOTPLUG
3188		if (data)
3189			setup_zone_pageset((struct zone *)data);
3190#endif
3191		stop_machine(__build_all_zonelists, NULL, NULL);
3192		/* cpuset refresh routine should be here */
3193	}
3194	vm_total_pages = nr_free_pagecache_pages();
3195	/*
3196	 * Disable grouping by mobility if the number of pages in the
3197	 * system is too low to allow the mechanism to work. It would be
3198	 * more accurate, but expensive to check per-zone. This check is
3199	 * made on memory-hotadd so a system can start with mobility
3200	 * disabled and enable it later
3201	 */
3202	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3203		page_group_by_mobility_disabled = 1;
3204	else
3205		page_group_by_mobility_disabled = 0;
3206
3207	printk("Built %i zonelists in %s order, mobility grouping %s.  "
3208		"Total pages: %ld\n",
3209			nr_online_nodes,
3210			zonelist_order_name[current_zonelist_order],
3211			page_group_by_mobility_disabled ? "off" : "on",
3212			vm_total_pages);
3213#ifdef CONFIG_NUMA
3214	printk("Policy zone: %s\n", zone_names[policy_zone]);
3215#endif
3216}
3217
3218/*
3219 * Helper functions to size the waitqueue hash table.
3220 * Essentially these want to choose hash table sizes sufficiently
3221 * large so that collisions trying to wait on pages are rare.
3222 * But in fact, the number of active page waitqueues on typical
3223 * systems is ridiculously low, less than 200. So this is even
3224 * conservative, even though it seems large.
3225 *
3226 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3227 * waitqueues, i.e. the size of the waitq table given the number of pages.
3228 */
3229#define PAGES_PER_WAITQUEUE	256
3230
3231#ifndef CONFIG_MEMORY_HOTPLUG
3232static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3233{
3234	unsigned long size = 1;
3235
3236	pages /= PAGES_PER_WAITQUEUE;
3237
3238	while (size < pages)
3239		size <<= 1;
3240
3241	/*
3242	 * Once we have dozens or even hundreds of threads sleeping
3243	 * on IO we've got bigger problems than wait queue collision.
3244	 * Limit the size of the wait table to a reasonable size.
3245	 */
3246	size = min(size, 4096UL);
3247
3248	return max(size, 4UL);
3249}
3250#else
3251/*
3252 * A zone's size might be changed by hot-add, so it is not possible to determine
3253 * a suitable size for its wait_table.  So we use the maximum size now.
3254 *
3255 * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
3256 *
3257 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
3258 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3259 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
3260 *
3261 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
3262 * or more by the traditional way. (See above).  It equals:
3263 *
3264 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
3265 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
3266 *    powerpc (64K page size)             : =  (32G +16M)byte.
3267 */
3268static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3269{
3270	return 4096UL;
3271}
3272#endif
3273
3274/*
3275 * This is an integer logarithm so that shifts can be used later
3276 * to extract the more random high bits from the multiplicative
3277 * hash function before the remainder is taken.
3278 */
3279static inline unsigned long wait_table_bits(unsigned long size)
3280{
3281	return ffz(~size);
3282}
3283
3284#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3285
3286/*
3287 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3288 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3289 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
3290 * higher will lead to a bigger reserve which will get freed as contiguous
3291 * blocks as reclaim kicks in
3292 */
3293static void setup_zone_migrate_reserve(struct zone *zone)
3294{
3295	unsigned long start_pfn, pfn, end_pfn;
3296	struct page *page;
3297	unsigned long block_migratetype;
3298	int reserve;
3299
3300	/* Get the start pfn, end pfn and the number of blocks to reserve */
3301	start_pfn = zone->zone_start_pfn;
3302	end_pfn = start_pfn + zone->spanned_pages;
3303	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3304							pageblock_order;
3305
3306	/*
3307	 * Reserve blocks are generally in place to help high-order atomic
3308	 * allocations that are short-lived. A min_free_kbytes value that
3309	 * would result in more than 2 reserve blocks for atomic allocations
3310	 * is assumed to be in place to help anti-fragmentation for the
3311	 * future allocation of hugepages at runtime.
3312	 */
3313	reserve = min(2, reserve);
3314
3315	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
3316		if (!pfn_valid(pfn))
3317			continue;
3318		page = pfn_to_page(pfn);
3319
3320		/* Watch out for overlapping nodes */
3321		if (page_to_nid(page) != zone_to_nid(zone))
3322			continue;
3323
3324		/* Blocks with reserved pages will never free, skip them. */
3325		if (PageReserved(page))
3326			continue;
3327
3328		block_migratetype = get_pageblock_migratetype(page);
3329
3330		/* If this block is reserved, account for it */
3331		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
3332			reserve--;
3333			continue;
3334		}
3335
3336		/* Suitable for reserving if this block is movable */
3337		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
3338			set_pageblock_migratetype(page, MIGRATE_RESERVE);
3339			move_freepages_block(zone, page, MIGRATE_RESERVE);
3340			reserve--;
3341			continue;
3342		}
3343
3344		/*
3345		 * If the reserve is met and this is a previous reserved block,
3346		 * take it back
3347		 */
3348		if (block_migratetype == MIGRATE_RESERVE) {
3349			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3350			move_freepages_block(zone, page, MIGRATE_MOVABLE);
3351		}
3352	}
3353}
3354
3355/*
3356 * Initially all pages are reserved - free ones are freed
3357 * up by free_all_bootmem() once the early boot process is
3358 * done. Non-atomic initialization, single-pass.
3359 */
3360void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3361		unsigned long start_pfn, enum memmap_context context)
3362{
3363	struct page *page;
3364	unsigned long end_pfn = start_pfn + size;
3365	unsigned long pfn;
3366	struct zone *z;
3367
3368	if (highest_memmap_pfn < end_pfn - 1)
3369		highest_memmap_pfn = end_pfn - 1;
3370
3371	z = &NODE_DATA(nid)->node_zones[zone];
3372	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3373		/*
3374		 * There can be holes in boot-time mem_map[]s
3375		 * handed to this function.  They do not
3376		 * exist on hotplugged memory.
3377		 */
3378		if (context == MEMMAP_EARLY) {
3379			if (!early_pfn_valid(pfn))
3380				continue;
3381			if (!early_pfn_in_nid(pfn, nid))
3382				continue;
3383		}
3384		page = pfn_to_page(pfn);
3385		set_page_links(page, zone, nid, pfn);
3386		mminit_verify_page_links(page, zone, nid, pfn);
3387		init_page_count(page);
3388		reset_page_mapcount(page);
3389		SetPageReserved(page);
3390		/*
3391		 * Mark the block movable so that blocks are reserved for
3392		 * movable at startup. This will force kernel allocations
3393		 * to reserve their blocks rather than leaking throughout
3394		 * the address space during boot when many long-lived
3395		 * kernel allocations are made. Later some blocks near
3396		 * the start are marked MIGRATE_RESERVE by
3397		 * setup_zone_migrate_reserve()
3398		 *
3399		 * bitmap is created for zone's valid pfn range. but memmap
3400		 * can be created for invalid pages (for alignment)
3401		 * check here not to call set_pageblock_migratetype() against
3402		 * pfn out of zone.
3403		 */
3404		if ((z->zone_start_pfn <= pfn)
3405		    && (pfn < z->zone_start_pfn + z->spanned_pages)
3406		    && !(pfn & (pageblock_nr_pages - 1)))
3407			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3408
3409		INIT_LIST_HEAD(&page->lru);
3410#ifdef WANT_PAGE_VIRTUAL
3411		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
3412		if (!is_highmem_idx(zone))
3413			set_page_address(page, __va(pfn << PAGE_SHIFT));
3414#endif
3415	}
3416}
3417
3418static void __meminit zone_init_free_lists(struct zone *zone)
3419{
3420	int order, t;
3421	for_each_migratetype_order(order, t) {
3422		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3423		zone->free_area[order].nr_free = 0;
3424	}
3425}
3426
3427#ifndef __HAVE_ARCH_MEMMAP_INIT
3428#define memmap_init(size, nid, zone, start_pfn) \
3429	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3430#endif
3431
3432static int zone_batchsize(struct zone *zone)
3433{
3434#ifdef CONFIG_MMU
3435	int batch;
3436
3437	/*
3438	 * The per-cpu-pages pools are set to around 1000th of the
3439	 * size of the zone.  But no more than 1/2 of a meg.
3440	 *
3441	 * OK, so we don't know how big the cache is.  So guess.
3442	 */
3443	batch = zone->present_pages / 1024;
3444	if (batch * PAGE_SIZE > 512 * 1024)
3445		batch = (512 * 1024) / PAGE_SIZE;
3446	batch /= 4;		/* We effectively *= 4 below */
3447	if (batch < 1)
3448		batch = 1;
3449
3450	/*
3451	 * Clamp the batch to a 2^n - 1 value. Having a power
3452	 * of 2 value was found to be more likely to have
3453	 * suboptimal cache aliasing properties in some cases.
3454	 *
3455	 * For example if 2 tasks are alternately allocating
3456	 * batches of pages, one task can end up with a lot
3457	 * of pages of one half of the possible page colors
3458	 * and the other with pages of the other colors.
3459	 */
3460	batch = rounddown_pow_of_two(batch + batch/2) - 1;
3461
3462	return batch;
3463
3464#else
3465	/* The deferral and batching of frees should be suppressed under NOMMU
3466	 * conditions.
3467	 *
3468	 * The problem is that NOMMU needs to be able to allocate large chunks
3469	 * of contiguous memory as there's no hardware page translation to
3470	 * assemble apparent contiguous memory from discontiguous pages.
3471	 *
3472	 * Queueing large contiguous runs of pages for batching, however,
3473	 * causes the pages to actually be freed in smaller chunks.  As there
3474	 * can be a significant delay between the individual batches being
3475	 * recycled, this leads to the once large chunks of space being
3476	 * fragmented and becoming unavailable for high-order allocations.
3477	 */
3478	return 0;
3479#endif
3480}
3481
3482static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3483{
3484	struct per_cpu_pages *pcp;
3485	int migratetype;
3486
3487	memset(p, 0, sizeof(*p));
3488
3489	pcp = &p->pcp;
3490	pcp->count = 0;
3491	pcp->high = 6 * batch;
3492	pcp->batch = max(1UL, 1 * batch);
3493	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3494		INIT_LIST_HEAD(&pcp->lists[migratetype]);
3495}
3496
3497/*
3498 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
3499 * to the value high for the pageset p.
3500 */
3501
3502static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3503				unsigned long high)
3504{
3505	struct per_cpu_pages *pcp;
3506
3507	pcp = &p->pcp;
3508	pcp->high = high;
3509	pcp->batch = max(1UL, high/4);
3510	if ((high/4) > (PAGE_SHIFT * 8))
3511		pcp->batch = PAGE_SHIFT * 8;
3512}
3513
3514static __meminit void setup_zone_pageset(struct zone *zone)
3515{
3516	int cpu;
3517
3518	zone->pageset = alloc_percpu(struct per_cpu_pageset);
3519
3520	for_each_possible_cpu(cpu) {
3521		struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3522
3523		setup_pageset(pcp, zone_batchsize(zone));
3524
3525		if (percpu_pagelist_fraction)
3526			setup_pagelist_highmark(pcp,
3527				(zone->present_pages /
3528					percpu_pagelist_fraction));
3529	}
3530}
3531
3532/*
3533 * Allocate per cpu pagesets and initialize them.
3534 * Before this call only boot pagesets were available.
3535 */
3536void __init setup_per_cpu_pageset(void)
3537{
3538	struct zone *zone;
3539
3540	for_each_populated_zone(zone)
3541		setup_zone_pageset(zone);
3542}
3543
3544static noinline __init_refok
3545int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3546{
3547	int i;
3548	struct pglist_data *pgdat = zone->zone_pgdat;
3549	size_t alloc_size;
3550
3551	/*
3552	 * The per-page waitqueue mechanism uses hashed waitqueues
3553	 * per zone.
3554	 */
3555	zone->wait_table_hash_nr_entries =
3556		 wait_table_hash_nr_entries(zone_size_pages);
3557	zone->wait_table_bits =
3558		wait_table_bits(zone->wait_table_hash_nr_entries);
3559	alloc_size = zone->wait_table_hash_nr_entries
3560					* sizeof(wait_queue_head_t);
3561
3562	if (!slab_is_available()) {
3563		zone->wait_table = (wait_queue_head_t *)
3564			alloc_bootmem_node(pgdat, alloc_size);
3565	} else {
3566		/*
3567		 * This case means that a zone whose size was 0 gets new memory
3568		 * via memory hot-add.
3569		 * But it may be the case that a new node was hot-added.  In
3570		 * this case vmalloc() will not be able to use this new node's
3571		 * memory - this wait_table must be initialized to use this new
3572		 * node itself as well.
3573		 * To use this new node's memory, further consideration will be
3574		 * necessary.
3575		 */
3576		zone->wait_table = vmalloc(alloc_size);
3577	}
3578	if (!zone->wait_table)
3579		return -ENOMEM;
3580
3581	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3582		init_waitqueue_head(zone->wait_table + i);
3583
3584	return 0;
3585}
3586
3587static int __zone_pcp_update(void *data)
3588{
3589	struct zone *zone = data;
3590	int cpu;
3591	unsigned long batch = zone_batchsize(zone), flags;
3592
3593	for_each_possible_cpu(cpu) {
3594		struct per_cpu_pageset *pset;
3595		struct per_cpu_pages *pcp;
3596
3597		pset = per_cpu_ptr(zone->pageset, cpu);
3598		pcp = &pset->pcp;
3599
3600		local_irq_save(flags);
3601		free_pcppages_bulk(zone, pcp->count, pcp);
3602		setup_pageset(pset, batch);
3603		local_irq_restore(flags);
3604	}
3605	return 0;
3606}
3607
3608void zone_pcp_update(struct zone *zone)
3609{
3610	stop_machine(__zone_pcp_update, zone, NULL);
3611}
3612
3613static __meminit void zone_pcp_init(struct zone *zone)
3614{
3615	/*
3616	 * per cpu subsystem is not up at this point. The following code
3617	 * relies on the ability of the linker to provide the
3618	 * offset of a (static) per cpu variable into the per cpu area.
3619	 */
3620	zone->pageset = &boot_pageset;
3621
3622	if (zone->present_pages)
3623		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
3624			zone->name, zone->present_pages,
3625					 zone_batchsize(zone));
3626}
3627
3628__meminit int init_currently_empty_zone(struct zone *zone,
3629					unsigned long zone_start_pfn,
3630					unsigned long size,
3631					enum memmap_context context)
3632{
3633	struct pglist_data *pgdat = zone->zone_pgdat;
3634	int ret;
3635	ret = zone_wait_table_init(zone, size);
3636	if (ret)
3637		return ret;
3638	pgdat->nr_zones = zone_idx(zone) + 1;
3639
3640	zone->zone_start_pfn = zone_start_pfn;
3641
3642	mminit_dprintk(MMINIT_TRACE, "memmap_init",
3643			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
3644			pgdat->node_id,
3645			(unsigned long)zone_idx(zone),
3646			zone_start_pfn, (zone_start_pfn + size));
3647
3648	zone_init_free_lists(zone);
3649
3650	return 0;
3651}
3652
3653#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3654/*
3655 * Basic iterator support. Return the first range of PFNs for a node
3656 * Note: nid == MAX_NUMNODES returns first region regardless of node
3657 */
3658static int __meminit first_active_region_index_in_nid(int nid)
3659{
3660	int i;
3661
3662	for (i = 0; i < nr_nodemap_entries; i++)
3663		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3664			return i;
3665
3666	return -1;
3667}
3668
3669/*
3670 * Basic iterator support. Return the next active range of PFNs for a node
3671 * Note: nid == MAX_NUMNODES returns next region regardless of node
3672 */
3673static int __meminit next_active_region_index_in_nid(int index, int nid)
3674{
3675	for (index = index + 1; index < nr_nodemap_entries; index++)
3676		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3677			return index;
3678
3679	return -1;
3680}
3681
3682#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3683/*
3684 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3685 * Architectures may implement their own version but if add_active_range()
3686 * was used and there are no special requirements, this is a convenient
3687 * alternative
3688 */
3689int __meminit __early_pfn_to_nid(unsigned long pfn)
3690{
3691	int i;
3692
3693	for (i = 0; i < nr_nodemap_entries; i++) {
3694		unsigned long start_pfn = early_node_map[i].start_pfn;
3695		unsigned long end_pfn = early_node_map[i].end_pfn;
3696
3697		if (start_pfn <= pfn && pfn < end_pfn)
3698			return early_node_map[i].nid;
3699	}
3700	/* This is a memory hole */
3701	return -1;
3702}
3703#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3704
3705int __meminit early_pfn_to_nid(unsigned long pfn)
3706{
3707	int nid;
3708
3709	nid = __early_pfn_to_nid(pfn);
3710	if (nid >= 0)
3711		return nid;
3712	/* just returns 0 */
3713	return 0;
3714}
3715
3716#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3717bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3718{
3719	int nid;
3720
3721	nid = __early_pfn_to_nid(pfn);
3722	if (nid >= 0 && nid != node)
3723		return false;
3724	return true;
3725}
3726#endif
3727
3728/* Basic iterator support to walk early_node_map[] */
3729#define for_each_active_range_index_in_nid(i, nid) \
3730	for (i = first_active_region_index_in_nid(nid); i != -1; \
3731				i = next_active_region_index_in_nid(i, nid))
3732
3733/**
3734 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3735 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3736 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3737 *
3738 * If an architecture guarantees that all ranges registered with
3739 * add_active_ranges() contain no holes and may be freed, this
3740 * this function may be used instead of calling free_bootmem() manually.
3741 */
3742void __init free_bootmem_with_active_regions(int nid,
3743						unsigned long max_low_pfn)
3744{
3745	int i;
3746
3747	for_each_active_range_index_in_nid(i, nid) {
3748		unsigned long size_pages = 0;
3749		unsigned long end_pfn = early_node_map[i].end_pfn;
3750
3751		if (early_node_map[i].start_pfn >= max_low_pfn)
3752			continue;
3753
3754		if (end_pfn > max_low_pfn)
3755			end_pfn = max_low_pfn;
3756
3757		size_pages = end_pfn - early_node_map[i].start_pfn;
3758		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3759				PFN_PHYS(early_node_map[i].start_pfn),
3760				size_pages << PAGE_SHIFT);
3761	}
3762}
3763
3764#ifdef CONFIG_HAVE_MEMBLOCK
3765/*
3766 * Basic iterator support. Return the last range of PFNs for a node
3767 * Note: nid == MAX_NUMNODES returns last region regardless of node
3768 */
3769static int __meminit last_active_region_index_in_nid(int nid)
3770{
3771	int i;
3772
3773	for (i = nr_nodemap_entries - 1; i >= 0; i--)
3774		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3775			return i;
3776
3777	return -1;
3778}
3779
3780/*
3781 * Basic iterator support. Return the previous active range of PFNs for a node
3782 * Note: nid == MAX_NUMNODES returns next region regardless of node
3783 */
3784static int __meminit previous_active_region_index_in_nid(int index, int nid)
3785{
3786	for (index = index - 1; index >= 0; index--)
3787		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3788			return index;
3789
3790	return -1;
3791}
3792
3793#define for_each_active_range_index_in_nid_reverse(i, nid) \
3794	for (i = last_active_region_index_in_nid(nid); i != -1; \
3795				i = previous_active_region_index_in_nid(i, nid))
3796
3797u64 __init find_memory_core_early(int nid, u64 size, u64 align,
3798					u64 goal, u64 limit)
3799{
3800	int i;
3801
3802	/* Need to go over early_node_map to find out good range for node */
3803	for_each_active_range_index_in_nid_reverse(i, nid) {
3804		u64 addr;
3805		u64 ei_start, ei_last;
3806		u64 final_start, final_end;
3807
3808		ei_last = early_node_map[i].end_pfn;
3809		ei_last <<= PAGE_SHIFT;
3810		ei_start = early_node_map[i].start_pfn;
3811		ei_start <<= PAGE_SHIFT;
3812
3813		final_start = max(ei_start, goal);
3814		final_end = min(ei_last, limit);
3815
3816		if (final_start >= final_end)
3817			continue;
3818
3819		addr = memblock_find_in_range(final_start, final_end, size, align);
3820
3821		if (addr == MEMBLOCK_ERROR)
3822			continue;
3823
3824		return addr;
3825	}
3826
3827	return MEMBLOCK_ERROR;
3828}
3829#endif
3830
3831int __init add_from_early_node_map(struct range *range, int az,
3832				   int nr_range, int nid)
3833{
3834	int i;
3835	u64 start, end;
3836
3837	/* need to go over early_node_map to find out good range for node */
3838	for_each_active_range_index_in_nid(i, nid) {
3839		start = early_node_map[i].start_pfn;
3840		end = early_node_map[i].end_pfn;
3841		nr_range = add_range(range, az, nr_range, start, end);
3842	}
3843	return nr_range;
3844}
3845
3846void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3847{
3848	int i;
3849	int ret;
3850
3851	for_each_active_range_index_in_nid(i, nid) {
3852		ret = work_fn(early_node_map[i].start_pfn,
3853			      early_node_map[i].end_pfn, data);
3854		if (ret)
3855			break;
3856	}
3857}
3858/**
3859 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3860 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3861 *
3862 * If an architecture guarantees that all ranges registered with
3863 * add_active_ranges() contain no holes and may be freed, this
3864 * function may be used instead of calling memory_present() manually.
3865 */
3866void __init sparse_memory_present_with_active_regions(int nid)
3867{
3868	int i;
3869
3870	for_each_active_range_index_in_nid(i, nid)
3871		memory_present(early_node_map[i].nid,
3872				early_node_map[i].start_pfn,
3873				early_node_map[i].end_pfn);
3874}
3875
3876/**
3877 * get_pfn_range_for_nid - Return the start and end page frames for a node
3878 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3879 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3880 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3881 *
3882 * It returns the start and end page frame of a node based on information
3883 * provided by an arch calling add_active_range(). If called for a node
3884 * with no available memory, a warning is printed and the start and end
3885 * PFNs will be 0.
3886 */
3887void __meminit get_pfn_range_for_nid(unsigned int nid,
3888			unsigned long *start_pfn, unsigned long *end_pfn)
3889{
3890	int i;
3891	*start_pfn = -1UL;
3892	*end_pfn = 0;
3893
3894	for_each_active_range_index_in_nid(i, nid) {
3895		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3896		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3897	}
3898
3899	if (*start_pfn == -1UL)
3900		*start_pfn = 0;
3901}
3902
3903/*
3904 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3905 * assumption is made that zones within a node are ordered in monotonic
3906 * increasing memory addresses so that the "highest" populated zone is used
3907 */
3908static void __init find_usable_zone_for_movable(void)
3909{
3910	int zone_index;
3911	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3912		if (zone_index == ZONE_MOVABLE)
3913			continue;
3914
3915		if (arch_zone_highest_possible_pfn[zone_index] >
3916				arch_zone_lowest_possible_pfn[zone_index])
3917			break;
3918	}
3919
3920	VM_BUG_ON(zone_index == -1);
3921	movable_zone = zone_index;
3922}
3923
3924/*
3925 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3926 * because it is sized independant of architecture. Unlike the other zones,
3927 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3928 * in each node depending on the size of each node and how evenly kernelcore
3929 * is distributed. This helper function adjusts the zone ranges
3930 * provided by the architecture for a given node by using the end of the
3931 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3932 * zones within a node are in order of monotonic increases memory addresses
3933 */
3934static void __meminit adjust_zone_range_for_zone_movable(int nid,
3935					unsigned long zone_type,
3936					unsigned long node_start_pfn,
3937					unsigned long node_end_pfn,
3938					unsigned long *zone_start_pfn,
3939					unsigned long *zone_end_pfn)
3940{
3941	/* Only adjust if ZONE_MOVABLE is on this node */
3942	if (zone_movable_pfn[nid]) {
3943		/* Size ZONE_MOVABLE */
3944		if (zone_type == ZONE_MOVABLE) {
3945			*zone_start_pfn = zone_movable_pfn[nid];
3946			*zone_end_pfn = min(node_end_pfn,
3947				arch_zone_highest_possible_pfn[movable_zone]);
3948
3949		/* Adjust for ZONE_MOVABLE starting within this range */
3950		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3951				*zone_end_pfn > zone_movable_pfn[nid]) {
3952			*zone_end_pfn = zone_movable_pfn[nid];
3953
3954		/* Check if this whole range is within ZONE_MOVABLE */
3955		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3956			*zone_start_pfn = *zone_end_pfn;
3957	}
3958}
3959
3960/*
3961 * Return the number of pages a zone spans in a node, including holes
3962 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3963 */
3964static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3965					unsigned long zone_type,
3966					unsigned long *ignored)
3967{
3968	unsigned long node_start_pfn, node_end_pfn;
3969	unsigned long zone_start_pfn, zone_end_pfn;
3970
3971	/* Get the start and end of the node and zone */
3972	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3973	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3974	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3975	adjust_zone_range_for_zone_movable(nid, zone_type,
3976				node_start_pfn, node_end_pfn,
3977				&zone_start_pfn, &zone_end_pfn);
3978
3979	/* Check that this node has pages within the zone's required range */
3980	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3981		return 0;
3982
3983	/* Move the zone boundaries inside the node if necessary */
3984	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3985	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3986
3987	/* Return the spanned pages */
3988	return zone_end_pfn - zone_start_pfn;
3989}
3990
3991/*
3992 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3993 * then all holes in the requested range will be accounted for.
3994 */
3995unsigned long __meminit __absent_pages_in_range(int nid,
3996				unsigned long range_start_pfn,
3997				unsigned long range_end_pfn)
3998{
3999	int i = 0;
4000	unsigned long prev_end_pfn = 0, hole_pages = 0;
4001	unsigned long start_pfn;
4002
4003	/* Find the end_pfn of the first active range of pfns in the node */
4004	i = first_active_region_index_in_nid(nid);
4005	if (i == -1)
4006		return 0;
4007
4008	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
4009
4010	/* Account for ranges before physical memory on this node */
4011	if (early_node_map[i].start_pfn > range_start_pfn)
4012		hole_pages = prev_end_pfn - range_start_pfn;
4013
4014	/* Find all holes for the zone within the node */
4015	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
4016
4017		/* No need to continue if prev_end_pfn is outside the zone */
4018		if (prev_end_pfn >= range_end_pfn)
4019			break;
4020
4021		/* Make sure the end of the zone is not within the hole */
4022		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
4023		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
4024
4025		/* Update the hole size cound and move on */
4026		if (start_pfn > range_start_pfn) {
4027			BUG_ON(prev_end_pfn > start_pfn);
4028			hole_pages += start_pfn - prev_end_pfn;
4029		}
4030		prev_end_pfn = early_node_map[i].end_pfn;
4031	}
4032
4033	/* Account for ranges past physical memory on this node */
4034	if (range_end_pfn > prev_end_pfn)
4035		hole_pages += range_end_pfn -
4036				max(range_start_pfn, prev_end_pfn);
4037
4038	return hole_pages;
4039}
4040
4041/**
4042 * absent_pages_in_range - Return number of page frames in holes within a range
4043 * @start_pfn: The start PFN to start searching for holes
4044 * @end_pfn: The end PFN to stop searching for holes
4045 *
4046 * It returns the number of pages frames in memory holes within a range.
4047 */
4048unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4049							unsigned long end_pfn)
4050{
4051	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4052}
4053
4054/* Return the number of page frames in holes in a zone on a node */
4055static unsigned long __meminit zone_absent_pages_in_node(int nid,
4056					unsigned long zone_type,
4057					unsigned long *ignored)
4058{
4059	unsigned long node_start_pfn, node_end_pfn;
4060	unsigned long zone_start_pfn, zone_end_pfn;
4061
4062	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4063	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
4064							node_start_pfn);
4065	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
4066							node_end_pfn);
4067
4068	adjust_zone_range_for_zone_movable(nid, zone_type,
4069			node_start_pfn, node_end_pfn,
4070			&zone_start_pfn, &zone_end_pfn);
4071	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4072}
4073
4074#else
4075static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4076					unsigned long zone_type,
4077					unsigned long *zones_size)
4078{
4079	return zones_size[zone_type];
4080}
4081
4082static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4083						unsigned long zone_type,
4084						unsigned long *zholes_size)
4085{
4086	if (!zholes_size)
4087		return 0;
4088
4089	return zholes_size[zone_type];
4090}
4091
4092#endif
4093
4094static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4095		unsigned long *zones_size, unsigned long *zholes_size)
4096{
4097	unsigned long realtotalpages, totalpages = 0;
4098	enum zone_type i;
4099
4100	for (i = 0; i < MAX_NR_ZONES; i++)
4101		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4102								zones_size);
4103	pgdat->node_spanned_pages = totalpages;
4104
4105	realtotalpages = totalpages;
4106	for (i = 0; i < MAX_NR_ZONES; i++)
4107		realtotalpages -=
4108			zone_absent_pages_in_node(pgdat->node_id, i,
4109								zholes_size);
4110	pgdat->node_present_pages = realtotalpages;
4111	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4112							realtotalpages);
4113}
4114
4115#ifndef CONFIG_SPARSEMEM
4116/*
4117 * Calculate the size of the zone->blockflags rounded to an unsigned long
4118 * Start by making sure zonesize is a multiple of pageblock_order by rounding
4119 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
4120 * round what is now in bits to nearest long in bits, then return it in
4121 * bytes.
4122 */
4123static unsigned long __init usemap_size(unsigned long zonesize)
4124{
4125	unsigned long usemapsize;
4126
4127	usemapsize = roundup(zonesize, pageblock_nr_pages);
4128	usemapsize = usemapsize >> pageblock_order;
4129	usemapsize *= NR_PAGEBLOCK_BITS;
4130	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4131
4132	return usemapsize / 8;
4133}
4134
4135static void __init setup_usemap(struct pglist_data *pgdat,
4136				struct zone *zone, unsigned long zonesize)
4137{
4138	unsigned long usemapsize = usemap_size(zonesize);
4139	zone->pageblock_flags = NULL;
4140	if (usemapsize)
4141		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
4142}
4143#else
4144static inline void setup_usemap(struct pglist_data *pgdat,
4145				struct zone *zone, unsigned long zonesize) {}
4146#endif /* CONFIG_SPARSEMEM */
4147
4148#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4149
4150/* Return a sensible default order for the pageblock size. */
4151static inline int pageblock_default_order(void)
4152{
4153	if (HPAGE_SHIFT > PAGE_SHIFT)
4154		return HUGETLB_PAGE_ORDER;
4155
4156	return MAX_ORDER-1;
4157}
4158
4159/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4160static inline void __init set_pageblock_order(unsigned int order)
4161{
4162	/* Check that pageblock_nr_pages has not already been setup */
4163	if (pageblock_order)
4164		return;
4165
4166	/*
4167	 * Assume the largest contiguous order of interest is a huge page.
4168	 * This value may be variable depending on boot parameters on IA64
4169	 */
4170	pageblock_order = order;
4171}
4172#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4173
4174/*
4175 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4176 * and pageblock_default_order() are unused as pageblock_order is set
4177 * at compile-time. See include/linux/pageblock-flags.h for the values of
4178 * pageblock_order based on the kernel config
4179 */
4180static inline int pageblock_default_order(unsigned int order)
4181{
4182	return MAX_ORDER-1;
4183}
4184#define set_pageblock_order(x)	do {} while (0)
4185
4186#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4187
4188/*
4189 * Set up the zone data structures:
4190 *   - mark all pages reserved
4191 *   - mark all memory queues empty
4192 *   - clear the memory bitmaps
4193 */
4194static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4195		unsigned long *zones_size, unsigned long *zholes_size)
4196{
4197	enum zone_type j;
4198	int nid = pgdat->node_id;
4199	unsigned long zone_start_pfn = pgdat->node_start_pfn;
4200	int ret;
4201
4202	pgdat_resize_init(pgdat);
4203	pgdat->nr_zones = 0;
4204	init_waitqueue_head(&pgdat->kswapd_wait);
4205	pgdat->kswapd_max_order = 0;
4206	pgdat_page_cgroup_init(pgdat);
4207
4208	for (j = 0; j < MAX_NR_ZONES; j++) {
4209		struct zone *zone = pgdat->node_zones + j;
4210		unsigned long size, realsize, memmap_pages;
4211		enum lru_list l;
4212
4213		size = zone_spanned_pages_in_node(nid, j, zones_size);
4214		realsize = size - zone_absent_pages_in_node(nid, j,
4215								zholes_size);
4216
4217		/*
4218		 * Adjust realsize so that it accounts for how much memory
4219		 * is used by this zone for memmap. This affects the watermark
4220		 * and per-cpu initialisations
4221		 */
4222		memmap_pages =
4223			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
4224		if (realsize >= memmap_pages) {
4225			realsize -= memmap_pages;
4226			if (memmap_pages)
4227				printk(KERN_DEBUG
4228				       "  %s zone: %lu pages used for memmap\n",
4229				       zone_names[j], memmap_pages);
4230		} else
4231			printk(KERN_WARNING
4232				"  %s zone: %lu pages exceeds realsize %lu\n",
4233				zone_names[j], memmap_pages, realsize);
4234
4235		/* Account for reserved pages */
4236		if (j == 0 && realsize > dma_reserve) {
4237			realsize -= dma_reserve;
4238			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
4239					zone_names[0], dma_reserve);
4240		}
4241
4242		if (!is_highmem_idx(j))
4243			nr_kernel_pages += realsize;
4244		nr_all_pages += realsize;
4245
4246		zone->spanned_pages = size;
4247		zone->present_pages = realsize;
4248#ifdef CONFIG_NUMA
4249		zone->node = nid;
4250		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
4251						/ 100;
4252		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
4253#endif
4254		zone->name = zone_names[j];
4255		spin_lock_init(&zone->lock);
4256		spin_lock_init(&zone->lru_lock);
4257		zone_seqlock_init(zone);
4258		zone->zone_pgdat = pgdat;
4259
4260		zone_pcp_init(zone);
4261		for_each_lru(l) {
4262			INIT_LIST_HEAD(&zone->lru[l].list);
4263			zone->reclaim_stat.nr_saved_scan[l] = 0;
4264		}
4265		zone->reclaim_stat.recent_rotated[0] = 0;
4266		zone->reclaim_stat.recent_rotated[1] = 0;
4267		zone->reclaim_stat.recent_scanned[0] = 0;
4268		zone->reclaim_stat.recent_scanned[1] = 0;
4269		zap_zone_vm_stats(zone);
4270		zone->flags = 0;
4271		if (!size)
4272			continue;
4273
4274		set_pageblock_order(pageblock_default_order());
4275		setup_usemap(pgdat, zone, size);
4276		ret = init_currently_empty_zone(zone, zone_start_pfn,
4277						size, MEMMAP_EARLY);
4278		BUG_ON(ret);
4279		memmap_init(size, nid, j, zone_start_pfn);
4280		zone_start_pfn += size;
4281	}
4282}
4283
4284static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4285{
4286	/* Skip empty nodes */
4287	if (!pgdat->node_spanned_pages)
4288		return;
4289
4290#ifdef CONFIG_FLAT_NODE_MEM_MAP
4291	/* ia64 gets its own node_mem_map, before this, without bootmem */
4292	if (!pgdat->node_mem_map) {
4293		unsigned long size, start, end;
4294		struct page *map;
4295
4296		/*
4297		 * The zone's endpoints aren't required to be MAX_ORDER
4298		 * aligned but the node_mem_map endpoints must be in order
4299		 * for the buddy allocator to function correctly.
4300		 */
4301		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4302		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4303		end = ALIGN(end, MAX_ORDER_NR_PAGES);
4304		size =  (end - start) * sizeof(struct page);
4305		map = alloc_remap(pgdat->node_id, size);
4306		if (!map)
4307			map = alloc_bootmem_node(pgdat, size);
4308		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4309	}
4310#ifndef CONFIG_NEED_MULTIPLE_NODES
4311	/*
4312	 * With no DISCONTIG, the global mem_map is just set as node 0's
4313	 */
4314	if (pgdat == NODE_DATA(0)) {
4315		mem_map = NODE_DATA(0)->node_mem_map;
4316#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4317		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4318			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4319#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4320	}
4321#endif
4322#endif /* CONFIG_FLAT_NODE_MEM_MAP */
4323}
4324
4325void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4326		unsigned long node_start_pfn, unsigned long *zholes_size)
4327{
4328	pg_data_t *pgdat = NODE_DATA(nid);
4329
4330	pgdat->node_id = nid;
4331	pgdat->node_start_pfn = node_start_pfn;
4332	calculate_node_totalpages(pgdat, zones_size, zholes_size);
4333
4334	alloc_node_mem_map(pgdat);
4335#ifdef CONFIG_FLAT_NODE_MEM_MAP
4336	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4337		nid, (unsigned long)pgdat,
4338		(unsigned long)pgdat->node_mem_map);
4339#endif
4340
4341	free_area_init_core(pgdat, zones_size, zholes_size);
4342}
4343
4344#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4345
4346#if MAX_NUMNODES > 1
4347/*
4348 * Figure out the number of possible node ids.
4349 */
4350static void __init setup_nr_node_ids(void)
4351{
4352	unsigned int node;
4353	unsigned int highest = 0;
4354
4355	for_each_node_mask(node, node_possible_map)
4356		highest = node;
4357	nr_node_ids = highest + 1;
4358}
4359#else
4360static inline void setup_nr_node_ids(void)
4361{
4362}
4363#endif
4364
4365/**
4366 * add_active_range - Register a range of PFNs backed by physical memory
4367 * @nid: The node ID the range resides on
4368 * @start_pfn: The start PFN of the available physical memory
4369 * @end_pfn: The end PFN of the available physical memory
4370 *
4371 * These ranges are stored in an early_node_map[] and later used by
4372 * free_area_init_nodes() to calculate zone sizes and holes. If the
4373 * range spans a memory hole, it is up to the architecture to ensure
4374 * the memory is not freed by the bootmem allocator. If possible
4375 * the range being registered will be merged with existing ranges.
4376 */
4377void __init add_active_range(unsigned int nid, unsigned long start_pfn,
4378						unsigned long end_pfn)
4379{
4380	int i;
4381
4382	mminit_dprintk(MMINIT_TRACE, "memory_register",
4383			"Entering add_active_range(%d, %#lx, %#lx) "
4384			"%d entries of %d used\n",
4385			nid, start_pfn, end_pfn,
4386			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
4387
4388	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
4389
4390	/* Merge with existing active regions if possible */
4391	for (i = 0; i < nr_nodemap_entries; i++) {
4392		if (early_node_map[i].nid != nid)
4393			continue;
4394
4395		/* Skip if an existing region covers this new one */
4396		if (start_pfn >= early_node_map[i].start_pfn &&
4397				end_pfn <= early_node_map[i].end_pfn)
4398			return;
4399
4400		/* Merge forward if suitable */
4401		if (start_pfn <= early_node_map[i].end_pfn &&
4402				end_pfn > early_node_map[i].end_pfn) {
4403			early_node_map[i].end_pfn = end_pfn;
4404			return;
4405		}
4406
4407		/* Merge backward if suitable */
4408		if (start_pfn < early_node_map[i].start_pfn &&
4409				end_pfn >= early_node_map[i].start_pfn) {
4410			early_node_map[i].start_pfn = start_pfn;
4411			return;
4412		}
4413	}
4414
4415	/* Check that early_node_map is large enough */
4416	if (i >= MAX_ACTIVE_REGIONS) {
4417		printk(KERN_CRIT "More than %d memory regions, truncating\n",
4418							MAX_ACTIVE_REGIONS);
4419		return;
4420	}
4421
4422	early_node_map[i].nid = nid;
4423	early_node_map[i].start_pfn = start_pfn;
4424	early_node_map[i].end_pfn = end_pfn;
4425	nr_nodemap_entries = i + 1;
4426}
4427
4428/**
4429 * remove_active_range - Shrink an existing registered range of PFNs
4430 * @nid: The node id the range is on that should be shrunk
4431 * @start_pfn: The new PFN of the range
4432 * @end_pfn: The new PFN of the range
4433 *
4434 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
4435 * The map is kept near the end physical page range that has already been
4436 * registered. This function allows an arch to shrink an existing registered
4437 * range.
4438 */
4439void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4440				unsigned long end_pfn)
4441{
4442	int i, j;
4443	int removed = 0;
4444
4445	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4446			  nid, start_pfn, end_pfn);
4447
4448	/* Find the old active region end and shrink */
4449	for_each_active_range_index_in_nid(i, nid) {
4450		if (early_node_map[i].start_pfn >= start_pfn &&
4451		    early_node_map[i].end_pfn <= end_pfn) {
4452			/* clear it */
4453			early_node_map[i].start_pfn = 0;
4454			early_node_map[i].end_pfn = 0;
4455			removed = 1;
4456			continue;
4457		}
4458		if (early_node_map[i].start_pfn < start_pfn &&
4459		    early_node_map[i].end_pfn > start_pfn) {
4460			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4461			early_node_map[i].end_pfn = start_pfn;
4462			if (temp_end_pfn > end_pfn)
4463				add_active_range(nid, end_pfn, temp_end_pfn);
4464			continue;
4465		}
4466		if (early_node_map[i].start_pfn >= start_pfn &&
4467		    early_node_map[i].end_pfn > end_pfn &&
4468		    early_node_map[i].start_pfn < end_pfn) {
4469			early_node_map[i].start_pfn = end_pfn;
4470			continue;
4471		}
4472	}
4473
4474	if (!removed)
4475		return;
4476
4477	/* remove the blank ones */
4478	for (i = nr_nodemap_entries - 1; i > 0; i--) {
4479		if (early_node_map[i].nid != nid)
4480			continue;
4481		if (early_node_map[i].end_pfn)
4482			continue;
4483		/* we found it, get rid of it */
4484		for (j = i; j < nr_nodemap_entries - 1; j++)
4485			memcpy(&early_node_map[j], &early_node_map[j+1],
4486				sizeof(early_node_map[j]));
4487		j = nr_nodemap_entries - 1;
4488		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4489		nr_nodemap_entries--;
4490	}
4491}
4492
4493/**
4494 * remove_all_active_ranges - Remove all currently registered regions
4495 *
4496 * During discovery, it may be found that a table like SRAT is invalid
4497 * and an alternative discovery method must be used. This function removes
4498 * all currently registered regions.
4499 */
4500void __init remove_all_active_ranges(void)
4501{
4502	memset(early_node_map, 0, sizeof(early_node_map));
4503	nr_nodemap_entries = 0;
4504}
4505
4506/* Compare two active node_active_regions */
4507static int __init cmp_node_active_region(const void *a, const void *b)
4508{
4509	struct node_active_region *arange = (struct node_active_region *)a;
4510	struct node_active_region *brange = (struct node_active_region *)b;
4511
4512	/* Done this way to avoid overflows */
4513	if (arange->start_pfn > brange->start_pfn)
4514		return 1;
4515	if (arange->start_pfn < brange->start_pfn)
4516		return -1;
4517
4518	return 0;
4519}
4520
4521/* sort the node_map by start_pfn */
4522void __init sort_node_map(void)
4523{
4524	sort(early_node_map, (size_t)nr_nodemap_entries,
4525			sizeof(struct node_active_region),
4526			cmp_node_active_region, NULL);
4527}
4528
4529/* Find the lowest pfn for a node */
4530static unsigned long __init find_min_pfn_for_node(int nid)
4531{
4532	int i;
4533	unsigned long min_pfn = ULONG_MAX;
4534
4535	/* Assuming a sorted map, the first range found has the starting pfn */
4536	for_each_active_range_index_in_nid(i, nid)
4537		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4538
4539	if (min_pfn == ULONG_MAX) {
4540		printk(KERN_WARNING
4541			"Could not find start_pfn for node %d\n", nid);
4542		return 0;
4543	}
4544
4545	return min_pfn;
4546}
4547
4548/**
4549 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4550 *
4551 * It returns the minimum PFN based on information provided via
4552 * add_active_range().
4553 */
4554unsigned long __init find_min_pfn_with_active_regions(void)
4555{
4556	return find_min_pfn_for_node(MAX_NUMNODES);
4557}
4558
4559/*
4560 * early_calculate_totalpages()
4561 * Sum pages in active regions for movable zone.
4562 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4563 */
4564static unsigned long __init early_calculate_totalpages(void)
4565{
4566	int i;
4567	unsigned long totalpages = 0;
4568
4569	for (i = 0; i < nr_nodemap_entries; i++) {
4570		unsigned long pages = early_node_map[i].end_pfn -
4571						early_node_map[i].start_pfn;
4572		totalpages += pages;
4573		if (pages)
4574			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4575	}
4576  	return totalpages;
4577}
4578
4579/*
4580 * Find the PFN the Movable zone begins in each node. Kernel memory
4581 * is spread evenly between nodes as long as the nodes have enough
4582 * memory. When they don't, some nodes will have more kernelcore than
4583 * others
4584 */
4585static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4586{
4587	int i, nid;
4588	unsigned long usable_startpfn;
4589	unsigned long kernelcore_node, kernelcore_remaining;
4590	/* save the state before borrow the nodemask */
4591	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4592	unsigned long totalpages = early_calculate_totalpages();
4593	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4594
4595	/*
4596	 * If movablecore was specified, calculate what size of
4597	 * kernelcore that corresponds so that memory usable for
4598	 * any allocation type is evenly spread. If both kernelcore
4599	 * and movablecore are specified, then the value of kernelcore
4600	 * will be used for required_kernelcore if it's greater than
4601	 * what movablecore would have allowed.
4602	 */
4603	if (required_movablecore) {
4604		unsigned long corepages;
4605
4606		/*
4607		 * Round-up so that ZONE_MOVABLE is at least as large as what
4608		 * was requested by the user
4609		 */
4610		required_movablecore =
4611			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4612		corepages = totalpages - required_movablecore;
4613
4614		required_kernelcore = max(required_kernelcore, corepages);
4615	}
4616
4617	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
4618	if (!required_kernelcore)
4619		goto out;
4620
4621	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4622	find_usable_zone_for_movable();
4623	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4624
4625restart:
4626	/* Spread kernelcore memory as evenly as possible throughout nodes */
4627	kernelcore_node = required_kernelcore / usable_nodes;
4628	for_each_node_state(nid, N_HIGH_MEMORY) {
4629		/*
4630		 * Recalculate kernelcore_node if the division per node
4631		 * now exceeds what is necessary to satisfy the requested
4632		 * amount of memory for the kernel
4633		 */
4634		if (required_kernelcore < kernelcore_node)
4635			kernelcore_node = required_kernelcore / usable_nodes;
4636
4637		/*
4638		 * As the map is walked, we track how much memory is usable
4639		 * by the kernel using kernelcore_remaining. When it is
4640		 * 0, the rest of the node is usable by ZONE_MOVABLE
4641		 */
4642		kernelcore_remaining = kernelcore_node;
4643
4644		/* Go through each range of PFNs within this node */
4645		for_each_active_range_index_in_nid(i, nid) {
4646			unsigned long start_pfn, end_pfn;
4647			unsigned long size_pages;
4648
4649			start_pfn = max(early_node_map[i].start_pfn,
4650						zone_movable_pfn[nid]);
4651			end_pfn = early_node_map[i].end_pfn;
4652			if (start_pfn >= end_pfn)
4653				continue;
4654
4655			/* Account for what is only usable for kernelcore */
4656			if (start_pfn < usable_startpfn) {
4657				unsigned long kernel_pages;
4658				kernel_pages = min(end_pfn, usable_startpfn)
4659								- start_pfn;
4660
4661				kernelcore_remaining -= min(kernel_pages,
4662							kernelcore_remaining);
4663				required_kernelcore -= min(kernel_pages,
4664							required_kernelcore);
4665
4666				/* Continue if range is now fully accounted */
4667				if (end_pfn <= usable_startpfn) {
4668
4669					/*
4670					 * Push zone_movable_pfn to the end so
4671					 * that if we have to rebalance
4672					 * kernelcore across nodes, we will
4673					 * not double account here
4674					 */
4675					zone_movable_pfn[nid] = end_pfn;
4676					continue;
4677				}
4678				start_pfn = usable_startpfn;
4679			}
4680
4681			/*
4682			 * The usable PFN range for ZONE_MOVABLE is from
4683			 * start_pfn->end_pfn. Calculate size_pages as the
4684			 * number of pages used as kernelcore
4685			 */
4686			size_pages = end_pfn - start_pfn;
4687			if (size_pages > kernelcore_remaining)
4688				size_pages = kernelcore_remaining;
4689			zone_movable_pfn[nid] = start_pfn + size_pages;
4690
4691			/*
4692			 * Some kernelcore has been met, update counts and
4693			 * break if the kernelcore for this node has been
4694			 * satisified
4695			 */
4696			required_kernelcore -= min(required_kernelcore,
4697								size_pages);
4698			kernelcore_remaining -= size_pages;
4699			if (!kernelcore_remaining)
4700				break;
4701		}
4702	}
4703
4704	/*
4705	 * If there is still required_kernelcore, we do another pass with one
4706	 * less node in the count. This will push zone_movable_pfn[nid] further
4707	 * along on the nodes that still have memory until kernelcore is
4708	 * satisified
4709	 */
4710	usable_nodes--;
4711	if (usable_nodes && required_kernelcore > usable_nodes)
4712		goto restart;
4713
4714	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4715	for (nid = 0; nid < MAX_NUMNODES; nid++)
4716		zone_movable_pfn[nid] =
4717			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4718
4719out:
4720	/* restore the node_state */
4721	node_states[N_HIGH_MEMORY] = saved_node_state;
4722}
4723
4724/* Any regular memory on that node ? */
4725static void check_for_regular_memory(pg_data_t *pgdat)
4726{
4727#ifdef CONFIG_HIGHMEM
4728	enum zone_type zone_type;
4729
4730	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4731		struct zone *zone = &pgdat->node_zones[zone_type];
4732		if (zone->present_pages)
4733			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4734	}
4735#endif
4736}
4737
4738/**
4739 * free_area_init_nodes - Initialise all pg_data_t and zone data
4740 * @max_zone_pfn: an array of max PFNs for each zone
4741 *
4742 * This will call free_area_init_node() for each active node in the system.
4743 * Using the page ranges provided by add_active_range(), the size of each
4744 * zone in each node and their holes is calculated. If the maximum PFN
4745 * between two adjacent zones match, it is assumed that the zone is empty.
4746 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4747 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4748 * starts where the previous one ended. For example, ZONE_DMA32 starts
4749 * at arch_max_dma_pfn.
4750 */
4751void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4752{
4753	unsigned long nid;
4754	int i;
4755
4756	/* Sort early_node_map as initialisation assumes it is sorted */
4757	sort_node_map();
4758
4759	/* Record where the zone boundaries are */
4760	memset(arch_zone_lowest_possible_pfn, 0,
4761				sizeof(arch_zone_lowest_possible_pfn));
4762	memset(arch_zone_highest_possible_pfn, 0,
4763				sizeof(arch_zone_highest_possible_pfn));
4764	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4765	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4766	for (i = 1; i < MAX_NR_ZONES; i++) {
4767		if (i == ZONE_MOVABLE)
4768			continue;
4769		arch_zone_lowest_possible_pfn[i] =
4770			arch_zone_highest_possible_pfn[i-1];
4771		arch_zone_highest_possible_pfn[i] =
4772			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4773	}
4774	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4775	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4776
4777	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4778	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4779	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4780
4781	/* Print out the zone ranges */
4782	printk("Zone PFN ranges:\n");
4783	for (i = 0; i < MAX_NR_ZONES; i++) {
4784		if (i == ZONE_MOVABLE)
4785			continue;
4786		printk("  %-8s ", zone_names[i]);
4787		if (arch_zone_lowest_possible_pfn[i] ==
4788				arch_zone_highest_possible_pfn[i])
4789			printk("empty\n");
4790		else
4791			printk("%0#10lx -> %0#10lx\n",
4792				arch_zone_lowest_possible_pfn[i],
4793				arch_zone_highest_possible_pfn[i]);
4794	}
4795
4796	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4797	printk("Movable zone start PFN for each node\n");
4798	for (i = 0; i < MAX_NUMNODES; i++) {
4799		if (zone_movable_pfn[i])
4800			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4801	}
4802
4803	/* Print out the early_node_map[] */
4804	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4805	for (i = 0; i < nr_nodemap_entries; i++)
4806		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4807						early_node_map[i].start_pfn,
4808						early_node_map[i].end_pfn);
4809
4810	/* Initialise every node */
4811	mminit_verify_pageflags_layout();
4812	setup_nr_node_ids();
4813	for_each_online_node(nid) {
4814		pg_data_t *pgdat = NODE_DATA(nid);
4815		free_area_init_node(nid, NULL,
4816				find_min_pfn_for_node(nid), NULL);
4817
4818		/* Any memory on that node */
4819		if (pgdat->node_present_pages)
4820			node_set_state(nid, N_HIGH_MEMORY);
4821		check_for_regular_memory(pgdat);
4822	}
4823}
4824
4825static int __init cmdline_parse_core(char *p, unsigned long *core)
4826{
4827	unsigned long long coremem;
4828	if (!p)
4829		return -EINVAL;
4830
4831	coremem = memparse(p, &p);
4832	*core = coremem >> PAGE_SHIFT;
4833
4834	/* Paranoid check that UL is enough for the coremem value */
4835	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4836
4837	return 0;
4838}
4839
4840/*
4841 * kernelcore=size sets the amount of memory for use for allocations that
4842 * cannot be reclaimed or migrated.
4843 */
4844static int __init cmdline_parse_kernelcore(char *p)
4845{
4846	return cmdline_parse_core(p, &required_kernelcore);
4847}
4848
4849/*
4850 * movablecore=size sets the amount of memory for use for allocations that
4851 * can be reclaimed or migrated.
4852 */
4853static int __init cmdline_parse_movablecore(char *p)
4854{
4855	return cmdline_parse_core(p, &required_movablecore);
4856}
4857
4858early_param("kernelcore", cmdline_parse_kernelcore);
4859early_param("movablecore", cmdline_parse_movablecore);
4860
4861#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4862
4863/**
4864 * set_dma_reserve - set the specified number of pages reserved in the first zone
4865 * @new_dma_reserve: The number of pages to mark reserved
4866 *
4867 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4868 * In the DMA zone, a significant percentage may be consumed by kernel image
4869 * and other unfreeable allocations which can skew the watermarks badly. This
4870 * function may optionally be used to account for unfreeable pages in the
4871 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4872 * smaller per-cpu batchsize.
4873 */
4874void __init set_dma_reserve(unsigned long new_dma_reserve)
4875{
4876	dma_reserve = new_dma_reserve;
4877}
4878
4879void __init free_area_init(unsigned long *zones_size)
4880{
4881	free_area_init_node(0, zones_size,
4882			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4883}
4884
4885static int page_alloc_cpu_notify(struct notifier_block *self,
4886				 unsigned long action, void *hcpu)
4887{
4888	int cpu = (unsigned long)hcpu;
4889
4890	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4891		drain_pages(cpu);
4892
4893		/*
4894		 * Spill the event counters of the dead processor
4895		 * into the current processors event counters.
4896		 * This artificially elevates the count of the current
4897		 * processor.
4898		 */
4899		vm_events_fold_cpu(cpu);
4900
4901		/*
4902		 * Zero the differential counters of the dead processor
4903		 * so that the vm statistics are consistent.
4904		 *
4905		 * This is only okay since the processor is dead and cannot
4906		 * race with what we are doing.
4907		 */
4908		refresh_cpu_vm_stats(cpu);
4909	}
4910	return NOTIFY_OK;
4911}
4912
4913void __init page_alloc_init(void)
4914{
4915	hotcpu_notifier(page_alloc_cpu_notify, 0);
4916}
4917
4918/*
4919 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
4920 *	or min_free_kbytes changes.
4921 */
4922static void calculate_totalreserve_pages(void)
4923{
4924	struct pglist_data *pgdat;
4925	unsigned long reserve_pages = 0;
4926	enum zone_type i, j;
4927
4928	for_each_online_pgdat(pgdat) {
4929		for (i = 0; i < MAX_NR_ZONES; i++) {
4930			struct zone *zone = pgdat->node_zones + i;
4931			unsigned long max = 0;
4932
4933			/* Find valid and maximum lowmem_reserve in the zone */
4934			for (j = i; j < MAX_NR_ZONES; j++) {
4935				if (zone->lowmem_reserve[j] > max)
4936					max = zone->lowmem_reserve[j];
4937			}
4938
4939			/* we treat the high watermark as reserved pages. */
4940			max += high_wmark_pages(zone);
4941
4942			if (max > zone->present_pages)
4943				max = zone->present_pages;
4944			reserve_pages += max;
4945		}
4946	}
4947	totalreserve_pages = reserve_pages;
4948}
4949
4950/*
4951 * setup_per_zone_lowmem_reserve - called whenever
4952 *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
4953 *	has a correct pages reserved value, so an adequate number of
4954 *	pages are left in the zone after a successful __alloc_pages().
4955 */
4956static void setup_per_zone_lowmem_reserve(void)
4957{
4958	struct pglist_data *pgdat;
4959	enum zone_type j, idx;
4960
4961	for_each_online_pgdat(pgdat) {
4962		for (j = 0; j < MAX_NR_ZONES; j++) {
4963			struct zone *zone = pgdat->node_zones + j;
4964			unsigned long present_pages = zone->present_pages;
4965
4966			zone->lowmem_reserve[j] = 0;
4967
4968			idx = j;
4969			while (idx) {
4970				struct zone *lower_zone;
4971
4972				idx--;
4973
4974				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4975					sysctl_lowmem_reserve_ratio[idx] = 1;
4976
4977				lower_zone = pgdat->node_zones + idx;
4978				lower_zone->lowmem_reserve[j] = present_pages /
4979					sysctl_lowmem_reserve_ratio[idx];
4980				present_pages += lower_zone->present_pages;
4981			}
4982		}
4983	}
4984
4985	/* update totalreserve_pages */
4986	calculate_totalreserve_pages();
4987}
4988
4989/**
4990 * setup_per_zone_wmarks - called when min_free_kbytes changes
4991 * or when memory is hot-{added|removed}
4992 *
4993 * Ensures that the watermark[min,low,high] values for each zone are set
4994 * correctly with respect to min_free_kbytes.
4995 */
4996void setup_per_zone_wmarks(void)
4997{
4998	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4999	unsigned long lowmem_pages = 0;
5000	struct zone *zone;
5001	unsigned long flags;
5002
5003	/* Calculate total number of !ZONE_HIGHMEM pages */
5004	for_each_zone(zone) {
5005		if (!is_highmem(zone))
5006			lowmem_pages += zone->present_pages;
5007	}
5008
5009	for_each_zone(zone) {
5010		u64 tmp;
5011
5012		spin_lock_irqsave(&zone->lock, flags);
5013		tmp = (u64)pages_min * zone->present_pages;
5014		do_div(tmp, lowmem_pages);
5015		if (is_highmem(zone)) {
5016			/*
5017			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5018			 * need highmem pages, so cap pages_min to a small
5019			 * value here.
5020			 *
5021			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
5022			 * deltas controls asynch page reclaim, and so should
5023			 * not be capped for highmem.
5024			 */
5025			int min_pages;
5026
5027			min_pages = zone->present_pages / 1024;
5028			if (min_pages < SWAP_CLUSTER_MAX)
5029				min_pages = SWAP_CLUSTER_MAX;
5030			if (min_pages > 128)
5031				min_pages = 128;
5032			zone->watermark[WMARK_MIN] = min_pages;
5033		} else {
5034			/*
5035			 * If it's a lowmem zone, reserve a number of pages
5036			 * proportionate to the zone's size.
5037			 */
5038			zone->watermark[WMARK_MIN] = tmp;
5039		}
5040
5041		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
5042		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
5043		setup_zone_migrate_reserve(zone);
5044		spin_unlock_irqrestore(&zone->lock, flags);
5045	}
5046
5047	/* update totalreserve_pages */
5048	calculate_totalreserve_pages();
5049}
5050
5051/*
5052 * The inactive anon list should be small enough that the VM never has to
5053 * do too much work, but large enough that each inactive page has a chance
5054 * to be referenced again before it is swapped out.
5055 *
5056 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5057 * INACTIVE_ANON pages on this zone's LRU, maintained by the
5058 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5059 * the anonymous pages are kept on the inactive list.
5060 *
5061 * total     target    max
5062 * memory    ratio     inactive anon
5063 * -------------------------------------
5064 *   10MB       1         5MB
5065 *  100MB       1        50MB
5066 *    1GB       3       250MB
5067 *   10GB      10       0.9GB
5068 *  100GB      31         3GB
5069 *    1TB     101        10GB
5070 *   10TB     320        32GB
5071 */
5072void calculate_zone_inactive_ratio(struct zone *zone)
5073{
5074	unsigned int gb, ratio;
5075
5076	/* Zone size in gigabytes */
5077	gb = zone->present_pages >> (30 - PAGE_SHIFT);
5078	if (gb)
5079		ratio = int_sqrt(10 * gb);
5080	else
5081		ratio = 1;
5082
5083	zone->inactive_ratio = ratio;
5084}
5085
5086static void __init setup_per_zone_inactive_ratio(void)
5087{
5088	struct zone *zone;
5089
5090	for_each_zone(zone)
5091		calculate_zone_inactive_ratio(zone);
5092}
5093
5094/*
5095 * Initialise min_free_kbytes.
5096 *
5097 * For small machines we want it small (128k min).  For large machines
5098 * we want it large (64MB max).  But it is not linear, because network
5099 * bandwidth does not increase linearly with machine size.  We use
5100 *
5101 * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5102 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
5103 *
5104 * which yields
5105 *
5106 * 16MB:	512k
5107 * 32MB:	724k
5108 * 64MB:	1024k
5109 * 128MB:	1448k
5110 * 256MB:	2048k
5111 * 512MB:	2896k
5112 * 1024MB:	4096k
5113 * 2048MB:	5792k
5114 * 4096MB:	8192k
5115 * 8192MB:	11584k
5116 * 16384MB:	16384k
5117 */
5118static int __init init_per_zone_wmark_min(void)
5119{
5120	unsigned long lowmem_kbytes;
5121
5122	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5123
5124	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5125	if (min_free_kbytes < 128)
5126		min_free_kbytes = 128;
5127	if (min_free_kbytes > 65536)
5128		min_free_kbytes = 65536;
5129	setup_per_zone_wmarks();
5130	setup_per_zone_lowmem_reserve();
5131	setup_per_zone_inactive_ratio();
5132	return 0;
5133}
5134module_init(init_per_zone_wmark_min)
5135
5136/*
5137 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5138 *	that we can call two helper functions whenever min_free_kbytes
5139 *	changes.
5140 */
5141int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
5142	void __user *buffer, size_t *length, loff_t *ppos)
5143{
5144	proc_dointvec(table, write, buffer, length, ppos);
5145	if (write)
5146		setup_per_zone_wmarks();
5147	return 0;
5148}
5149
5150#ifdef CONFIG_NUMA
5151int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
5152	void __user *buffer, size_t *length, loff_t *ppos)
5153{
5154	struct zone *zone;
5155	int rc;
5156
5157	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5158	if (rc)
5159		return rc;
5160
5161	for_each_zone(zone)
5162		zone->min_unmapped_pages = (zone->present_pages *
5163				sysctl_min_unmapped_ratio) / 100;
5164	return 0;
5165}
5166
5167int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
5168	void __user *buffer, size_t *length, loff_t *ppos)
5169{
5170	struct zone *zone;
5171	int rc;
5172
5173	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5174	if (rc)
5175		return rc;
5176
5177	for_each_zone(zone)
5178		zone->min_slab_pages = (zone->present_pages *
5179				sysctl_min_slab_ratio) / 100;
5180	return 0;
5181}
5182#endif
5183
5184/*
5185 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5186 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5187 *	whenever sysctl_lowmem_reserve_ratio changes.
5188 *
5189 * The reserve ratio obviously has absolutely no relation with the
5190 * minimum watermarks. The lowmem reserve ratio can only make sense
5191 * if in function of the boot time zone sizes.
5192 */
5193int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
5194	void __user *buffer, size_t *length, loff_t *ppos)
5195{
5196	proc_dointvec_minmax(table, write, buffer, length, ppos);
5197	setup_per_zone_lowmem_reserve();
5198	return 0;
5199}
5200
5201/*
5202 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
5203 * cpu.  It is the fraction of total pages in each zone that a hot per cpu pagelist
5204 * can have before it gets flushed back to buddy allocator.
5205 */
5206
5207int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
5208	void __user *buffer, size_t *length, loff_t *ppos)
5209{
5210	struct zone *zone;
5211	unsigned int cpu;
5212	int ret;
5213
5214	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5215	if (!write || (ret == -EINVAL))
5216		return ret;
5217	for_each_populated_zone(zone) {
5218		for_each_possible_cpu(cpu) {
5219			unsigned long  high;
5220			high = zone->present_pages / percpu_pagelist_fraction;
5221			setup_pagelist_highmark(
5222				per_cpu_ptr(zone->pageset, cpu), high);
5223		}
5224	}
5225	return 0;
5226}
5227
5228int hashdist = HASHDIST_DEFAULT;
5229
5230#ifdef CONFIG_NUMA
5231static int __init set_hashdist(char *str)
5232{
5233	if (!str)
5234		return 0;
5235	hashdist = simple_strtoul(str, &str, 0);
5236	return 1;
5237}
5238__setup("hashdist=", set_hashdist);
5239#endif
5240
5241/*
5242 * allocate a large system hash table from bootmem
5243 * - it is assumed that the hash table must contain an exact power-of-2
5244 *   quantity of entries
5245 * - limit is the number of hash buckets, not the total allocation size
5246 */
5247void *__init alloc_large_system_hash(const char *tablename,
5248				     unsigned long bucketsize,
5249				     unsigned long numentries,
5250				     int scale,
5251				     int flags,
5252				     unsigned int *_hash_shift,
5253				     unsigned int *_hash_mask,
5254				     unsigned long limit)
5255{
5256	unsigned long long max = limit;
5257	unsigned long log2qty, size;
5258	void *table = NULL;
5259
5260	/* allow the kernel cmdline to have a say */
5261	if (!numentries) {
5262		/* round applicable memory size up to nearest megabyte */
5263		numentries = nr_kernel_pages;
5264		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5265		numentries >>= 20 - PAGE_SHIFT;
5266		numentries <<= 20 - PAGE_SHIFT;
5267
5268		/* limit to 1 bucket per 2^scale bytes of low memory */
5269		if (scale > PAGE_SHIFT)
5270			numentries >>= (scale - PAGE_SHIFT);
5271		else
5272			numentries <<= (PAGE_SHIFT - scale);
5273
5274		/* Make sure we've got at least a 0-order allocation.. */
5275		if (unlikely(flags & HASH_SMALL)) {
5276			/* Makes no sense without HASH_EARLY */
5277			WARN_ON(!(flags & HASH_EARLY));
5278			if (!(numentries >> *_hash_shift)) {
5279				numentries = 1UL << *_hash_shift;
5280				BUG_ON(!numentries);
5281			}
5282		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
5283			numentries = PAGE_SIZE / bucketsize;
5284	}
5285	numentries = roundup_pow_of_two(numentries);
5286
5287	/* limit allocation size to 1/16 total memory by default */
5288	if (max == 0) {
5289		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5290		do_div(max, bucketsize);
5291	}
5292
5293	if (numentries > max)
5294		numentries = max;
5295
5296	log2qty = ilog2(numentries);
5297
5298	do {
5299		size = bucketsize << log2qty;
5300		if (flags & HASH_EARLY)
5301			table = alloc_bootmem_nopanic(size);
5302		else if (hashdist)
5303			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5304		else {
5305			/*
5306			 * If bucketsize is not a power-of-two, we may free
5307			 * some pages at the end of hash table which
5308			 * alloc_pages_exact() automatically does
5309			 */
5310			if (get_order(size) < MAX_ORDER) {
5311				table = alloc_pages_exact(size, GFP_ATOMIC);
5312				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5313			}
5314		}
5315	} while (!table && size > PAGE_SIZE && --log2qty);
5316
5317	if (!table)
5318		panic("Failed to allocate %s hash table\n", tablename);
5319
5320	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
5321	       tablename,
5322	       (1UL << log2qty),
5323	       ilog2(size) - PAGE_SHIFT,
5324	       size);
5325
5326	if (_hash_shift)
5327		*_hash_shift = log2qty;
5328	if (_hash_mask)
5329		*_hash_mask = (1 << log2qty) - 1;
5330
5331	return table;
5332}
5333
5334/* Return a pointer to the bitmap storing bits affecting a block of pages */
5335static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
5336							unsigned long pfn)
5337{
5338#ifdef CONFIG_SPARSEMEM
5339	return __pfn_to_section(pfn)->pageblock_flags;
5340#else
5341	return zone->pageblock_flags;
5342#endif /* CONFIG_SPARSEMEM */
5343}
5344
5345static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5346{
5347#ifdef CONFIG_SPARSEMEM
5348	pfn &= (PAGES_PER_SECTION-1);
5349	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5350#else
5351	pfn = pfn - zone->zone_start_pfn;
5352	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5353#endif /* CONFIG_SPARSEMEM */
5354}
5355
5356/**
5357 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
5358 * @page: The page within the block of interest
5359 * @start_bitidx: The first bit of interest to retrieve
5360 * @end_bitidx: The last bit of interest
5361 * returns pageblock_bits flags
5362 */
5363unsigned long get_pageblock_flags_group(struct page *page,
5364					int start_bitidx, int end_bitidx)
5365{
5366	struct zone *zone;
5367	unsigned long *bitmap;
5368	unsigned long pfn, bitidx;
5369	unsigned long flags = 0;
5370	unsigned long value = 1;
5371
5372	zone = page_zone(page);
5373	pfn = page_to_pfn(page);
5374	bitmap = get_pageblock_bitmap(zone, pfn);
5375	bitidx = pfn_to_bitidx(zone, pfn);
5376
5377	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5378		if (test_bit(bitidx + start_bitidx, bitmap))
5379			flags |= value;
5380
5381	return flags;
5382}
5383
5384/**
5385 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
5386 * @page: The page within the block of interest
5387 * @start_bitidx: The first bit of interest
5388 * @end_bitidx: The last bit of interest
5389 * @flags: The flags to set
5390 */
5391void set_pageblock_flags_group(struct page *page, unsigned long flags,
5392					int start_bitidx, int end_bitidx)
5393{
5394	struct zone *zone;
5395	unsigned long *bitmap;
5396	unsigned long pfn, bitidx;
5397	unsigned long value = 1;
5398
5399	zone = page_zone(page);
5400	pfn = page_to_pfn(page);
5401	bitmap = get_pageblock_bitmap(zone, pfn);
5402	bitidx = pfn_to_bitidx(zone, pfn);
5403	VM_BUG_ON(pfn < zone->zone_start_pfn);
5404	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
5405
5406	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5407		if (flags & value)
5408			__set_bit(bitidx + start_bitidx, bitmap);
5409		else
5410			__clear_bit(bitidx + start_bitidx, bitmap);
5411}
5412
5413/*
5414 * This is designed as sub function...plz see page_isolation.c also.
5415 * set/clear page block's type to be ISOLATE.
5416 * page allocater never alloc memory from ISOLATE block.
5417 */
5418
5419static int
5420__count_immobile_pages(struct zone *zone, struct page *page, int count)
5421{
5422	unsigned long pfn, iter, found;
5423	/*
5424	 * For avoiding noise data, lru_add_drain_all() should be called
5425	 * If ZONE_MOVABLE, the zone never contains immobile pages
5426	 */
5427	if (zone_idx(zone) == ZONE_MOVABLE)
5428		return true;
5429
5430	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
5431		return true;
5432
5433	pfn = page_to_pfn(page);
5434	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
5435		unsigned long check = pfn + iter;
5436
5437		if (!pfn_valid_within(check))
5438			continue;
5439
5440		page = pfn_to_page(check);
5441		if (!page_count(page)) {
5442			if (PageBuddy(page))
5443				iter += (1 << page_order(page)) - 1;
5444			continue;
5445		}
5446		if (!PageLRU(page))
5447			found++;
5448		/*
5449		 * If there are RECLAIMABLE pages, we need to check it.
5450		 * But now, memory offline itself doesn't call shrink_slab()
5451		 * and it still to be fixed.
5452		 */
5453		/*
5454		 * If the page is not RAM, page_count()should be 0.
5455		 * we don't need more check. This is an _used_ not-movable page.
5456		 *
5457		 * The problematic thing here is PG_reserved pages. PG_reserved
5458		 * is set to both of a memory hole page and a _used_ kernel
5459		 * page at boot.
5460		 */
5461		if (found > count)
5462			return false;
5463	}
5464	return true;
5465}
5466
5467bool is_pageblock_removable_nolock(struct page *page)
5468{
5469	struct zone *zone = page_zone(page);
5470	return __count_immobile_pages(zone, page, 0);
5471}
5472
5473int set_migratetype_isolate(struct page *page)
5474{
5475	struct zone *zone;
5476	unsigned long flags, pfn;
5477	struct memory_isolate_notify arg;
5478	int notifier_ret;
5479	int ret = -EBUSY;
5480	int zone_idx;
5481
5482	zone = page_zone(page);
5483	zone_idx = zone_idx(zone);
5484
5485	spin_lock_irqsave(&zone->lock, flags);
5486
5487	pfn = page_to_pfn(page);
5488	arg.start_pfn = pfn;
5489	arg.nr_pages = pageblock_nr_pages;
5490	arg.pages_found = 0;
5491
5492	/*
5493	 * It may be possible to isolate a pageblock even if the
5494	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
5495	 * notifier chain is used by balloon drivers to return the
5496	 * number of pages in a range that are held by the balloon
5497	 * driver to shrink memory. If all the pages are accounted for
5498	 * by balloons, are free, or on the LRU, isolation can continue.
5499	 * Later, for example, when memory hotplug notifier runs, these
5500	 * pages reported as "can be isolated" should be isolated(freed)
5501	 * by the balloon driver through the memory notifier chain.
5502	 */
5503	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
5504	notifier_ret = notifier_to_errno(notifier_ret);
5505	if (notifier_ret)
5506		goto out;
5507	/*
5508	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
5509	 * We just check MOVABLE pages.
5510	 */
5511	if (__count_immobile_pages(zone, page, arg.pages_found))
5512		ret = 0;
5513
5514	/*
5515	 * immobile means "not-on-lru" paes. If immobile is larger than
5516	 * removable-by-driver pages reported by notifier, we'll fail.
5517	 */
5518
5519out:
5520	if (!ret) {
5521		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5522		move_freepages_block(zone, page, MIGRATE_ISOLATE);
5523	}
5524
5525	spin_unlock_irqrestore(&zone->lock, flags);
5526	if (!ret)
5527		drain_all_pages();
5528	return ret;
5529}
5530
5531void unset_migratetype_isolate(struct page *page)
5532{
5533	struct zone *zone;
5534	unsigned long flags;
5535	zone = page_zone(page);
5536	spin_lock_irqsave(&zone->lock, flags);
5537	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5538		goto out;
5539	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5540	move_freepages_block(zone, page, MIGRATE_MOVABLE);
5541out:
5542	spin_unlock_irqrestore(&zone->lock, flags);
5543}
5544
5545#ifdef CONFIG_MEMORY_HOTREMOVE
5546/*
5547 * All pages in the range must be isolated before calling this.
5548 */
5549void
5550__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5551{
5552	struct page *page;
5553	struct zone *zone;
5554	int order, i;
5555	unsigned long pfn;
5556	unsigned long flags;
5557	/* find the first valid pfn */
5558	for (pfn = start_pfn; pfn < end_pfn; pfn++)
5559		if (pfn_valid(pfn))
5560			break;
5561	if (pfn == end_pfn)
5562		return;
5563	zone = page_zone(pfn_to_page(pfn));
5564	spin_lock_irqsave(&zone->lock, flags);
5565	pfn = start_pfn;
5566	while (pfn < end_pfn) {
5567		if (!pfn_valid(pfn)) {
5568			pfn++;
5569			continue;
5570		}
5571		page = pfn_to_page(pfn);
5572		BUG_ON(page_count(page));
5573		BUG_ON(!PageBuddy(page));
5574		order = page_order(page);
5575#ifdef CONFIG_DEBUG_VM
5576		printk(KERN_INFO "remove from free list %lx %d %lx\n",
5577		       pfn, 1 << order, end_pfn);
5578#endif
5579		list_del(&page->lru);
5580		rmv_page_order(page);
5581		zone->free_area[order].nr_free--;
5582		__mod_zone_page_state(zone, NR_FREE_PAGES,
5583				      - (1UL << order));
5584		for (i = 0; i < (1 << order); i++)
5585			SetPageReserved((page+i));
5586		pfn += (1 << order);
5587	}
5588	spin_unlock_irqrestore(&zone->lock, flags);
5589}
5590#endif
5591
5592#ifdef CONFIG_MEMORY_FAILURE
5593bool is_free_buddy_page(struct page *page)
5594{
5595	struct zone *zone = page_zone(page);
5596	unsigned long pfn = page_to_pfn(page);
5597	unsigned long flags;
5598	int order;
5599
5600	spin_lock_irqsave(&zone->lock, flags);
5601	for (order = 0; order < MAX_ORDER; order++) {
5602		struct page *page_head = page - (pfn & ((1 << order) - 1));
5603
5604		if (PageBuddy(page_head) && page_order(page_head) >= order)
5605			break;
5606	}
5607	spin_unlock_irqrestore(&zone->lock, flags);
5608
5609	return order < MAX_ORDER;
5610}
5611#endif
5612
5613static struct trace_print_flags pageflag_names[] = {
5614	{1UL << PG_locked,		"locked"	},
5615	{1UL << PG_error,		"error"		},
5616	{1UL << PG_referenced,		"referenced"	},
5617	{1UL << PG_uptodate,		"uptodate"	},
5618	{1UL << PG_dirty,		"dirty"		},
5619	{1UL << PG_lru,			"lru"		},
5620	{1UL << PG_active,		"active"	},
5621	{1UL << PG_slab,		"slab"		},
5622	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
5623	{1UL << PG_arch_1,		"arch_1"	},
5624	{1UL << PG_reserved,		"reserved"	},
5625	{1UL << PG_private,		"private"	},
5626	{1UL << PG_private_2,		"private_2"	},
5627	{1UL << PG_writeback,		"writeback"	},
5628#ifdef CONFIG_PAGEFLAGS_EXTENDED
5629	{1UL << PG_head,		"head"		},
5630	{1UL << PG_tail,		"tail"		},
5631#else
5632	{1UL << PG_compound,		"compound"	},
5633#endif
5634	{1UL << PG_swapcache,		"swapcache"	},
5635	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
5636	{1UL << PG_reclaim,		"reclaim"	},
5637	{1UL << PG_swapbacked,		"swapbacked"	},
5638	{1UL << PG_unevictable,		"unevictable"	},
5639#ifdef CONFIG_MMU
5640	{1UL << PG_mlocked,		"mlocked"	},
5641#endif
5642#ifdef CONFIG_ARCH_USES_PG_UNCACHED
5643	{1UL << PG_uncached,		"uncached"	},
5644#endif
5645#ifdef CONFIG_MEMORY_FAILURE
5646	{1UL << PG_hwpoison,		"hwpoison"	},
5647#endif
5648	{-1UL,				NULL		},
5649};
5650
5651static void dump_page_flags(unsigned long flags)
5652{
5653	const char *delim = "";
5654	unsigned long mask;
5655	int i;
5656
5657	printk(KERN_ALERT "page flags: %#lx(", flags);
5658
5659	/* remove zone id */
5660	flags &= (1UL << NR_PAGEFLAGS) - 1;
5661
5662	for (i = 0; pageflag_names[i].name && flags; i++) {
5663
5664		mask = pageflag_names[i].mask;
5665		if ((flags & mask) != mask)
5666			continue;
5667
5668		flags &= ~mask;
5669		printk("%s%s", delim, pageflag_names[i].name);
5670		delim = "|";
5671	}
5672
5673	/* check for left over flags */
5674	if (flags)
5675		printk("%s%#lx", delim, flags);
5676
5677	printk(")\n");
5678}
5679
5680void dump_page(struct page *page)
5681{
5682	printk(KERN_ALERT
5683	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
5684		page, atomic_read(&page->_count), page_mapcount(page),
5685		page->mapping, page->index);
5686	dump_page_flags(page->flags);
5687}
5688