page_alloc.c revision b2588c4b4c3c075e9b45d61065d86c60de2b6441
1/*
2 *  linux/mm/page_alloc.c
3 *
4 *  Manages the free list, the system allocates free pages here.
5 *  Note that kmalloc() lives in slab.c
6 *
7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8 *  Swap reorganised 29.12.95, Stephen Tweedie
9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
22#include <linux/jiffies.h>
23#include <linux/bootmem.h>
24#include <linux/memblock.h>
25#include <linux/compiler.h>
26#include <linux/kernel.h>
27#include <linux/kmemcheck.h>
28#include <linux/module.h>
29#include <linux/suspend.h>
30#include <linux/pagevec.h>
31#include <linux/blkdev.h>
32#include <linux/slab.h>
33#include <linux/ratelimit.h>
34#include <linux/oom.h>
35#include <linux/notifier.h>
36#include <linux/topology.h>
37#include <linux/sysctl.h>
38#include <linux/cpu.h>
39#include <linux/cpuset.h>
40#include <linux/memory_hotplug.h>
41#include <linux/nodemask.h>
42#include <linux/vmalloc.h>
43#include <linux/vmstat.h>
44#include <linux/mempolicy.h>
45#include <linux/stop_machine.h>
46#include <linux/sort.h>
47#include <linux/pfn.h>
48#include <linux/backing-dev.h>
49#include <linux/fault-inject.h>
50#include <linux/page-isolation.h>
51#include <linux/page_cgroup.h>
52#include <linux/debugobjects.h>
53#include <linux/kmemleak.h>
54#include <linux/memory.h>
55#include <linux/compaction.h>
56#include <trace/events/kmem.h>
57#include <linux/ftrace_event.h>
58#include <linux/memcontrol.h>
59#include <linux/prefetch.h>
60
61#include <asm/tlbflush.h>
62#include <asm/div64.h>
63#include "internal.h"
64
65#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
66DEFINE_PER_CPU(int, numa_node);
67EXPORT_PER_CPU_SYMBOL(numa_node);
68#endif
69
70#ifdef CONFIG_HAVE_MEMORYLESS_NODES
71/*
72 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
73 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
74 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
75 * defined in <linux/topology.h>.
76 */
77DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
78EXPORT_PER_CPU_SYMBOL(_numa_mem_);
79#endif
80
81/*
82 * Array of node states.
83 */
84nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
85	[N_POSSIBLE] = NODE_MASK_ALL,
86	[N_ONLINE] = { { [0] = 1UL } },
87#ifndef CONFIG_NUMA
88	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
89#ifdef CONFIG_HIGHMEM
90	[N_HIGH_MEMORY] = { { [0] = 1UL } },
91#endif
92	[N_CPU] = { { [0] = 1UL } },
93#endif	/* NUMA */
94};
95EXPORT_SYMBOL(node_states);
96
97unsigned long totalram_pages __read_mostly;
98unsigned long totalreserve_pages __read_mostly;
99int percpu_pagelist_fraction;
100gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
101
102#ifdef CONFIG_PM_SLEEP
103/*
104 * The following functions are used by the suspend/hibernate code to temporarily
105 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
106 * while devices are suspended.  To avoid races with the suspend/hibernate code,
107 * they should always be called with pm_mutex held (gfp_allowed_mask also should
108 * only be modified with pm_mutex held, unless the suspend/hibernate code is
109 * guaranteed not to run in parallel with that modification).
110 */
111
112static gfp_t saved_gfp_mask;
113
114void pm_restore_gfp_mask(void)
115{
116	WARN_ON(!mutex_is_locked(&pm_mutex));
117	if (saved_gfp_mask) {
118		gfp_allowed_mask = saved_gfp_mask;
119		saved_gfp_mask = 0;
120	}
121}
122
123void pm_restrict_gfp_mask(void)
124{
125	WARN_ON(!mutex_is_locked(&pm_mutex));
126	WARN_ON(saved_gfp_mask);
127	saved_gfp_mask = gfp_allowed_mask;
128	gfp_allowed_mask &= ~GFP_IOFS;
129}
130#endif /* CONFIG_PM_SLEEP */
131
132#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
133int pageblock_order __read_mostly;
134#endif
135
136static void __free_pages_ok(struct page *page, unsigned int order);
137
138/*
139 * results with 256, 32 in the lowmem_reserve sysctl:
140 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
141 *	1G machine -> (16M dma, 784M normal, 224M high)
142 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
143 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
144 *	HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
145 *
146 * TBD: should special case ZONE_DMA32 machines here - in those we normally
147 * don't need any ZONE_NORMAL reservation
148 */
149int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
150#ifdef CONFIG_ZONE_DMA
151	 256,
152#endif
153#ifdef CONFIG_ZONE_DMA32
154	 256,
155#endif
156#ifdef CONFIG_HIGHMEM
157	 32,
158#endif
159	 32,
160};
161
162EXPORT_SYMBOL(totalram_pages);
163
164static char * const zone_names[MAX_NR_ZONES] = {
165#ifdef CONFIG_ZONE_DMA
166	 "DMA",
167#endif
168#ifdef CONFIG_ZONE_DMA32
169	 "DMA32",
170#endif
171	 "Normal",
172#ifdef CONFIG_HIGHMEM
173	 "HighMem",
174#endif
175	 "Movable",
176};
177
178int min_free_kbytes = 1024;
179
180static unsigned long __meminitdata nr_kernel_pages;
181static unsigned long __meminitdata nr_all_pages;
182static unsigned long __meminitdata dma_reserve;
183
184#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
185  /*
186   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
187   * ranges of memory (RAM) that may be registered with add_active_range().
188   * Ranges passed to add_active_range() will be merged if possible
189   * so the number of times add_active_range() can be called is
190   * related to the number of nodes and the number of holes
191   */
192  #ifdef CONFIG_MAX_ACTIVE_REGIONS
193    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
194    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
195  #else
196    #if MAX_NUMNODES >= 32
197      /* If there can be many nodes, allow up to 50 holes per node */
198      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
199    #else
200      /* By default, allow up to 256 distinct regions */
201      #define MAX_ACTIVE_REGIONS 256
202    #endif
203  #endif
204
205  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
206  static int __meminitdata nr_nodemap_entries;
207  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
208  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
209  static unsigned long __initdata required_kernelcore;
210  static unsigned long __initdata required_movablecore;
211  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
212
213  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
214  int movable_zone;
215  EXPORT_SYMBOL(movable_zone);
216#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
217
218#if MAX_NUMNODES > 1
219int nr_node_ids __read_mostly = MAX_NUMNODES;
220int nr_online_nodes __read_mostly = 1;
221EXPORT_SYMBOL(nr_node_ids);
222EXPORT_SYMBOL(nr_online_nodes);
223#endif
224
225int page_group_by_mobility_disabled __read_mostly;
226
227static void set_pageblock_migratetype(struct page *page, int migratetype)
228{
229
230	if (unlikely(page_group_by_mobility_disabled))
231		migratetype = MIGRATE_UNMOVABLE;
232
233	set_pageblock_flags_group(page, (unsigned long)migratetype,
234					PB_migrate, PB_migrate_end);
235}
236
237bool oom_killer_disabled __read_mostly;
238
239#ifdef CONFIG_DEBUG_VM
240static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
241{
242	int ret = 0;
243	unsigned seq;
244	unsigned long pfn = page_to_pfn(page);
245
246	do {
247		seq = zone_span_seqbegin(zone);
248		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
249			ret = 1;
250		else if (pfn < zone->zone_start_pfn)
251			ret = 1;
252	} while (zone_span_seqretry(zone, seq));
253
254	return ret;
255}
256
257static int page_is_consistent(struct zone *zone, struct page *page)
258{
259	if (!pfn_valid_within(page_to_pfn(page)))
260		return 0;
261	if (zone != page_zone(page))
262		return 0;
263
264	return 1;
265}
266/*
267 * Temporary debugging check for pages not lying within a given zone.
268 */
269static int bad_range(struct zone *zone, struct page *page)
270{
271	if (page_outside_zone_boundaries(zone, page))
272		return 1;
273	if (!page_is_consistent(zone, page))
274		return 1;
275
276	return 0;
277}
278#else
279static inline int bad_range(struct zone *zone, struct page *page)
280{
281	return 0;
282}
283#endif
284
285static void bad_page(struct page *page)
286{
287	static unsigned long resume;
288	static unsigned long nr_shown;
289	static unsigned long nr_unshown;
290
291	/* Don't complain about poisoned pages */
292	if (PageHWPoison(page)) {
293		reset_page_mapcount(page); /* remove PageBuddy */
294		return;
295	}
296
297	/*
298	 * Allow a burst of 60 reports, then keep quiet for that minute;
299	 * or allow a steady drip of one report per second.
300	 */
301	if (nr_shown == 60) {
302		if (time_before(jiffies, resume)) {
303			nr_unshown++;
304			goto out;
305		}
306		if (nr_unshown) {
307			printk(KERN_ALERT
308			      "BUG: Bad page state: %lu messages suppressed\n",
309				nr_unshown);
310			nr_unshown = 0;
311		}
312		nr_shown = 0;
313	}
314	if (nr_shown++ == 0)
315		resume = jiffies + 60 * HZ;
316
317	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
318		current->comm, page_to_pfn(page));
319	dump_page(page);
320
321	dump_stack();
322out:
323	/* Leave bad fields for debug, except PageBuddy could make trouble */
324	reset_page_mapcount(page); /* remove PageBuddy */
325	add_taint(TAINT_BAD_PAGE);
326}
327
328/*
329 * Higher-order pages are called "compound pages".  They are structured thusly:
330 *
331 * The first PAGE_SIZE page is called the "head page".
332 *
333 * The remaining PAGE_SIZE pages are called "tail pages".
334 *
335 * All pages have PG_compound set.  All pages have their ->private pointing at
336 * the head page (even the head page has this).
337 *
338 * The first tail page's ->lru.next holds the address of the compound page's
339 * put_page() function.  Its ->lru.prev holds the order of allocation.
340 * This usage means that zero-order pages may not be compound.
341 */
342
343static void free_compound_page(struct page *page)
344{
345	__free_pages_ok(page, compound_order(page));
346}
347
348void prep_compound_page(struct page *page, unsigned long order)
349{
350	int i;
351	int nr_pages = 1 << order;
352
353	set_compound_page_dtor(page, free_compound_page);
354	set_compound_order(page, order);
355	__SetPageHead(page);
356	for (i = 1; i < nr_pages; i++) {
357		struct page *p = page + i;
358
359		__SetPageTail(p);
360		p->first_page = page;
361	}
362}
363
364/* update __split_huge_page_refcount if you change this function */
365static int destroy_compound_page(struct page *page, unsigned long order)
366{
367	int i;
368	int nr_pages = 1 << order;
369	int bad = 0;
370
371	if (unlikely(compound_order(page) != order) ||
372	    unlikely(!PageHead(page))) {
373		bad_page(page);
374		bad++;
375	}
376
377	__ClearPageHead(page);
378
379	for (i = 1; i < nr_pages; i++) {
380		struct page *p = page + i;
381
382		if (unlikely(!PageTail(p) || (p->first_page != page))) {
383			bad_page(page);
384			bad++;
385		}
386		__ClearPageTail(p);
387	}
388
389	return bad;
390}
391
392static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
393{
394	int i;
395
396	/*
397	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
398	 * and __GFP_HIGHMEM from hard or soft interrupt context.
399	 */
400	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
401	for (i = 0; i < (1 << order); i++)
402		clear_highpage(page + i);
403}
404
405static inline void set_page_order(struct page *page, int order)
406{
407	set_page_private(page, order);
408	__SetPageBuddy(page);
409}
410
411static inline void rmv_page_order(struct page *page)
412{
413	__ClearPageBuddy(page);
414	set_page_private(page, 0);
415}
416
417/*
418 * Locate the struct page for both the matching buddy in our
419 * pair (buddy1) and the combined O(n+1) page they form (page).
420 *
421 * 1) Any buddy B1 will have an order O twin B2 which satisfies
422 * the following equation:
423 *     B2 = B1 ^ (1 << O)
424 * For example, if the starting buddy (buddy2) is #8 its order
425 * 1 buddy is #10:
426 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
427 *
428 * 2) Any buddy B will have an order O+1 parent P which
429 * satisfies the following equation:
430 *     P = B & ~(1 << O)
431 *
432 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
433 */
434static inline unsigned long
435__find_buddy_index(unsigned long page_idx, unsigned int order)
436{
437	return page_idx ^ (1 << order);
438}
439
440/*
441 * This function checks whether a page is free && is the buddy
442 * we can do coalesce a page and its buddy if
443 * (a) the buddy is not in a hole &&
444 * (b) the buddy is in the buddy system &&
445 * (c) a page and its buddy have the same order &&
446 * (d) a page and its buddy are in the same zone.
447 *
448 * For recording whether a page is in the buddy system, we set ->_mapcount -2.
449 * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
450 *
451 * For recording page's order, we use page_private(page).
452 */
453static inline int page_is_buddy(struct page *page, struct page *buddy,
454								int order)
455{
456	if (!pfn_valid_within(page_to_pfn(buddy)))
457		return 0;
458
459	if (page_zone_id(page) != page_zone_id(buddy))
460		return 0;
461
462	if (PageBuddy(buddy) && page_order(buddy) == order) {
463		VM_BUG_ON(page_count(buddy) != 0);
464		return 1;
465	}
466	return 0;
467}
468
469/*
470 * Freeing function for a buddy system allocator.
471 *
472 * The concept of a buddy system is to maintain direct-mapped table
473 * (containing bit values) for memory blocks of various "orders".
474 * The bottom level table contains the map for the smallest allocatable
475 * units of memory (here, pages), and each level above it describes
476 * pairs of units from the levels below, hence, "buddies".
477 * At a high level, all that happens here is marking the table entry
478 * at the bottom level available, and propagating the changes upward
479 * as necessary, plus some accounting needed to play nicely with other
480 * parts of the VM system.
481 * At each level, we keep a list of pages, which are heads of continuous
482 * free pages of length of (1 << order) and marked with _mapcount -2. Page's
483 * order is recorded in page_private(page) field.
484 * So when we are allocating or freeing one, we can derive the state of the
485 * other.  That is, if we allocate a small block, and both were
486 * free, the remainder of the region must be split into blocks.
487 * If a block is freed, and its buddy is also free, then this
488 * triggers coalescing into a block of larger size.
489 *
490 * -- wli
491 */
492
493static inline void __free_one_page(struct page *page,
494		struct zone *zone, unsigned int order,
495		int migratetype)
496{
497	unsigned long page_idx;
498	unsigned long combined_idx;
499	unsigned long uninitialized_var(buddy_idx);
500	struct page *buddy;
501
502	if (unlikely(PageCompound(page)))
503		if (unlikely(destroy_compound_page(page, order)))
504			return;
505
506	VM_BUG_ON(migratetype == -1);
507
508	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
509
510	VM_BUG_ON(page_idx & ((1 << order) - 1));
511	VM_BUG_ON(bad_range(zone, page));
512
513	while (order < MAX_ORDER-1) {
514		buddy_idx = __find_buddy_index(page_idx, order);
515		buddy = page + (buddy_idx - page_idx);
516		if (!page_is_buddy(page, buddy, order))
517			break;
518
519		/* Our buddy is free, merge with it and move up one order. */
520		list_del(&buddy->lru);
521		zone->free_area[order].nr_free--;
522		rmv_page_order(buddy);
523		combined_idx = buddy_idx & page_idx;
524		page = page + (combined_idx - page_idx);
525		page_idx = combined_idx;
526		order++;
527	}
528	set_page_order(page, order);
529
530	/*
531	 * If this is not the largest possible page, check if the buddy
532	 * of the next-highest order is free. If it is, it's possible
533	 * that pages are being freed that will coalesce soon. In case,
534	 * that is happening, add the free page to the tail of the list
535	 * so it's less likely to be used soon and more likely to be merged
536	 * as a higher order page
537	 */
538	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
539		struct page *higher_page, *higher_buddy;
540		combined_idx = buddy_idx & page_idx;
541		higher_page = page + (combined_idx - page_idx);
542		buddy_idx = __find_buddy_index(combined_idx, order + 1);
543		higher_buddy = page + (buddy_idx - combined_idx);
544		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
545			list_add_tail(&page->lru,
546				&zone->free_area[order].free_list[migratetype]);
547			goto out;
548		}
549	}
550
551	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
552out:
553	zone->free_area[order].nr_free++;
554}
555
556/*
557 * free_page_mlock() -- clean up attempts to free and mlocked() page.
558 * Page should not be on lru, so no need to fix that up.
559 * free_pages_check() will verify...
560 */
561static inline void free_page_mlock(struct page *page)
562{
563	__dec_zone_page_state(page, NR_MLOCK);
564	__count_vm_event(UNEVICTABLE_MLOCKFREED);
565}
566
567static inline int free_pages_check(struct page *page)
568{
569	if (unlikely(page_mapcount(page) |
570		(page->mapping != NULL)  |
571		(atomic_read(&page->_count) != 0) |
572		(page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
573		(mem_cgroup_bad_page_check(page)))) {
574		bad_page(page);
575		return 1;
576	}
577	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
578		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
579	return 0;
580}
581
582/*
583 * Frees a number of pages from the PCP lists
584 * Assumes all pages on list are in same zone, and of same order.
585 * count is the number of pages to free.
586 *
587 * If the zone was previously in an "all pages pinned" state then look to
588 * see if this freeing clears that state.
589 *
590 * And clear the zone's pages_scanned counter, to hold off the "all pages are
591 * pinned" detection logic.
592 */
593static void free_pcppages_bulk(struct zone *zone, int count,
594					struct per_cpu_pages *pcp)
595{
596	int migratetype = 0;
597	int batch_free = 0;
598	int to_free = count;
599
600	spin_lock(&zone->lock);
601	zone->all_unreclaimable = 0;
602	zone->pages_scanned = 0;
603
604	while (to_free) {
605		struct page *page;
606		struct list_head *list;
607
608		/*
609		 * Remove pages from lists in a round-robin fashion. A
610		 * batch_free count is maintained that is incremented when an
611		 * empty list is encountered.  This is so more pages are freed
612		 * off fuller lists instead of spinning excessively around empty
613		 * lists
614		 */
615		do {
616			batch_free++;
617			if (++migratetype == MIGRATE_PCPTYPES)
618				migratetype = 0;
619			list = &pcp->lists[migratetype];
620		} while (list_empty(list));
621
622		/* This is the only non-empty list. Free them all. */
623		if (batch_free == MIGRATE_PCPTYPES)
624			batch_free = to_free;
625
626		do {
627			page = list_entry(list->prev, struct page, lru);
628			/* must delete as __free_one_page list manipulates */
629			list_del(&page->lru);
630			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
631			__free_one_page(page, zone, 0, page_private(page));
632			trace_mm_page_pcpu_drain(page, 0, page_private(page));
633		} while (--to_free && --batch_free && !list_empty(list));
634	}
635	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
636	spin_unlock(&zone->lock);
637}
638
639static void free_one_page(struct zone *zone, struct page *page, int order,
640				int migratetype)
641{
642	spin_lock(&zone->lock);
643	zone->all_unreclaimable = 0;
644	zone->pages_scanned = 0;
645
646	__free_one_page(page, zone, order, migratetype);
647	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
648	spin_unlock(&zone->lock);
649}
650
651static bool free_pages_prepare(struct page *page, unsigned int order)
652{
653	int i;
654	int bad = 0;
655
656	trace_mm_page_free_direct(page, order);
657	kmemcheck_free_shadow(page, order);
658
659	if (PageAnon(page))
660		page->mapping = NULL;
661	for (i = 0; i < (1 << order); i++)
662		bad += free_pages_check(page + i);
663	if (bad)
664		return false;
665
666	if (!PageHighMem(page)) {
667		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
668		debug_check_no_obj_freed(page_address(page),
669					   PAGE_SIZE << order);
670	}
671	arch_free_page(page, order);
672	kernel_map_pages(page, 1 << order, 0);
673
674	return true;
675}
676
677static void __free_pages_ok(struct page *page, unsigned int order)
678{
679	unsigned long flags;
680	int wasMlocked = __TestClearPageMlocked(page);
681
682	if (!free_pages_prepare(page, order))
683		return;
684
685	local_irq_save(flags);
686	if (unlikely(wasMlocked))
687		free_page_mlock(page);
688	__count_vm_events(PGFREE, 1 << order);
689	free_one_page(page_zone(page), page, order,
690					get_pageblock_migratetype(page));
691	local_irq_restore(flags);
692}
693
694/*
695 * permit the bootmem allocator to evade page validation on high-order frees
696 */
697void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
698{
699	if (order == 0) {
700		__ClearPageReserved(page);
701		set_page_count(page, 0);
702		set_page_refcounted(page);
703		__free_page(page);
704	} else {
705		int loop;
706
707		prefetchw(page);
708		for (loop = 0; loop < BITS_PER_LONG; loop++) {
709			struct page *p = &page[loop];
710
711			if (loop + 1 < BITS_PER_LONG)
712				prefetchw(p + 1);
713			__ClearPageReserved(p);
714			set_page_count(p, 0);
715		}
716
717		set_page_refcounted(page);
718		__free_pages(page, order);
719	}
720}
721
722
723/*
724 * The order of subdivision here is critical for the IO subsystem.
725 * Please do not alter this order without good reasons and regression
726 * testing. Specifically, as large blocks of memory are subdivided,
727 * the order in which smaller blocks are delivered depends on the order
728 * they're subdivided in this function. This is the primary factor
729 * influencing the order in which pages are delivered to the IO
730 * subsystem according to empirical testing, and this is also justified
731 * by considering the behavior of a buddy system containing a single
732 * large block of memory acted on by a series of small allocations.
733 * This behavior is a critical factor in sglist merging's success.
734 *
735 * -- wli
736 */
737static inline void expand(struct zone *zone, struct page *page,
738	int low, int high, struct free_area *area,
739	int migratetype)
740{
741	unsigned long size = 1 << high;
742
743	while (high > low) {
744		area--;
745		high--;
746		size >>= 1;
747		VM_BUG_ON(bad_range(zone, &page[size]));
748		list_add(&page[size].lru, &area->free_list[migratetype]);
749		area->nr_free++;
750		set_page_order(&page[size], high);
751	}
752}
753
754/*
755 * This page is about to be returned from the page allocator
756 */
757static inline int check_new_page(struct page *page)
758{
759	if (unlikely(page_mapcount(page) |
760		(page->mapping != NULL)  |
761		(atomic_read(&page->_count) != 0)  |
762		(page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
763		(mem_cgroup_bad_page_check(page)))) {
764		bad_page(page);
765		return 1;
766	}
767	return 0;
768}
769
770static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
771{
772	int i;
773
774	for (i = 0; i < (1 << order); i++) {
775		struct page *p = page + i;
776		if (unlikely(check_new_page(p)))
777			return 1;
778	}
779
780	set_page_private(page, 0);
781	set_page_refcounted(page);
782
783	arch_alloc_page(page, order);
784	kernel_map_pages(page, 1 << order, 1);
785
786	if (gfp_flags & __GFP_ZERO)
787		prep_zero_page(page, order, gfp_flags);
788
789	if (order && (gfp_flags & __GFP_COMP))
790		prep_compound_page(page, order);
791
792	return 0;
793}
794
795/*
796 * Go through the free lists for the given migratetype and remove
797 * the smallest available page from the freelists
798 */
799static inline
800struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
801						int migratetype)
802{
803	unsigned int current_order;
804	struct free_area * area;
805	struct page *page;
806
807	/* Find a page of the appropriate size in the preferred list */
808	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
809		area = &(zone->free_area[current_order]);
810		if (list_empty(&area->free_list[migratetype]))
811			continue;
812
813		page = list_entry(area->free_list[migratetype].next,
814							struct page, lru);
815		list_del(&page->lru);
816		rmv_page_order(page);
817		area->nr_free--;
818		expand(zone, page, order, current_order, area, migratetype);
819		return page;
820	}
821
822	return NULL;
823}
824
825
826/*
827 * This array describes the order lists are fallen back to when
828 * the free lists for the desirable migrate type are depleted
829 */
830static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
831	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
832	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
833	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
834	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
835};
836
837/*
838 * Move the free pages in a range to the free lists of the requested type.
839 * Note that start_page and end_pages are not aligned on a pageblock
840 * boundary. If alignment is required, use move_freepages_block()
841 */
842static int move_freepages(struct zone *zone,
843			  struct page *start_page, struct page *end_page,
844			  int migratetype)
845{
846	struct page *page;
847	unsigned long order;
848	int pages_moved = 0;
849
850#ifndef CONFIG_HOLES_IN_ZONE
851	/*
852	 * page_zone is not safe to call in this context when
853	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
854	 * anyway as we check zone boundaries in move_freepages_block().
855	 * Remove at a later date when no bug reports exist related to
856	 * grouping pages by mobility
857	 */
858	BUG_ON(page_zone(start_page) != page_zone(end_page));
859#endif
860
861	for (page = start_page; page <= end_page;) {
862		/* Make sure we are not inadvertently changing nodes */
863		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
864
865		if (!pfn_valid_within(page_to_pfn(page))) {
866			page++;
867			continue;
868		}
869
870		if (!PageBuddy(page)) {
871			page++;
872			continue;
873		}
874
875		order = page_order(page);
876		list_move(&page->lru,
877			  &zone->free_area[order].free_list[migratetype]);
878		page += 1 << order;
879		pages_moved += 1 << order;
880	}
881
882	return pages_moved;
883}
884
885static int move_freepages_block(struct zone *zone, struct page *page,
886				int migratetype)
887{
888	unsigned long start_pfn, end_pfn;
889	struct page *start_page, *end_page;
890
891	start_pfn = page_to_pfn(page);
892	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
893	start_page = pfn_to_page(start_pfn);
894	end_page = start_page + pageblock_nr_pages - 1;
895	end_pfn = start_pfn + pageblock_nr_pages - 1;
896
897	/* Do not cross zone boundaries */
898	if (start_pfn < zone->zone_start_pfn)
899		start_page = page;
900	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
901		return 0;
902
903	return move_freepages(zone, start_page, end_page, migratetype);
904}
905
906static void change_pageblock_range(struct page *pageblock_page,
907					int start_order, int migratetype)
908{
909	int nr_pageblocks = 1 << (start_order - pageblock_order);
910
911	while (nr_pageblocks--) {
912		set_pageblock_migratetype(pageblock_page, migratetype);
913		pageblock_page += pageblock_nr_pages;
914	}
915}
916
917/* Remove an element from the buddy allocator from the fallback list */
918static inline struct page *
919__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
920{
921	struct free_area * area;
922	int current_order;
923	struct page *page;
924	int migratetype, i;
925
926	/* Find the largest possible block of pages in the other list */
927	for (current_order = MAX_ORDER-1; current_order >= order;
928						--current_order) {
929		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
930			migratetype = fallbacks[start_migratetype][i];
931
932			/* MIGRATE_RESERVE handled later if necessary */
933			if (migratetype == MIGRATE_RESERVE)
934				continue;
935
936			area = &(zone->free_area[current_order]);
937			if (list_empty(&area->free_list[migratetype]))
938				continue;
939
940			page = list_entry(area->free_list[migratetype].next,
941					struct page, lru);
942			area->nr_free--;
943
944			/*
945			 * If breaking a large block of pages, move all free
946			 * pages to the preferred allocation list. If falling
947			 * back for a reclaimable kernel allocation, be more
948			 * aggressive about taking ownership of free pages
949			 */
950			if (unlikely(current_order >= (pageblock_order >> 1)) ||
951					start_migratetype == MIGRATE_RECLAIMABLE ||
952					page_group_by_mobility_disabled) {
953				unsigned long pages;
954				pages = move_freepages_block(zone, page,
955								start_migratetype);
956
957				/* Claim the whole block if over half of it is free */
958				if (pages >= (1 << (pageblock_order-1)) ||
959						page_group_by_mobility_disabled)
960					set_pageblock_migratetype(page,
961								start_migratetype);
962
963				migratetype = start_migratetype;
964			}
965
966			/* Remove the page from the freelists */
967			list_del(&page->lru);
968			rmv_page_order(page);
969
970			/* Take ownership for orders >= pageblock_order */
971			if (current_order >= pageblock_order)
972				change_pageblock_range(page, current_order,
973							start_migratetype);
974
975			expand(zone, page, order, current_order, area, migratetype);
976
977			trace_mm_page_alloc_extfrag(page, order, current_order,
978				start_migratetype, migratetype);
979
980			return page;
981		}
982	}
983
984	return NULL;
985}
986
987/*
988 * Do the hard work of removing an element from the buddy allocator.
989 * Call me with the zone->lock already held.
990 */
991static struct page *__rmqueue(struct zone *zone, unsigned int order,
992						int migratetype)
993{
994	struct page *page;
995
996retry_reserve:
997	page = __rmqueue_smallest(zone, order, migratetype);
998
999	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
1000		page = __rmqueue_fallback(zone, order, migratetype);
1001
1002		/*
1003		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
1004		 * is used because __rmqueue_smallest is an inline function
1005		 * and we want just one call site
1006		 */
1007		if (!page) {
1008			migratetype = MIGRATE_RESERVE;
1009			goto retry_reserve;
1010		}
1011	}
1012
1013	trace_mm_page_alloc_zone_locked(page, order, migratetype);
1014	return page;
1015}
1016
1017/*
1018 * Obtain a specified number of elements from the buddy allocator, all under
1019 * a single hold of the lock, for efficiency.  Add them to the supplied list.
1020 * Returns the number of new pages which were placed at *list.
1021 */
1022static int rmqueue_bulk(struct zone *zone, unsigned int order,
1023			unsigned long count, struct list_head *list,
1024			int migratetype, int cold)
1025{
1026	int i;
1027
1028	spin_lock(&zone->lock);
1029	for (i = 0; i < count; ++i) {
1030		struct page *page = __rmqueue(zone, order, migratetype);
1031		if (unlikely(page == NULL))
1032			break;
1033
1034		/*
1035		 * Split buddy pages returned by expand() are received here
1036		 * in physical page order. The page is added to the callers and
1037		 * list and the list head then moves forward. From the callers
1038		 * perspective, the linked list is ordered by page number in
1039		 * some conditions. This is useful for IO devices that can
1040		 * merge IO requests if the physical pages are ordered
1041		 * properly.
1042		 */
1043		if (likely(cold == 0))
1044			list_add(&page->lru, list);
1045		else
1046			list_add_tail(&page->lru, list);
1047		set_page_private(page, migratetype);
1048		list = &page->lru;
1049	}
1050	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1051	spin_unlock(&zone->lock);
1052	return i;
1053}
1054
1055#ifdef CONFIG_NUMA
1056/*
1057 * Called from the vmstat counter updater to drain pagesets of this
1058 * currently executing processor on remote nodes after they have
1059 * expired.
1060 *
1061 * Note that this function must be called with the thread pinned to
1062 * a single processor.
1063 */
1064void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1065{
1066	unsigned long flags;
1067	int to_drain;
1068
1069	local_irq_save(flags);
1070	if (pcp->count >= pcp->batch)
1071		to_drain = pcp->batch;
1072	else
1073		to_drain = pcp->count;
1074	free_pcppages_bulk(zone, to_drain, pcp);
1075	pcp->count -= to_drain;
1076	local_irq_restore(flags);
1077}
1078#endif
1079
1080/*
1081 * Drain pages of the indicated processor.
1082 *
1083 * The processor must either be the current processor and the
1084 * thread pinned to the current processor or a processor that
1085 * is not online.
1086 */
1087static void drain_pages(unsigned int cpu)
1088{
1089	unsigned long flags;
1090	struct zone *zone;
1091
1092	for_each_populated_zone(zone) {
1093		struct per_cpu_pageset *pset;
1094		struct per_cpu_pages *pcp;
1095
1096		local_irq_save(flags);
1097		pset = per_cpu_ptr(zone->pageset, cpu);
1098
1099		pcp = &pset->pcp;
1100		if (pcp->count) {
1101			free_pcppages_bulk(zone, pcp->count, pcp);
1102			pcp->count = 0;
1103		}
1104		local_irq_restore(flags);
1105	}
1106}
1107
1108/*
1109 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1110 */
1111void drain_local_pages(void *arg)
1112{
1113	drain_pages(smp_processor_id());
1114}
1115
1116/*
1117 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
1118 */
1119void drain_all_pages(void)
1120{
1121	on_each_cpu(drain_local_pages, NULL, 1);
1122}
1123
1124#ifdef CONFIG_HIBERNATION
1125
1126void mark_free_pages(struct zone *zone)
1127{
1128	unsigned long pfn, max_zone_pfn;
1129	unsigned long flags;
1130	int order, t;
1131	struct list_head *curr;
1132
1133	if (!zone->spanned_pages)
1134		return;
1135
1136	spin_lock_irqsave(&zone->lock, flags);
1137
1138	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1139	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1140		if (pfn_valid(pfn)) {
1141			struct page *page = pfn_to_page(pfn);
1142
1143			if (!swsusp_page_is_forbidden(page))
1144				swsusp_unset_page_free(page);
1145		}
1146
1147	for_each_migratetype_order(order, t) {
1148		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1149			unsigned long i;
1150
1151			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1152			for (i = 0; i < (1UL << order); i++)
1153				swsusp_set_page_free(pfn_to_page(pfn + i));
1154		}
1155	}
1156	spin_unlock_irqrestore(&zone->lock, flags);
1157}
1158#endif /* CONFIG_PM */
1159
1160/*
1161 * Free a 0-order page
1162 * cold == 1 ? free a cold page : free a hot page
1163 */
1164void free_hot_cold_page(struct page *page, int cold)
1165{
1166	struct zone *zone = page_zone(page);
1167	struct per_cpu_pages *pcp;
1168	unsigned long flags;
1169	int migratetype;
1170	int wasMlocked = __TestClearPageMlocked(page);
1171
1172	if (!free_pages_prepare(page, 0))
1173		return;
1174
1175	migratetype = get_pageblock_migratetype(page);
1176	set_page_private(page, migratetype);
1177	local_irq_save(flags);
1178	if (unlikely(wasMlocked))
1179		free_page_mlock(page);
1180	__count_vm_event(PGFREE);
1181
1182	/*
1183	 * We only track unmovable, reclaimable and movable on pcp lists.
1184	 * Free ISOLATE pages back to the allocator because they are being
1185	 * offlined but treat RESERVE as movable pages so we can get those
1186	 * areas back if necessary. Otherwise, we may have to free
1187	 * excessively into the page allocator
1188	 */
1189	if (migratetype >= MIGRATE_PCPTYPES) {
1190		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1191			free_one_page(zone, page, 0, migratetype);
1192			goto out;
1193		}
1194		migratetype = MIGRATE_MOVABLE;
1195	}
1196
1197	pcp = &this_cpu_ptr(zone->pageset)->pcp;
1198	if (cold)
1199		list_add_tail(&page->lru, &pcp->lists[migratetype]);
1200	else
1201		list_add(&page->lru, &pcp->lists[migratetype]);
1202	pcp->count++;
1203	if (pcp->count >= pcp->high) {
1204		free_pcppages_bulk(zone, pcp->batch, pcp);
1205		pcp->count -= pcp->batch;
1206	}
1207
1208out:
1209	local_irq_restore(flags);
1210}
1211
1212/*
1213 * split_page takes a non-compound higher-order page, and splits it into
1214 * n (1<<order) sub-pages: page[0..n]
1215 * Each sub-page must be freed individually.
1216 *
1217 * Note: this is probably too low level an operation for use in drivers.
1218 * Please consult with lkml before using this in your driver.
1219 */
1220void split_page(struct page *page, unsigned int order)
1221{
1222	int i;
1223
1224	VM_BUG_ON(PageCompound(page));
1225	VM_BUG_ON(!page_count(page));
1226
1227#ifdef CONFIG_KMEMCHECK
1228	/*
1229	 * Split shadow pages too, because free(page[0]) would
1230	 * otherwise free the whole shadow.
1231	 */
1232	if (kmemcheck_page_is_tracked(page))
1233		split_page(virt_to_page(page[0].shadow), order);
1234#endif
1235
1236	for (i = 1; i < (1 << order); i++)
1237		set_page_refcounted(page + i);
1238}
1239
1240/*
1241 * Similar to split_page except the page is already free. As this is only
1242 * being used for migration, the migratetype of the block also changes.
1243 * As this is called with interrupts disabled, the caller is responsible
1244 * for calling arch_alloc_page() and kernel_map_page() after interrupts
1245 * are enabled.
1246 *
1247 * Note: this is probably too low level an operation for use in drivers.
1248 * Please consult with lkml before using this in your driver.
1249 */
1250int split_free_page(struct page *page)
1251{
1252	unsigned int order;
1253	unsigned long watermark;
1254	struct zone *zone;
1255
1256	BUG_ON(!PageBuddy(page));
1257
1258	zone = page_zone(page);
1259	order = page_order(page);
1260
1261	/* Obey watermarks as if the page was being allocated */
1262	watermark = low_wmark_pages(zone) + (1 << order);
1263	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1264		return 0;
1265
1266	/* Remove page from free list */
1267	list_del(&page->lru);
1268	zone->free_area[order].nr_free--;
1269	rmv_page_order(page);
1270	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
1271
1272	/* Split into individual pages */
1273	set_page_refcounted(page);
1274	split_page(page, order);
1275
1276	if (order >= pageblock_order - 1) {
1277		struct page *endpage = page + (1 << order) - 1;
1278		for (; page < endpage; page += pageblock_nr_pages)
1279			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1280	}
1281
1282	return 1 << order;
1283}
1284
1285/*
1286 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1287 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1288 * or two.
1289 */
1290static inline
1291struct page *buffered_rmqueue(struct zone *preferred_zone,
1292			struct zone *zone, int order, gfp_t gfp_flags,
1293			int migratetype)
1294{
1295	unsigned long flags;
1296	struct page *page;
1297	int cold = !!(gfp_flags & __GFP_COLD);
1298
1299again:
1300	if (likely(order == 0)) {
1301		struct per_cpu_pages *pcp;
1302		struct list_head *list;
1303
1304		local_irq_save(flags);
1305		pcp = &this_cpu_ptr(zone->pageset)->pcp;
1306		list = &pcp->lists[migratetype];
1307		if (list_empty(list)) {
1308			pcp->count += rmqueue_bulk(zone, 0,
1309					pcp->batch, list,
1310					migratetype, cold);
1311			if (unlikely(list_empty(list)))
1312				goto failed;
1313		}
1314
1315		if (cold)
1316			page = list_entry(list->prev, struct page, lru);
1317		else
1318			page = list_entry(list->next, struct page, lru);
1319
1320		list_del(&page->lru);
1321		pcp->count--;
1322	} else {
1323		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1324			/*
1325			 * __GFP_NOFAIL is not to be used in new code.
1326			 *
1327			 * All __GFP_NOFAIL callers should be fixed so that they
1328			 * properly detect and handle allocation failures.
1329			 *
1330			 * We most definitely don't want callers attempting to
1331			 * allocate greater than order-1 page units with
1332			 * __GFP_NOFAIL.
1333			 */
1334			WARN_ON_ONCE(order > 1);
1335		}
1336		spin_lock_irqsave(&zone->lock, flags);
1337		page = __rmqueue(zone, order, migratetype);
1338		spin_unlock(&zone->lock);
1339		if (!page)
1340			goto failed;
1341		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1342	}
1343
1344	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1345	zone_statistics(preferred_zone, zone, gfp_flags);
1346	local_irq_restore(flags);
1347
1348	VM_BUG_ON(bad_range(zone, page));
1349	if (prep_new_page(page, order, gfp_flags))
1350		goto again;
1351	return page;
1352
1353failed:
1354	local_irq_restore(flags);
1355	return NULL;
1356}
1357
1358/* The ALLOC_WMARK bits are used as an index to zone->watermark */
1359#define ALLOC_WMARK_MIN		WMARK_MIN
1360#define ALLOC_WMARK_LOW		WMARK_LOW
1361#define ALLOC_WMARK_HIGH	WMARK_HIGH
1362#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1363
1364/* Mask to get the watermark bits */
1365#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1366
1367#define ALLOC_HARDER		0x10 /* try to alloc harder */
1368#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1369#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1370
1371#ifdef CONFIG_FAIL_PAGE_ALLOC
1372
1373static struct {
1374	struct fault_attr attr;
1375
1376	u32 ignore_gfp_highmem;
1377	u32 ignore_gfp_wait;
1378	u32 min_order;
1379} fail_page_alloc = {
1380	.attr = FAULT_ATTR_INITIALIZER,
1381	.ignore_gfp_wait = 1,
1382	.ignore_gfp_highmem = 1,
1383	.min_order = 1,
1384};
1385
1386static int __init setup_fail_page_alloc(char *str)
1387{
1388	return setup_fault_attr(&fail_page_alloc.attr, str);
1389}
1390__setup("fail_page_alloc=", setup_fail_page_alloc);
1391
1392static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1393{
1394	if (order < fail_page_alloc.min_order)
1395		return 0;
1396	if (gfp_mask & __GFP_NOFAIL)
1397		return 0;
1398	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1399		return 0;
1400	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1401		return 0;
1402
1403	return should_fail(&fail_page_alloc.attr, 1 << order);
1404}
1405
1406#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1407
1408static int __init fail_page_alloc_debugfs(void)
1409{
1410	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1411	struct dentry *dir;
1412	int err;
1413
1414	err = init_fault_attr_dentries(&fail_page_alloc.attr,
1415				       "fail_page_alloc");
1416	if (err)
1417		return err;
1418
1419	dir = fail_page_alloc.attr.dir;
1420
1421	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
1422				&fail_page_alloc.ignore_gfp_wait))
1423		goto fail;
1424	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1425				&fail_page_alloc.ignore_gfp_highmem))
1426		goto fail;
1427	if (!debugfs_create_u32("min-order", mode, dir,
1428				&fail_page_alloc.min_order))
1429		goto fail;
1430
1431	return 0;
1432fail:
1433	cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1434
1435	return -ENOMEM;
1436}
1437
1438late_initcall(fail_page_alloc_debugfs);
1439
1440#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1441
1442#else /* CONFIG_FAIL_PAGE_ALLOC */
1443
1444static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1445{
1446	return 0;
1447}
1448
1449#endif /* CONFIG_FAIL_PAGE_ALLOC */
1450
1451/*
1452 * Return true if free pages are above 'mark'. This takes into account the order
1453 * of the allocation.
1454 */
1455static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1456		      int classzone_idx, int alloc_flags, long free_pages)
1457{
1458	/* free_pages my go negative - that's OK */
1459	long min = mark;
1460	int o;
1461
1462	free_pages -= (1 << order) + 1;
1463	if (alloc_flags & ALLOC_HIGH)
1464		min -= min / 2;
1465	if (alloc_flags & ALLOC_HARDER)
1466		min -= min / 4;
1467
1468	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1469		return false;
1470	for (o = 0; o < order; o++) {
1471		/* At the next order, this order's pages become unavailable */
1472		free_pages -= z->free_area[o].nr_free << o;
1473
1474		/* Require fewer higher order pages to be free */
1475		min >>= 1;
1476
1477		if (free_pages <= min)
1478			return false;
1479	}
1480	return true;
1481}
1482
1483bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1484		      int classzone_idx, int alloc_flags)
1485{
1486	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1487					zone_page_state(z, NR_FREE_PAGES));
1488}
1489
1490bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1491		      int classzone_idx, int alloc_flags)
1492{
1493	long free_pages = zone_page_state(z, NR_FREE_PAGES);
1494
1495	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1496		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1497
1498	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1499								free_pages);
1500}
1501
1502#ifdef CONFIG_NUMA
1503/*
1504 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1505 * skip over zones that are not allowed by the cpuset, or that have
1506 * been recently (in last second) found to be nearly full.  See further
1507 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1508 * that have to skip over a lot of full or unallowed zones.
1509 *
1510 * If the zonelist cache is present in the passed in zonelist, then
1511 * returns a pointer to the allowed node mask (either the current
1512 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
1513 *
1514 * If the zonelist cache is not available for this zonelist, does
1515 * nothing and returns NULL.
1516 *
1517 * If the fullzones BITMAP in the zonelist cache is stale (more than
1518 * a second since last zap'd) then we zap it out (clear its bits.)
1519 *
1520 * We hold off even calling zlc_setup, until after we've checked the
1521 * first zone in the zonelist, on the theory that most allocations will
1522 * be satisfied from that first zone, so best to examine that zone as
1523 * quickly as we can.
1524 */
1525static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1526{
1527	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1528	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1529
1530	zlc = zonelist->zlcache_ptr;
1531	if (!zlc)
1532		return NULL;
1533
1534	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1535		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1536		zlc->last_full_zap = jiffies;
1537	}
1538
1539	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1540					&cpuset_current_mems_allowed :
1541					&node_states[N_HIGH_MEMORY];
1542	return allowednodes;
1543}
1544
1545/*
1546 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1547 * if it is worth looking at further for free memory:
1548 *  1) Check that the zone isn't thought to be full (doesn't have its
1549 *     bit set in the zonelist_cache fullzones BITMAP).
1550 *  2) Check that the zones node (obtained from the zonelist_cache
1551 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1552 * Return true (non-zero) if zone is worth looking at further, or
1553 * else return false (zero) if it is not.
1554 *
1555 * This check -ignores- the distinction between various watermarks,
1556 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1557 * found to be full for any variation of these watermarks, it will
1558 * be considered full for up to one second by all requests, unless
1559 * we are so low on memory on all allowed nodes that we are forced
1560 * into the second scan of the zonelist.
1561 *
1562 * In the second scan we ignore this zonelist cache and exactly
1563 * apply the watermarks to all zones, even it is slower to do so.
1564 * We are low on memory in the second scan, and should leave no stone
1565 * unturned looking for a free page.
1566 */
1567static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1568						nodemask_t *allowednodes)
1569{
1570	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1571	int i;				/* index of *z in zonelist zones */
1572	int n;				/* node that zone *z is on */
1573
1574	zlc = zonelist->zlcache_ptr;
1575	if (!zlc)
1576		return 1;
1577
1578	i = z - zonelist->_zonerefs;
1579	n = zlc->z_to_n[i];
1580
1581	/* This zone is worth trying if it is allowed but not full */
1582	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1583}
1584
1585/*
1586 * Given 'z' scanning a zonelist, set the corresponding bit in
1587 * zlc->fullzones, so that subsequent attempts to allocate a page
1588 * from that zone don't waste time re-examining it.
1589 */
1590static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1591{
1592	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1593	int i;				/* index of *z in zonelist zones */
1594
1595	zlc = zonelist->zlcache_ptr;
1596	if (!zlc)
1597		return;
1598
1599	i = z - zonelist->_zonerefs;
1600
1601	set_bit(i, zlc->fullzones);
1602}
1603
1604/*
1605 * clear all zones full, called after direct reclaim makes progress so that
1606 * a zone that was recently full is not skipped over for up to a second
1607 */
1608static void zlc_clear_zones_full(struct zonelist *zonelist)
1609{
1610	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1611
1612	zlc = zonelist->zlcache_ptr;
1613	if (!zlc)
1614		return;
1615
1616	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1617}
1618
1619#else	/* CONFIG_NUMA */
1620
1621static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1622{
1623	return NULL;
1624}
1625
1626static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1627				nodemask_t *allowednodes)
1628{
1629	return 1;
1630}
1631
1632static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1633{
1634}
1635
1636static void zlc_clear_zones_full(struct zonelist *zonelist)
1637{
1638}
1639#endif	/* CONFIG_NUMA */
1640
1641/*
1642 * get_page_from_freelist goes through the zonelist trying to allocate
1643 * a page.
1644 */
1645static struct page *
1646get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1647		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1648		struct zone *preferred_zone, int migratetype)
1649{
1650	struct zoneref *z;
1651	struct page *page = NULL;
1652	int classzone_idx;
1653	struct zone *zone;
1654	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1655	int zlc_active = 0;		/* set if using zonelist_cache */
1656	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1657
1658	classzone_idx = zone_idx(preferred_zone);
1659zonelist_scan:
1660	/*
1661	 * Scan zonelist, looking for a zone with enough free.
1662	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1663	 */
1664	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1665						high_zoneidx, nodemask) {
1666		if (NUMA_BUILD && zlc_active &&
1667			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1668				continue;
1669		if ((alloc_flags & ALLOC_CPUSET) &&
1670			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1671				continue;
1672
1673		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1674		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1675			unsigned long mark;
1676			int ret;
1677
1678			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1679			if (zone_watermark_ok(zone, order, mark,
1680				    classzone_idx, alloc_flags))
1681				goto try_this_zone;
1682
1683			if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1684				/*
1685				 * we do zlc_setup if there are multiple nodes
1686				 * and before considering the first zone allowed
1687				 * by the cpuset.
1688				 */
1689				allowednodes = zlc_setup(zonelist, alloc_flags);
1690				zlc_active = 1;
1691				did_zlc_setup = 1;
1692			}
1693
1694			if (zone_reclaim_mode == 0)
1695				goto this_zone_full;
1696
1697			/*
1698			 * As we may have just activated ZLC, check if the first
1699			 * eligible zone has failed zone_reclaim recently.
1700			 */
1701			if (NUMA_BUILD && zlc_active &&
1702				!zlc_zone_worth_trying(zonelist, z, allowednodes))
1703				continue;
1704
1705			ret = zone_reclaim(zone, gfp_mask, order);
1706			switch (ret) {
1707			case ZONE_RECLAIM_NOSCAN:
1708				/* did not scan */
1709				continue;
1710			case ZONE_RECLAIM_FULL:
1711				/* scanned but unreclaimable */
1712				continue;
1713			default:
1714				/* did we reclaim enough */
1715				if (!zone_watermark_ok(zone, order, mark,
1716						classzone_idx, alloc_flags))
1717					goto this_zone_full;
1718			}
1719		}
1720
1721try_this_zone:
1722		page = buffered_rmqueue(preferred_zone, zone, order,
1723						gfp_mask, migratetype);
1724		if (page)
1725			break;
1726this_zone_full:
1727		if (NUMA_BUILD)
1728			zlc_mark_zone_full(zonelist, z);
1729	}
1730
1731	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1732		/* Disable zlc cache for second zonelist scan */
1733		zlc_active = 0;
1734		goto zonelist_scan;
1735	}
1736	return page;
1737}
1738
1739/*
1740 * Large machines with many possible nodes should not always dump per-node
1741 * meminfo in irq context.
1742 */
1743static inline bool should_suppress_show_mem(void)
1744{
1745	bool ret = false;
1746
1747#if NODES_SHIFT > 8
1748	ret = in_interrupt();
1749#endif
1750	return ret;
1751}
1752
1753static DEFINE_RATELIMIT_STATE(nopage_rs,
1754		DEFAULT_RATELIMIT_INTERVAL,
1755		DEFAULT_RATELIMIT_BURST);
1756
1757void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
1758{
1759	va_list args;
1760	unsigned int filter = SHOW_MEM_FILTER_NODES;
1761
1762	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
1763		return;
1764
1765	/*
1766	 * This documents exceptions given to allocations in certain
1767	 * contexts that are allowed to allocate outside current's set
1768	 * of allowed nodes.
1769	 */
1770	if (!(gfp_mask & __GFP_NOMEMALLOC))
1771		if (test_thread_flag(TIF_MEMDIE) ||
1772		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
1773			filter &= ~SHOW_MEM_FILTER_NODES;
1774	if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
1775		filter &= ~SHOW_MEM_FILTER_NODES;
1776
1777	if (fmt) {
1778		printk(KERN_WARNING);
1779		va_start(args, fmt);
1780		vprintk(fmt, args);
1781		va_end(args);
1782	}
1783
1784	pr_warning("%s: page allocation failure: order:%d, mode:0x%x\n",
1785		   current->comm, order, gfp_mask);
1786
1787	dump_stack();
1788	if (!should_suppress_show_mem())
1789		show_mem(filter);
1790}
1791
1792static inline int
1793should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1794				unsigned long pages_reclaimed)
1795{
1796	/* Do not loop if specifically requested */
1797	if (gfp_mask & __GFP_NORETRY)
1798		return 0;
1799
1800	/*
1801	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1802	 * means __GFP_NOFAIL, but that may not be true in other
1803	 * implementations.
1804	 */
1805	if (order <= PAGE_ALLOC_COSTLY_ORDER)
1806		return 1;
1807
1808	/*
1809	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1810	 * specified, then we retry until we no longer reclaim any pages
1811	 * (above), or we've reclaimed an order of pages at least as
1812	 * large as the allocation's order. In both cases, if the
1813	 * allocation still fails, we stop retrying.
1814	 */
1815	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1816		return 1;
1817
1818	/*
1819	 * Don't let big-order allocations loop unless the caller
1820	 * explicitly requests that.
1821	 */
1822	if (gfp_mask & __GFP_NOFAIL)
1823		return 1;
1824
1825	return 0;
1826}
1827
1828static inline struct page *
1829__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1830	struct zonelist *zonelist, enum zone_type high_zoneidx,
1831	nodemask_t *nodemask, struct zone *preferred_zone,
1832	int migratetype)
1833{
1834	struct page *page;
1835
1836	/* Acquire the OOM killer lock for the zones in zonelist */
1837	if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
1838		schedule_timeout_uninterruptible(1);
1839		return NULL;
1840	}
1841
1842	/*
1843	 * Go through the zonelist yet one more time, keep very high watermark
1844	 * here, this is only to catch a parallel oom killing, we must fail if
1845	 * we're still under heavy pressure.
1846	 */
1847	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1848		order, zonelist, high_zoneidx,
1849		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1850		preferred_zone, migratetype);
1851	if (page)
1852		goto out;
1853
1854	if (!(gfp_mask & __GFP_NOFAIL)) {
1855		/* The OOM killer will not help higher order allocs */
1856		if (order > PAGE_ALLOC_COSTLY_ORDER)
1857			goto out;
1858		/* The OOM killer does not needlessly kill tasks for lowmem */
1859		if (high_zoneidx < ZONE_NORMAL)
1860			goto out;
1861		/*
1862		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
1863		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
1864		 * The caller should handle page allocation failure by itself if
1865		 * it specifies __GFP_THISNODE.
1866		 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
1867		 */
1868		if (gfp_mask & __GFP_THISNODE)
1869			goto out;
1870	}
1871	/* Exhausted what can be done so it's blamo time */
1872	out_of_memory(zonelist, gfp_mask, order, nodemask);
1873
1874out:
1875	clear_zonelist_oom(zonelist, gfp_mask);
1876	return page;
1877}
1878
1879#ifdef CONFIG_COMPACTION
1880/* Try memory compaction for high-order allocations before reclaim */
1881static struct page *
1882__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1883	struct zonelist *zonelist, enum zone_type high_zoneidx,
1884	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1885	int migratetype, unsigned long *did_some_progress,
1886	bool sync_migration)
1887{
1888	struct page *page;
1889
1890	if (!order || compaction_deferred(preferred_zone))
1891		return NULL;
1892
1893	current->flags |= PF_MEMALLOC;
1894	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
1895						nodemask, sync_migration);
1896	current->flags &= ~PF_MEMALLOC;
1897	if (*did_some_progress != COMPACT_SKIPPED) {
1898
1899		/* Page migration frees to the PCP lists but we want merging */
1900		drain_pages(get_cpu());
1901		put_cpu();
1902
1903		page = get_page_from_freelist(gfp_mask, nodemask,
1904				order, zonelist, high_zoneidx,
1905				alloc_flags, preferred_zone,
1906				migratetype);
1907		if (page) {
1908			preferred_zone->compact_considered = 0;
1909			preferred_zone->compact_defer_shift = 0;
1910			count_vm_event(COMPACTSUCCESS);
1911			return page;
1912		}
1913
1914		/*
1915		 * It's bad if compaction run occurs and fails.
1916		 * The most likely reason is that pages exist,
1917		 * but not enough to satisfy watermarks.
1918		 */
1919		count_vm_event(COMPACTFAIL);
1920		defer_compaction(preferred_zone);
1921
1922		cond_resched();
1923	}
1924
1925	return NULL;
1926}
1927#else
1928static inline struct page *
1929__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1930	struct zonelist *zonelist, enum zone_type high_zoneidx,
1931	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1932	int migratetype, unsigned long *did_some_progress,
1933	bool sync_migration)
1934{
1935	return NULL;
1936}
1937#endif /* CONFIG_COMPACTION */
1938
1939/* The really slow allocator path where we enter direct reclaim */
1940static inline struct page *
1941__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1942	struct zonelist *zonelist, enum zone_type high_zoneidx,
1943	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1944	int migratetype, unsigned long *did_some_progress)
1945{
1946	struct page *page = NULL;
1947	struct reclaim_state reclaim_state;
1948	bool drained = false;
1949
1950	cond_resched();
1951
1952	/* We now go into synchronous reclaim */
1953	cpuset_memory_pressure_bump();
1954	current->flags |= PF_MEMALLOC;
1955	lockdep_set_current_reclaim_state(gfp_mask);
1956	reclaim_state.reclaimed_slab = 0;
1957	current->reclaim_state = &reclaim_state;
1958
1959	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1960
1961	current->reclaim_state = NULL;
1962	lockdep_clear_current_reclaim_state();
1963	current->flags &= ~PF_MEMALLOC;
1964
1965	cond_resched();
1966
1967	if (unlikely(!(*did_some_progress)))
1968		return NULL;
1969
1970	/* After successful reclaim, reconsider all zones for allocation */
1971	if (NUMA_BUILD)
1972		zlc_clear_zones_full(zonelist);
1973
1974retry:
1975	page = get_page_from_freelist(gfp_mask, nodemask, order,
1976					zonelist, high_zoneidx,
1977					alloc_flags, preferred_zone,
1978					migratetype);
1979
1980	/*
1981	 * If an allocation failed after direct reclaim, it could be because
1982	 * pages are pinned on the per-cpu lists. Drain them and try again
1983	 */
1984	if (!page && !drained) {
1985		drain_all_pages();
1986		drained = true;
1987		goto retry;
1988	}
1989
1990	return page;
1991}
1992
1993/*
1994 * This is called in the allocator slow-path if the allocation request is of
1995 * sufficient urgency to ignore watermarks and take other desperate measures
1996 */
1997static inline struct page *
1998__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1999	struct zonelist *zonelist, enum zone_type high_zoneidx,
2000	nodemask_t *nodemask, struct zone *preferred_zone,
2001	int migratetype)
2002{
2003	struct page *page;
2004
2005	do {
2006		page = get_page_from_freelist(gfp_mask, nodemask, order,
2007			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
2008			preferred_zone, migratetype);
2009
2010		if (!page && gfp_mask & __GFP_NOFAIL)
2011			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2012	} while (!page && (gfp_mask & __GFP_NOFAIL));
2013
2014	return page;
2015}
2016
2017static inline
2018void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
2019						enum zone_type high_zoneidx,
2020						enum zone_type classzone_idx)
2021{
2022	struct zoneref *z;
2023	struct zone *zone;
2024
2025	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
2026		wakeup_kswapd(zone, order, classzone_idx);
2027}
2028
2029static inline int
2030gfp_to_alloc_flags(gfp_t gfp_mask)
2031{
2032	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2033	const gfp_t wait = gfp_mask & __GFP_WAIT;
2034
2035	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2036	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2037
2038	/*
2039	 * The caller may dip into page reserves a bit more if the caller
2040	 * cannot run direct reclaim, or if the caller has realtime scheduling
2041	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
2042	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
2043	 */
2044	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2045
2046	if (!wait) {
2047		/*
2048		 * Not worth trying to allocate harder for
2049		 * __GFP_NOMEMALLOC even if it can't schedule.
2050		 */
2051		if  (!(gfp_mask & __GFP_NOMEMALLOC))
2052			alloc_flags |= ALLOC_HARDER;
2053		/*
2054		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
2055		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
2056		 */
2057		alloc_flags &= ~ALLOC_CPUSET;
2058	} else if (unlikely(rt_task(current)) && !in_interrupt())
2059		alloc_flags |= ALLOC_HARDER;
2060
2061	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2062		if (!in_interrupt() &&
2063		    ((current->flags & PF_MEMALLOC) ||
2064		     unlikely(test_thread_flag(TIF_MEMDIE))))
2065			alloc_flags |= ALLOC_NO_WATERMARKS;
2066	}
2067
2068	return alloc_flags;
2069}
2070
2071static inline struct page *
2072__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2073	struct zonelist *zonelist, enum zone_type high_zoneidx,
2074	nodemask_t *nodemask, struct zone *preferred_zone,
2075	int migratetype)
2076{
2077	const gfp_t wait = gfp_mask & __GFP_WAIT;
2078	struct page *page = NULL;
2079	int alloc_flags;
2080	unsigned long pages_reclaimed = 0;
2081	unsigned long did_some_progress;
2082	bool sync_migration = false;
2083
2084	/*
2085	 * In the slowpath, we sanity check order to avoid ever trying to
2086	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2087	 * be using allocators in order of preference for an area that is
2088	 * too large.
2089	 */
2090	if (order >= MAX_ORDER) {
2091		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2092		return NULL;
2093	}
2094
2095	/*
2096	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2097	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
2098	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
2099	 * using a larger set of nodes after it has established that the
2100	 * allowed per node queues are empty and that nodes are
2101	 * over allocated.
2102	 */
2103	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2104		goto nopage;
2105
2106restart:
2107	if (!(gfp_mask & __GFP_NO_KSWAPD))
2108		wake_all_kswapd(order, zonelist, high_zoneidx,
2109						zone_idx(preferred_zone));
2110
2111	/*
2112	 * OK, we're below the kswapd watermark and have kicked background
2113	 * reclaim. Now things get more complex, so set up alloc_flags according
2114	 * to how we want to proceed.
2115	 */
2116	alloc_flags = gfp_to_alloc_flags(gfp_mask);
2117
2118	/*
2119	 * Find the true preferred zone if the allocation is unconstrained by
2120	 * cpusets.
2121	 */
2122	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2123		first_zones_zonelist(zonelist, high_zoneidx, NULL,
2124					&preferred_zone);
2125
2126rebalance:
2127	/* This is the last chance, in general, before the goto nopage. */
2128	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2129			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2130			preferred_zone, migratetype);
2131	if (page)
2132		goto got_pg;
2133
2134	/* Allocate without watermarks if the context allows */
2135	if (alloc_flags & ALLOC_NO_WATERMARKS) {
2136		page = __alloc_pages_high_priority(gfp_mask, order,
2137				zonelist, high_zoneidx, nodemask,
2138				preferred_zone, migratetype);
2139		if (page)
2140			goto got_pg;
2141	}
2142
2143	/* Atomic allocations - we can't balance anything */
2144	if (!wait)
2145		goto nopage;
2146
2147	/* Avoid recursion of direct reclaim */
2148	if (current->flags & PF_MEMALLOC)
2149		goto nopage;
2150
2151	/* Avoid allocations with no watermarks from looping endlessly */
2152	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2153		goto nopage;
2154
2155	/*
2156	 * Try direct compaction. The first pass is asynchronous. Subsequent
2157	 * attempts after direct reclaim are synchronous
2158	 */
2159	page = __alloc_pages_direct_compact(gfp_mask, order,
2160					zonelist, high_zoneidx,
2161					nodemask,
2162					alloc_flags, preferred_zone,
2163					migratetype, &did_some_progress,
2164					sync_migration);
2165	if (page)
2166		goto got_pg;
2167	sync_migration = true;
2168
2169	/* Try direct reclaim and then allocating */
2170	page = __alloc_pages_direct_reclaim(gfp_mask, order,
2171					zonelist, high_zoneidx,
2172					nodemask,
2173					alloc_flags, preferred_zone,
2174					migratetype, &did_some_progress);
2175	if (page)
2176		goto got_pg;
2177
2178	/*
2179	 * If we failed to make any progress reclaiming, then we are
2180	 * running out of options and have to consider going OOM
2181	 */
2182	if (!did_some_progress) {
2183		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2184			if (oom_killer_disabled)
2185				goto nopage;
2186			page = __alloc_pages_may_oom(gfp_mask, order,
2187					zonelist, high_zoneidx,
2188					nodemask, preferred_zone,
2189					migratetype);
2190			if (page)
2191				goto got_pg;
2192
2193			if (!(gfp_mask & __GFP_NOFAIL)) {
2194				/*
2195				 * The oom killer is not called for high-order
2196				 * allocations that may fail, so if no progress
2197				 * is being made, there are no other options and
2198				 * retrying is unlikely to help.
2199				 */
2200				if (order > PAGE_ALLOC_COSTLY_ORDER)
2201					goto nopage;
2202				/*
2203				 * The oom killer is not called for lowmem
2204				 * allocations to prevent needlessly killing
2205				 * innocent tasks.
2206				 */
2207				if (high_zoneidx < ZONE_NORMAL)
2208					goto nopage;
2209			}
2210
2211			goto restart;
2212		}
2213	}
2214
2215	/* Check if we should retry the allocation */
2216	pages_reclaimed += did_some_progress;
2217	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
2218		/* Wait for some write requests to complete then retry */
2219		wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2220		goto rebalance;
2221	} else {
2222		/*
2223		 * High-order allocations do not necessarily loop after
2224		 * direct reclaim and reclaim/compaction depends on compaction
2225		 * being called after reclaim so call directly if necessary
2226		 */
2227		page = __alloc_pages_direct_compact(gfp_mask, order,
2228					zonelist, high_zoneidx,
2229					nodemask,
2230					alloc_flags, preferred_zone,
2231					migratetype, &did_some_progress,
2232					sync_migration);
2233		if (page)
2234			goto got_pg;
2235	}
2236
2237nopage:
2238	warn_alloc_failed(gfp_mask, order, NULL);
2239	return page;
2240got_pg:
2241	if (kmemcheck_enabled)
2242		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2243	return page;
2244
2245}
2246
2247/*
2248 * This is the 'heart' of the zoned buddy allocator.
2249 */
2250struct page *
2251__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2252			struct zonelist *zonelist, nodemask_t *nodemask)
2253{
2254	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2255	struct zone *preferred_zone;
2256	struct page *page;
2257	int migratetype = allocflags_to_migratetype(gfp_mask);
2258
2259	gfp_mask &= gfp_allowed_mask;
2260
2261	lockdep_trace_alloc(gfp_mask);
2262
2263	might_sleep_if(gfp_mask & __GFP_WAIT);
2264
2265	if (should_fail_alloc_page(gfp_mask, order))
2266		return NULL;
2267
2268	/*
2269	 * Check the zones suitable for the gfp_mask contain at least one
2270	 * valid zone. It's possible to have an empty zonelist as a result
2271	 * of GFP_THISNODE and a memoryless node
2272	 */
2273	if (unlikely(!zonelist->_zonerefs->zone))
2274		return NULL;
2275
2276	get_mems_allowed();
2277	/* The preferred zone is used for statistics later */
2278	first_zones_zonelist(zonelist, high_zoneidx,
2279				nodemask ? : &cpuset_current_mems_allowed,
2280				&preferred_zone);
2281	if (!preferred_zone) {
2282		put_mems_allowed();
2283		return NULL;
2284	}
2285
2286	/* First allocation attempt */
2287	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2288			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
2289			preferred_zone, migratetype);
2290	if (unlikely(!page))
2291		page = __alloc_pages_slowpath(gfp_mask, order,
2292				zonelist, high_zoneidx, nodemask,
2293				preferred_zone, migratetype);
2294	put_mems_allowed();
2295
2296	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2297	return page;
2298}
2299EXPORT_SYMBOL(__alloc_pages_nodemask);
2300
2301/*
2302 * Common helper functions.
2303 */
2304unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2305{
2306	struct page *page;
2307
2308	/*
2309	 * __get_free_pages() returns a 32-bit address, which cannot represent
2310	 * a highmem page
2311	 */
2312	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2313
2314	page = alloc_pages(gfp_mask, order);
2315	if (!page)
2316		return 0;
2317	return (unsigned long) page_address(page);
2318}
2319EXPORT_SYMBOL(__get_free_pages);
2320
2321unsigned long get_zeroed_page(gfp_t gfp_mask)
2322{
2323	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2324}
2325EXPORT_SYMBOL(get_zeroed_page);
2326
2327void __pagevec_free(struct pagevec *pvec)
2328{
2329	int i = pagevec_count(pvec);
2330
2331	while (--i >= 0) {
2332		trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
2333		free_hot_cold_page(pvec->pages[i], pvec->cold);
2334	}
2335}
2336
2337void __free_pages(struct page *page, unsigned int order)
2338{
2339	if (put_page_testzero(page)) {
2340		if (order == 0)
2341			free_hot_cold_page(page, 0);
2342		else
2343			__free_pages_ok(page, order);
2344	}
2345}
2346
2347EXPORT_SYMBOL(__free_pages);
2348
2349void free_pages(unsigned long addr, unsigned int order)
2350{
2351	if (addr != 0) {
2352		VM_BUG_ON(!virt_addr_valid((void *)addr));
2353		__free_pages(virt_to_page((void *)addr), order);
2354	}
2355}
2356
2357EXPORT_SYMBOL(free_pages);
2358
2359static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
2360{
2361	if (addr) {
2362		unsigned long alloc_end = addr + (PAGE_SIZE << order);
2363		unsigned long used = addr + PAGE_ALIGN(size);
2364
2365		split_page(virt_to_page((void *)addr), order);
2366		while (used < alloc_end) {
2367			free_page(used);
2368			used += PAGE_SIZE;
2369		}
2370	}
2371	return (void *)addr;
2372}
2373
2374/**
2375 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
2376 * @size: the number of bytes to allocate
2377 * @gfp_mask: GFP flags for the allocation
2378 *
2379 * This function is similar to alloc_pages(), except that it allocates the
2380 * minimum number of pages to satisfy the request.  alloc_pages() can only
2381 * allocate memory in power-of-two pages.
2382 *
2383 * This function is also limited by MAX_ORDER.
2384 *
2385 * Memory allocated by this function must be released by free_pages_exact().
2386 */
2387void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2388{
2389	unsigned int order = get_order(size);
2390	unsigned long addr;
2391
2392	addr = __get_free_pages(gfp_mask, order);
2393	return make_alloc_exact(addr, order, size);
2394}
2395EXPORT_SYMBOL(alloc_pages_exact);
2396
2397/**
2398 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
2399 *			   pages on a node.
2400 * @nid: the preferred node ID where memory should be allocated
2401 * @size: the number of bytes to allocate
2402 * @gfp_mask: GFP flags for the allocation
2403 *
2404 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
2405 * back.
2406 * Note this is not alloc_pages_exact_node() which allocates on a specific node,
2407 * but is not exact.
2408 */
2409void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
2410{
2411	unsigned order = get_order(size);
2412	struct page *p = alloc_pages_node(nid, gfp_mask, order);
2413	if (!p)
2414		return NULL;
2415	return make_alloc_exact((unsigned long)page_address(p), order, size);
2416}
2417EXPORT_SYMBOL(alloc_pages_exact_nid);
2418
2419/**
2420 * free_pages_exact - release memory allocated via alloc_pages_exact()
2421 * @virt: the value returned by alloc_pages_exact.
2422 * @size: size of allocation, same value as passed to alloc_pages_exact().
2423 *
2424 * Release the memory allocated by a previous call to alloc_pages_exact.
2425 */
2426void free_pages_exact(void *virt, size_t size)
2427{
2428	unsigned long addr = (unsigned long)virt;
2429	unsigned long end = addr + PAGE_ALIGN(size);
2430
2431	while (addr < end) {
2432		free_page(addr);
2433		addr += PAGE_SIZE;
2434	}
2435}
2436EXPORT_SYMBOL(free_pages_exact);
2437
2438static unsigned int nr_free_zone_pages(int offset)
2439{
2440	struct zoneref *z;
2441	struct zone *zone;
2442
2443	/* Just pick one node, since fallback list is circular */
2444	unsigned int sum = 0;
2445
2446	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2447
2448	for_each_zone_zonelist(zone, z, zonelist, offset) {
2449		unsigned long size = zone->present_pages;
2450		unsigned long high = high_wmark_pages(zone);
2451		if (size > high)
2452			sum += size - high;
2453	}
2454
2455	return sum;
2456}
2457
2458/*
2459 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2460 */
2461unsigned int nr_free_buffer_pages(void)
2462{
2463	return nr_free_zone_pages(gfp_zone(GFP_USER));
2464}
2465EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2466
2467/*
2468 * Amount of free RAM allocatable within all zones
2469 */
2470unsigned int nr_free_pagecache_pages(void)
2471{
2472	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2473}
2474
2475static inline void show_node(struct zone *zone)
2476{
2477	if (NUMA_BUILD)
2478		printk("Node %d ", zone_to_nid(zone));
2479}
2480
2481void si_meminfo(struct sysinfo *val)
2482{
2483	val->totalram = totalram_pages;
2484	val->sharedram = 0;
2485	val->freeram = global_page_state(NR_FREE_PAGES);
2486	val->bufferram = nr_blockdev_pages();
2487	val->totalhigh = totalhigh_pages;
2488	val->freehigh = nr_free_highpages();
2489	val->mem_unit = PAGE_SIZE;
2490}
2491
2492EXPORT_SYMBOL(si_meminfo);
2493
2494#ifdef CONFIG_NUMA
2495void si_meminfo_node(struct sysinfo *val, int nid)
2496{
2497	pg_data_t *pgdat = NODE_DATA(nid);
2498
2499	val->totalram = pgdat->node_present_pages;
2500	val->freeram = node_page_state(nid, NR_FREE_PAGES);
2501#ifdef CONFIG_HIGHMEM
2502	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2503	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2504			NR_FREE_PAGES);
2505#else
2506	val->totalhigh = 0;
2507	val->freehigh = 0;
2508#endif
2509	val->mem_unit = PAGE_SIZE;
2510}
2511#endif
2512
2513/*
2514 * Determine whether the node should be displayed or not, depending on whether
2515 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
2516 */
2517bool skip_free_areas_node(unsigned int flags, int nid)
2518{
2519	bool ret = false;
2520
2521	if (!(flags & SHOW_MEM_FILTER_NODES))
2522		goto out;
2523
2524	get_mems_allowed();
2525	ret = !node_isset(nid, cpuset_current_mems_allowed);
2526	put_mems_allowed();
2527out:
2528	return ret;
2529}
2530
2531#define K(x) ((x) << (PAGE_SHIFT-10))
2532
2533/*
2534 * Show free area list (used inside shift_scroll-lock stuff)
2535 * We also calculate the percentage fragmentation. We do this by counting the
2536 * memory on each free list with the exception of the first item on the list.
2537 * Suppresses nodes that are not allowed by current's cpuset if
2538 * SHOW_MEM_FILTER_NODES is passed.
2539 */
2540void show_free_areas(unsigned int filter)
2541{
2542	int cpu;
2543	struct zone *zone;
2544
2545	for_each_populated_zone(zone) {
2546		if (skip_free_areas_node(filter, zone_to_nid(zone)))
2547			continue;
2548		show_node(zone);
2549		printk("%s per-cpu:\n", zone->name);
2550
2551		for_each_online_cpu(cpu) {
2552			struct per_cpu_pageset *pageset;
2553
2554			pageset = per_cpu_ptr(zone->pageset, cpu);
2555
2556			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2557			       cpu, pageset->pcp.high,
2558			       pageset->pcp.batch, pageset->pcp.count);
2559		}
2560	}
2561
2562	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2563		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2564		" unevictable:%lu"
2565		" dirty:%lu writeback:%lu unstable:%lu\n"
2566		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2567		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2568		global_page_state(NR_ACTIVE_ANON),
2569		global_page_state(NR_INACTIVE_ANON),
2570		global_page_state(NR_ISOLATED_ANON),
2571		global_page_state(NR_ACTIVE_FILE),
2572		global_page_state(NR_INACTIVE_FILE),
2573		global_page_state(NR_ISOLATED_FILE),
2574		global_page_state(NR_UNEVICTABLE),
2575		global_page_state(NR_FILE_DIRTY),
2576		global_page_state(NR_WRITEBACK),
2577		global_page_state(NR_UNSTABLE_NFS),
2578		global_page_state(NR_FREE_PAGES),
2579		global_page_state(NR_SLAB_RECLAIMABLE),
2580		global_page_state(NR_SLAB_UNRECLAIMABLE),
2581		global_page_state(NR_FILE_MAPPED),
2582		global_page_state(NR_SHMEM),
2583		global_page_state(NR_PAGETABLE),
2584		global_page_state(NR_BOUNCE));
2585
2586	for_each_populated_zone(zone) {
2587		int i;
2588
2589		if (skip_free_areas_node(filter, zone_to_nid(zone)))
2590			continue;
2591		show_node(zone);
2592		printk("%s"
2593			" free:%lukB"
2594			" min:%lukB"
2595			" low:%lukB"
2596			" high:%lukB"
2597			" active_anon:%lukB"
2598			" inactive_anon:%lukB"
2599			" active_file:%lukB"
2600			" inactive_file:%lukB"
2601			" unevictable:%lukB"
2602			" isolated(anon):%lukB"
2603			" isolated(file):%lukB"
2604			" present:%lukB"
2605			" mlocked:%lukB"
2606			" dirty:%lukB"
2607			" writeback:%lukB"
2608			" mapped:%lukB"
2609			" shmem:%lukB"
2610			" slab_reclaimable:%lukB"
2611			" slab_unreclaimable:%lukB"
2612			" kernel_stack:%lukB"
2613			" pagetables:%lukB"
2614			" unstable:%lukB"
2615			" bounce:%lukB"
2616			" writeback_tmp:%lukB"
2617			" pages_scanned:%lu"
2618			" all_unreclaimable? %s"
2619			"\n",
2620			zone->name,
2621			K(zone_page_state(zone, NR_FREE_PAGES)),
2622			K(min_wmark_pages(zone)),
2623			K(low_wmark_pages(zone)),
2624			K(high_wmark_pages(zone)),
2625			K(zone_page_state(zone, NR_ACTIVE_ANON)),
2626			K(zone_page_state(zone, NR_INACTIVE_ANON)),
2627			K(zone_page_state(zone, NR_ACTIVE_FILE)),
2628			K(zone_page_state(zone, NR_INACTIVE_FILE)),
2629			K(zone_page_state(zone, NR_UNEVICTABLE)),
2630			K(zone_page_state(zone, NR_ISOLATED_ANON)),
2631			K(zone_page_state(zone, NR_ISOLATED_FILE)),
2632			K(zone->present_pages),
2633			K(zone_page_state(zone, NR_MLOCK)),
2634			K(zone_page_state(zone, NR_FILE_DIRTY)),
2635			K(zone_page_state(zone, NR_WRITEBACK)),
2636			K(zone_page_state(zone, NR_FILE_MAPPED)),
2637			K(zone_page_state(zone, NR_SHMEM)),
2638			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2639			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2640			zone_page_state(zone, NR_KERNEL_STACK) *
2641				THREAD_SIZE / 1024,
2642			K(zone_page_state(zone, NR_PAGETABLE)),
2643			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2644			K(zone_page_state(zone, NR_BOUNCE)),
2645			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2646			zone->pages_scanned,
2647			(zone->all_unreclaimable ? "yes" : "no")
2648			);
2649		printk("lowmem_reserve[]:");
2650		for (i = 0; i < MAX_NR_ZONES; i++)
2651			printk(" %lu", zone->lowmem_reserve[i]);
2652		printk("\n");
2653	}
2654
2655	for_each_populated_zone(zone) {
2656 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
2657
2658		if (skip_free_areas_node(filter, zone_to_nid(zone)))
2659			continue;
2660		show_node(zone);
2661		printk("%s: ", zone->name);
2662
2663		spin_lock_irqsave(&zone->lock, flags);
2664		for (order = 0; order < MAX_ORDER; order++) {
2665			nr[order] = zone->free_area[order].nr_free;
2666			total += nr[order] << order;
2667		}
2668		spin_unlock_irqrestore(&zone->lock, flags);
2669		for (order = 0; order < MAX_ORDER; order++)
2670			printk("%lu*%lukB ", nr[order], K(1UL) << order);
2671		printk("= %lukB\n", K(total));
2672	}
2673
2674	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2675
2676	show_swap_cache_info();
2677}
2678
2679static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2680{
2681	zoneref->zone = zone;
2682	zoneref->zone_idx = zone_idx(zone);
2683}
2684
2685/*
2686 * Builds allocation fallback zone lists.
2687 *
2688 * Add all populated zones of a node to the zonelist.
2689 */
2690static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2691				int nr_zones, enum zone_type zone_type)
2692{
2693	struct zone *zone;
2694
2695	BUG_ON(zone_type >= MAX_NR_ZONES);
2696	zone_type++;
2697
2698	do {
2699		zone_type--;
2700		zone = pgdat->node_zones + zone_type;
2701		if (populated_zone(zone)) {
2702			zoneref_set_zone(zone,
2703				&zonelist->_zonerefs[nr_zones++]);
2704			check_highest_zone(zone_type);
2705		}
2706
2707	} while (zone_type);
2708	return nr_zones;
2709}
2710
2711
2712/*
2713 *  zonelist_order:
2714 *  0 = automatic detection of better ordering.
2715 *  1 = order by ([node] distance, -zonetype)
2716 *  2 = order by (-zonetype, [node] distance)
2717 *
2718 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2719 *  the same zonelist. So only NUMA can configure this param.
2720 */
2721#define ZONELIST_ORDER_DEFAULT  0
2722#define ZONELIST_ORDER_NODE     1
2723#define ZONELIST_ORDER_ZONE     2
2724
2725/* zonelist order in the kernel.
2726 * set_zonelist_order() will set this to NODE or ZONE.
2727 */
2728static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2729static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2730
2731
2732#ifdef CONFIG_NUMA
2733/* The value user specified ....changed by config */
2734static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2735/* string for sysctl */
2736#define NUMA_ZONELIST_ORDER_LEN	16
2737char numa_zonelist_order[16] = "default";
2738
2739/*
2740 * interface for configure zonelist ordering.
2741 * command line option "numa_zonelist_order"
2742 *	= "[dD]efault	- default, automatic configuration.
2743 *	= "[nN]ode 	- order by node locality, then by zone within node
2744 *	= "[zZ]one      - order by zone, then by locality within zone
2745 */
2746
2747static int __parse_numa_zonelist_order(char *s)
2748{
2749	if (*s == 'd' || *s == 'D') {
2750		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2751	} else if (*s == 'n' || *s == 'N') {
2752		user_zonelist_order = ZONELIST_ORDER_NODE;
2753	} else if (*s == 'z' || *s == 'Z') {
2754		user_zonelist_order = ZONELIST_ORDER_ZONE;
2755	} else {
2756		printk(KERN_WARNING
2757			"Ignoring invalid numa_zonelist_order value:  "
2758			"%s\n", s);
2759		return -EINVAL;
2760	}
2761	return 0;
2762}
2763
2764static __init int setup_numa_zonelist_order(char *s)
2765{
2766	int ret;
2767
2768	if (!s)
2769		return 0;
2770
2771	ret = __parse_numa_zonelist_order(s);
2772	if (ret == 0)
2773		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
2774
2775	return ret;
2776}
2777early_param("numa_zonelist_order", setup_numa_zonelist_order);
2778
2779/*
2780 * sysctl handler for numa_zonelist_order
2781 */
2782int numa_zonelist_order_handler(ctl_table *table, int write,
2783		void __user *buffer, size_t *length,
2784		loff_t *ppos)
2785{
2786	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2787	int ret;
2788	static DEFINE_MUTEX(zl_order_mutex);
2789
2790	mutex_lock(&zl_order_mutex);
2791	if (write)
2792		strcpy(saved_string, (char*)table->data);
2793	ret = proc_dostring(table, write, buffer, length, ppos);
2794	if (ret)
2795		goto out;
2796	if (write) {
2797		int oldval = user_zonelist_order;
2798		if (__parse_numa_zonelist_order((char*)table->data)) {
2799			/*
2800			 * bogus value.  restore saved string
2801			 */
2802			strncpy((char*)table->data, saved_string,
2803				NUMA_ZONELIST_ORDER_LEN);
2804			user_zonelist_order = oldval;
2805		} else if (oldval != user_zonelist_order) {
2806			mutex_lock(&zonelists_mutex);
2807			build_all_zonelists(NULL);
2808			mutex_unlock(&zonelists_mutex);
2809		}
2810	}
2811out:
2812	mutex_unlock(&zl_order_mutex);
2813	return ret;
2814}
2815
2816
2817#define MAX_NODE_LOAD (nr_online_nodes)
2818static int node_load[MAX_NUMNODES];
2819
2820/**
2821 * find_next_best_node - find the next node that should appear in a given node's fallback list
2822 * @node: node whose fallback list we're appending
2823 * @used_node_mask: nodemask_t of already used nodes
2824 *
2825 * We use a number of factors to determine which is the next node that should
2826 * appear on a given node's fallback list.  The node should not have appeared
2827 * already in @node's fallback list, and it should be the next closest node
2828 * according to the distance array (which contains arbitrary distance values
2829 * from each node to each node in the system), and should also prefer nodes
2830 * with no CPUs, since presumably they'll have very little allocation pressure
2831 * on them otherwise.
2832 * It returns -1 if no node is found.
2833 */
2834static int find_next_best_node(int node, nodemask_t *used_node_mask)
2835{
2836	int n, val;
2837	int min_val = INT_MAX;
2838	int best_node = -1;
2839	const struct cpumask *tmp = cpumask_of_node(0);
2840
2841	/* Use the local node if we haven't already */
2842	if (!node_isset(node, *used_node_mask)) {
2843		node_set(node, *used_node_mask);
2844		return node;
2845	}
2846
2847	for_each_node_state(n, N_HIGH_MEMORY) {
2848
2849		/* Don't want a node to appear more than once */
2850		if (node_isset(n, *used_node_mask))
2851			continue;
2852
2853		/* Use the distance array to find the distance */
2854		val = node_distance(node, n);
2855
2856		/* Penalize nodes under us ("prefer the next node") */
2857		val += (n < node);
2858
2859		/* Give preference to headless and unused nodes */
2860		tmp = cpumask_of_node(n);
2861		if (!cpumask_empty(tmp))
2862			val += PENALTY_FOR_NODE_WITH_CPUS;
2863
2864		/* Slight preference for less loaded node */
2865		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2866		val += node_load[n];
2867
2868		if (val < min_val) {
2869			min_val = val;
2870			best_node = n;
2871		}
2872	}
2873
2874	if (best_node >= 0)
2875		node_set(best_node, *used_node_mask);
2876
2877	return best_node;
2878}
2879
2880
2881/*
2882 * Build zonelists ordered by node and zones within node.
2883 * This results in maximum locality--normal zone overflows into local
2884 * DMA zone, if any--but risks exhausting DMA zone.
2885 */
2886static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2887{
2888	int j;
2889	struct zonelist *zonelist;
2890
2891	zonelist = &pgdat->node_zonelists[0];
2892	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2893		;
2894	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2895							MAX_NR_ZONES - 1);
2896	zonelist->_zonerefs[j].zone = NULL;
2897	zonelist->_zonerefs[j].zone_idx = 0;
2898}
2899
2900/*
2901 * Build gfp_thisnode zonelists
2902 */
2903static void build_thisnode_zonelists(pg_data_t *pgdat)
2904{
2905	int j;
2906	struct zonelist *zonelist;
2907
2908	zonelist = &pgdat->node_zonelists[1];
2909	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2910	zonelist->_zonerefs[j].zone = NULL;
2911	zonelist->_zonerefs[j].zone_idx = 0;
2912}
2913
2914/*
2915 * Build zonelists ordered by zone and nodes within zones.
2916 * This results in conserving DMA zone[s] until all Normal memory is
2917 * exhausted, but results in overflowing to remote node while memory
2918 * may still exist in local DMA zone.
2919 */
2920static int node_order[MAX_NUMNODES];
2921
2922static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2923{
2924	int pos, j, node;
2925	int zone_type;		/* needs to be signed */
2926	struct zone *z;
2927	struct zonelist *zonelist;
2928
2929	zonelist = &pgdat->node_zonelists[0];
2930	pos = 0;
2931	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2932		for (j = 0; j < nr_nodes; j++) {
2933			node = node_order[j];
2934			z = &NODE_DATA(node)->node_zones[zone_type];
2935			if (populated_zone(z)) {
2936				zoneref_set_zone(z,
2937					&zonelist->_zonerefs[pos++]);
2938				check_highest_zone(zone_type);
2939			}
2940		}
2941	}
2942	zonelist->_zonerefs[pos].zone = NULL;
2943	zonelist->_zonerefs[pos].zone_idx = 0;
2944}
2945
2946static int default_zonelist_order(void)
2947{
2948	int nid, zone_type;
2949	unsigned long low_kmem_size,total_size;
2950	struct zone *z;
2951	int average_size;
2952	/*
2953         * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
2954	 * If they are really small and used heavily, the system can fall
2955	 * into OOM very easily.
2956	 * This function detect ZONE_DMA/DMA32 size and configures zone order.
2957	 */
2958	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
2959	low_kmem_size = 0;
2960	total_size = 0;
2961	for_each_online_node(nid) {
2962		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2963			z = &NODE_DATA(nid)->node_zones[zone_type];
2964			if (populated_zone(z)) {
2965				if (zone_type < ZONE_NORMAL)
2966					low_kmem_size += z->present_pages;
2967				total_size += z->present_pages;
2968			} else if (zone_type == ZONE_NORMAL) {
2969				/*
2970				 * If any node has only lowmem, then node order
2971				 * is preferred to allow kernel allocations
2972				 * locally; otherwise, they can easily infringe
2973				 * on other nodes when there is an abundance of
2974				 * lowmem available to allocate from.
2975				 */
2976				return ZONELIST_ORDER_NODE;
2977			}
2978		}
2979	}
2980	if (!low_kmem_size ||  /* there are no DMA area. */
2981	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2982		return ZONELIST_ORDER_NODE;
2983	/*
2984	 * look into each node's config.
2985  	 * If there is a node whose DMA/DMA32 memory is very big area on
2986 	 * local memory, NODE_ORDER may be suitable.
2987         */
2988	average_size = total_size /
2989				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2990	for_each_online_node(nid) {
2991		low_kmem_size = 0;
2992		total_size = 0;
2993		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2994			z = &NODE_DATA(nid)->node_zones[zone_type];
2995			if (populated_zone(z)) {
2996				if (zone_type < ZONE_NORMAL)
2997					low_kmem_size += z->present_pages;
2998				total_size += z->present_pages;
2999			}
3000		}
3001		if (low_kmem_size &&
3002		    total_size > average_size && /* ignore small node */
3003		    low_kmem_size > total_size * 70/100)
3004			return ZONELIST_ORDER_NODE;
3005	}
3006	return ZONELIST_ORDER_ZONE;
3007}
3008
3009static void set_zonelist_order(void)
3010{
3011	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3012		current_zonelist_order = default_zonelist_order();
3013	else
3014		current_zonelist_order = user_zonelist_order;
3015}
3016
3017static void build_zonelists(pg_data_t *pgdat)
3018{
3019	int j, node, load;
3020	enum zone_type i;
3021	nodemask_t used_mask;
3022	int local_node, prev_node;
3023	struct zonelist *zonelist;
3024	int order = current_zonelist_order;
3025
3026	/* initialize zonelists */
3027	for (i = 0; i < MAX_ZONELISTS; i++) {
3028		zonelist = pgdat->node_zonelists + i;
3029		zonelist->_zonerefs[0].zone = NULL;
3030		zonelist->_zonerefs[0].zone_idx = 0;
3031	}
3032
3033	/* NUMA-aware ordering of nodes */
3034	local_node = pgdat->node_id;
3035	load = nr_online_nodes;
3036	prev_node = local_node;
3037	nodes_clear(used_mask);
3038
3039	memset(node_order, 0, sizeof(node_order));
3040	j = 0;
3041
3042	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3043		int distance = node_distance(local_node, node);
3044
3045		/*
3046		 * If another node is sufficiently far away then it is better
3047		 * to reclaim pages in a zone before going off node.
3048		 */
3049		if (distance > RECLAIM_DISTANCE)
3050			zone_reclaim_mode = 1;
3051
3052		/*
3053		 * We don't want to pressure a particular node.
3054		 * So adding penalty to the first node in same
3055		 * distance group to make it round-robin.
3056		 */
3057		if (distance != node_distance(local_node, prev_node))
3058			node_load[node] = load;
3059
3060		prev_node = node;
3061		load--;
3062		if (order == ZONELIST_ORDER_NODE)
3063			build_zonelists_in_node_order(pgdat, node);
3064		else
3065			node_order[j++] = node;	/* remember order */
3066	}
3067
3068	if (order == ZONELIST_ORDER_ZONE) {
3069		/* calculate node order -- i.e., DMA last! */
3070		build_zonelists_in_zone_order(pgdat, j);
3071	}
3072
3073	build_thisnode_zonelists(pgdat);
3074}
3075
3076/* Construct the zonelist performance cache - see further mmzone.h */
3077static void build_zonelist_cache(pg_data_t *pgdat)
3078{
3079	struct zonelist *zonelist;
3080	struct zonelist_cache *zlc;
3081	struct zoneref *z;
3082
3083	zonelist = &pgdat->node_zonelists[0];
3084	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3085	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
3086	for (z = zonelist->_zonerefs; z->zone; z++)
3087		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
3088}
3089
3090#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3091/*
3092 * Return node id of node used for "local" allocations.
3093 * I.e., first node id of first zone in arg node's generic zonelist.
3094 * Used for initializing percpu 'numa_mem', which is used primarily
3095 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3096 */
3097int local_memory_node(int node)
3098{
3099	struct zone *zone;
3100
3101	(void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3102				   gfp_zone(GFP_KERNEL),
3103				   NULL,
3104				   &zone);
3105	return zone->node;
3106}
3107#endif
3108
3109#else	/* CONFIG_NUMA */
3110
3111static void set_zonelist_order(void)
3112{
3113	current_zonelist_order = ZONELIST_ORDER_ZONE;
3114}
3115
3116static void build_zonelists(pg_data_t *pgdat)
3117{
3118	int node, local_node;
3119	enum zone_type j;
3120	struct zonelist *zonelist;
3121
3122	local_node = pgdat->node_id;
3123
3124	zonelist = &pgdat->node_zonelists[0];
3125	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3126
3127	/*
3128	 * Now we build the zonelist so that it contains the zones
3129	 * of all the other nodes.
3130	 * We don't want to pressure a particular node, so when
3131	 * building the zones for node N, we make sure that the
3132	 * zones coming right after the local ones are those from
3133	 * node N+1 (modulo N)
3134	 */
3135	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3136		if (!node_online(node))
3137			continue;
3138		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3139							MAX_NR_ZONES - 1);
3140	}
3141	for (node = 0; node < local_node; node++) {
3142		if (!node_online(node))
3143			continue;
3144		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3145							MAX_NR_ZONES - 1);
3146	}
3147
3148	zonelist->_zonerefs[j].zone = NULL;
3149	zonelist->_zonerefs[j].zone_idx = 0;
3150}
3151
3152/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
3153static void build_zonelist_cache(pg_data_t *pgdat)
3154{
3155	pgdat->node_zonelists[0].zlcache_ptr = NULL;
3156}
3157
3158#endif	/* CONFIG_NUMA */
3159
3160/*
3161 * Boot pageset table. One per cpu which is going to be used for all
3162 * zones and all nodes. The parameters will be set in such a way
3163 * that an item put on a list will immediately be handed over to
3164 * the buddy list. This is safe since pageset manipulation is done
3165 * with interrupts disabled.
3166 *
3167 * The boot_pagesets must be kept even after bootup is complete for
3168 * unused processors and/or zones. They do play a role for bootstrapping
3169 * hotplugged processors.
3170 *
3171 * zoneinfo_show() and maybe other functions do
3172 * not check if the processor is online before following the pageset pointer.
3173 * Other parts of the kernel may not check if the zone is available.
3174 */
3175static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3176static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3177static void setup_zone_pageset(struct zone *zone);
3178
3179/*
3180 * Global mutex to protect against size modification of zonelists
3181 * as well as to serialize pageset setup for the new populated zone.
3182 */
3183DEFINE_MUTEX(zonelists_mutex);
3184
3185/* return values int ....just for stop_machine() */
3186static __init_refok int __build_all_zonelists(void *data)
3187{
3188	int nid;
3189	int cpu;
3190
3191#ifdef CONFIG_NUMA
3192	memset(node_load, 0, sizeof(node_load));
3193#endif
3194	for_each_online_node(nid) {
3195		pg_data_t *pgdat = NODE_DATA(nid);
3196
3197		build_zonelists(pgdat);
3198		build_zonelist_cache(pgdat);
3199	}
3200
3201	/*
3202	 * Initialize the boot_pagesets that are going to be used
3203	 * for bootstrapping processors. The real pagesets for
3204	 * each zone will be allocated later when the per cpu
3205	 * allocator is available.
3206	 *
3207	 * boot_pagesets are used also for bootstrapping offline
3208	 * cpus if the system is already booted because the pagesets
3209	 * are needed to initialize allocators on a specific cpu too.
3210	 * F.e. the percpu allocator needs the page allocator which
3211	 * needs the percpu allocator in order to allocate its pagesets
3212	 * (a chicken-egg dilemma).
3213	 */
3214	for_each_possible_cpu(cpu) {
3215		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3216
3217#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3218		/*
3219		 * We now know the "local memory node" for each node--
3220		 * i.e., the node of the first zone in the generic zonelist.
3221		 * Set up numa_mem percpu variable for on-line cpus.  During
3222		 * boot, only the boot cpu should be on-line;  we'll init the
3223		 * secondary cpus' numa_mem as they come on-line.  During
3224		 * node/memory hotplug, we'll fixup all on-line cpus.
3225		 */
3226		if (cpu_online(cpu))
3227			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3228#endif
3229	}
3230
3231	return 0;
3232}
3233
3234/*
3235 * Called with zonelists_mutex held always
3236 * unless system_state == SYSTEM_BOOTING.
3237 */
3238void __ref build_all_zonelists(void *data)
3239{
3240	set_zonelist_order();
3241
3242	if (system_state == SYSTEM_BOOTING) {
3243		__build_all_zonelists(NULL);
3244		mminit_verify_zonelist();
3245		cpuset_init_current_mems_allowed();
3246	} else {
3247		/* we have to stop all cpus to guarantee there is no user
3248		   of zonelist */
3249#ifdef CONFIG_MEMORY_HOTPLUG
3250		if (data)
3251			setup_zone_pageset((struct zone *)data);
3252#endif
3253		stop_machine(__build_all_zonelists, NULL, NULL);
3254		/* cpuset refresh routine should be here */
3255	}
3256	vm_total_pages = nr_free_pagecache_pages();
3257	/*
3258	 * Disable grouping by mobility if the number of pages in the
3259	 * system is too low to allow the mechanism to work. It would be
3260	 * more accurate, but expensive to check per-zone. This check is
3261	 * made on memory-hotadd so a system can start with mobility
3262	 * disabled and enable it later
3263	 */
3264	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3265		page_group_by_mobility_disabled = 1;
3266	else
3267		page_group_by_mobility_disabled = 0;
3268
3269	printk("Built %i zonelists in %s order, mobility grouping %s.  "
3270		"Total pages: %ld\n",
3271			nr_online_nodes,
3272			zonelist_order_name[current_zonelist_order],
3273			page_group_by_mobility_disabled ? "off" : "on",
3274			vm_total_pages);
3275#ifdef CONFIG_NUMA
3276	printk("Policy zone: %s\n", zone_names[policy_zone]);
3277#endif
3278}
3279
3280/*
3281 * Helper functions to size the waitqueue hash table.
3282 * Essentially these want to choose hash table sizes sufficiently
3283 * large so that collisions trying to wait on pages are rare.
3284 * But in fact, the number of active page waitqueues on typical
3285 * systems is ridiculously low, less than 200. So this is even
3286 * conservative, even though it seems large.
3287 *
3288 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3289 * waitqueues, i.e. the size of the waitq table given the number of pages.
3290 */
3291#define PAGES_PER_WAITQUEUE	256
3292
3293#ifndef CONFIG_MEMORY_HOTPLUG
3294static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3295{
3296	unsigned long size = 1;
3297
3298	pages /= PAGES_PER_WAITQUEUE;
3299
3300	while (size < pages)
3301		size <<= 1;
3302
3303	/*
3304	 * Once we have dozens or even hundreds of threads sleeping
3305	 * on IO we've got bigger problems than wait queue collision.
3306	 * Limit the size of the wait table to a reasonable size.
3307	 */
3308	size = min(size, 4096UL);
3309
3310	return max(size, 4UL);
3311}
3312#else
3313/*
3314 * A zone's size might be changed by hot-add, so it is not possible to determine
3315 * a suitable size for its wait_table.  So we use the maximum size now.
3316 *
3317 * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
3318 *
3319 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
3320 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3321 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
3322 *
3323 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
3324 * or more by the traditional way. (See above).  It equals:
3325 *
3326 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
3327 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
3328 *    powerpc (64K page size)             : =  (32G +16M)byte.
3329 */
3330static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3331{
3332	return 4096UL;
3333}
3334#endif
3335
3336/*
3337 * This is an integer logarithm so that shifts can be used later
3338 * to extract the more random high bits from the multiplicative
3339 * hash function before the remainder is taken.
3340 */
3341static inline unsigned long wait_table_bits(unsigned long size)
3342{
3343	return ffz(~size);
3344}
3345
3346#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3347
3348/*
3349 * Check if a pageblock contains reserved pages
3350 */
3351static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
3352{
3353	unsigned long pfn;
3354
3355	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3356		if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
3357			return 1;
3358	}
3359	return 0;
3360}
3361
3362/*
3363 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3364 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3365 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
3366 * higher will lead to a bigger reserve which will get freed as contiguous
3367 * blocks as reclaim kicks in
3368 */
3369static void setup_zone_migrate_reserve(struct zone *zone)
3370{
3371	unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
3372	struct page *page;
3373	unsigned long block_migratetype;
3374	int reserve;
3375
3376	/* Get the start pfn, end pfn and the number of blocks to reserve */
3377	start_pfn = zone->zone_start_pfn;
3378	end_pfn = start_pfn + zone->spanned_pages;
3379	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3380							pageblock_order;
3381
3382	/*
3383	 * Reserve blocks are generally in place to help high-order atomic
3384	 * allocations that are short-lived. A min_free_kbytes value that
3385	 * would result in more than 2 reserve blocks for atomic allocations
3386	 * is assumed to be in place to help anti-fragmentation for the
3387	 * future allocation of hugepages at runtime.
3388	 */
3389	reserve = min(2, reserve);
3390
3391	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
3392		if (!pfn_valid(pfn))
3393			continue;
3394		page = pfn_to_page(pfn);
3395
3396		/* Watch out for overlapping nodes */
3397		if (page_to_nid(page) != zone_to_nid(zone))
3398			continue;
3399
3400		/* Blocks with reserved pages will never free, skip them. */
3401		block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3402		if (pageblock_is_reserved(pfn, block_end_pfn))
3403			continue;
3404
3405		block_migratetype = get_pageblock_migratetype(page);
3406
3407		/* If this block is reserved, account for it */
3408		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
3409			reserve--;
3410			continue;
3411		}
3412
3413		/* Suitable for reserving if this block is movable */
3414		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
3415			set_pageblock_migratetype(page, MIGRATE_RESERVE);
3416			move_freepages_block(zone, page, MIGRATE_RESERVE);
3417			reserve--;
3418			continue;
3419		}
3420
3421		/*
3422		 * If the reserve is met and this is a previous reserved block,
3423		 * take it back
3424		 */
3425		if (block_migratetype == MIGRATE_RESERVE) {
3426			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3427			move_freepages_block(zone, page, MIGRATE_MOVABLE);
3428		}
3429	}
3430}
3431
3432/*
3433 * Initially all pages are reserved - free ones are freed
3434 * up by free_all_bootmem() once the early boot process is
3435 * done. Non-atomic initialization, single-pass.
3436 */
3437void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3438		unsigned long start_pfn, enum memmap_context context)
3439{
3440	struct page *page;
3441	unsigned long end_pfn = start_pfn + size;
3442	unsigned long pfn;
3443	struct zone *z;
3444
3445	if (highest_memmap_pfn < end_pfn - 1)
3446		highest_memmap_pfn = end_pfn - 1;
3447
3448	z = &NODE_DATA(nid)->node_zones[zone];
3449	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3450		/*
3451		 * There can be holes in boot-time mem_map[]s
3452		 * handed to this function.  They do not
3453		 * exist on hotplugged memory.
3454		 */
3455		if (context == MEMMAP_EARLY) {
3456			if (!early_pfn_valid(pfn))
3457				continue;
3458			if (!early_pfn_in_nid(pfn, nid))
3459				continue;
3460		}
3461		page = pfn_to_page(pfn);
3462		set_page_links(page, zone, nid, pfn);
3463		mminit_verify_page_links(page, zone, nid, pfn);
3464		init_page_count(page);
3465		reset_page_mapcount(page);
3466		SetPageReserved(page);
3467		/*
3468		 * Mark the block movable so that blocks are reserved for
3469		 * movable at startup. This will force kernel allocations
3470		 * to reserve their blocks rather than leaking throughout
3471		 * the address space during boot when many long-lived
3472		 * kernel allocations are made. Later some blocks near
3473		 * the start are marked MIGRATE_RESERVE by
3474		 * setup_zone_migrate_reserve()
3475		 *
3476		 * bitmap is created for zone's valid pfn range. but memmap
3477		 * can be created for invalid pages (for alignment)
3478		 * check here not to call set_pageblock_migratetype() against
3479		 * pfn out of zone.
3480		 */
3481		if ((z->zone_start_pfn <= pfn)
3482		    && (pfn < z->zone_start_pfn + z->spanned_pages)
3483		    && !(pfn & (pageblock_nr_pages - 1)))
3484			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3485
3486		INIT_LIST_HEAD(&page->lru);
3487#ifdef WANT_PAGE_VIRTUAL
3488		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
3489		if (!is_highmem_idx(zone))
3490			set_page_address(page, __va(pfn << PAGE_SHIFT));
3491#endif
3492	}
3493}
3494
3495static void __meminit zone_init_free_lists(struct zone *zone)
3496{
3497	int order, t;
3498	for_each_migratetype_order(order, t) {
3499		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3500		zone->free_area[order].nr_free = 0;
3501	}
3502}
3503
3504#ifndef __HAVE_ARCH_MEMMAP_INIT
3505#define memmap_init(size, nid, zone, start_pfn) \
3506	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3507#endif
3508
3509static int zone_batchsize(struct zone *zone)
3510{
3511#ifdef CONFIG_MMU
3512	int batch;
3513
3514	/*
3515	 * The per-cpu-pages pools are set to around 1000th of the
3516	 * size of the zone.  But no more than 1/2 of a meg.
3517	 *
3518	 * OK, so we don't know how big the cache is.  So guess.
3519	 */
3520	batch = zone->present_pages / 1024;
3521	if (batch * PAGE_SIZE > 512 * 1024)
3522		batch = (512 * 1024) / PAGE_SIZE;
3523	batch /= 4;		/* We effectively *= 4 below */
3524	if (batch < 1)
3525		batch = 1;
3526
3527	/*
3528	 * Clamp the batch to a 2^n - 1 value. Having a power
3529	 * of 2 value was found to be more likely to have
3530	 * suboptimal cache aliasing properties in some cases.
3531	 *
3532	 * For example if 2 tasks are alternately allocating
3533	 * batches of pages, one task can end up with a lot
3534	 * of pages of one half of the possible page colors
3535	 * and the other with pages of the other colors.
3536	 */
3537	batch = rounddown_pow_of_two(batch + batch/2) - 1;
3538
3539	return batch;
3540
3541#else
3542	/* The deferral and batching of frees should be suppressed under NOMMU
3543	 * conditions.
3544	 *
3545	 * The problem is that NOMMU needs to be able to allocate large chunks
3546	 * of contiguous memory as there's no hardware page translation to
3547	 * assemble apparent contiguous memory from discontiguous pages.
3548	 *
3549	 * Queueing large contiguous runs of pages for batching, however,
3550	 * causes the pages to actually be freed in smaller chunks.  As there
3551	 * can be a significant delay between the individual batches being
3552	 * recycled, this leads to the once large chunks of space being
3553	 * fragmented and becoming unavailable for high-order allocations.
3554	 */
3555	return 0;
3556#endif
3557}
3558
3559static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3560{
3561	struct per_cpu_pages *pcp;
3562	int migratetype;
3563
3564	memset(p, 0, sizeof(*p));
3565
3566	pcp = &p->pcp;
3567	pcp->count = 0;
3568	pcp->high = 6 * batch;
3569	pcp->batch = max(1UL, 1 * batch);
3570	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3571		INIT_LIST_HEAD(&pcp->lists[migratetype]);
3572}
3573
3574/*
3575 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
3576 * to the value high for the pageset p.
3577 */
3578
3579static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3580				unsigned long high)
3581{
3582	struct per_cpu_pages *pcp;
3583
3584	pcp = &p->pcp;
3585	pcp->high = high;
3586	pcp->batch = max(1UL, high/4);
3587	if ((high/4) > (PAGE_SHIFT * 8))
3588		pcp->batch = PAGE_SHIFT * 8;
3589}
3590
3591static void setup_zone_pageset(struct zone *zone)
3592{
3593	int cpu;
3594
3595	zone->pageset = alloc_percpu(struct per_cpu_pageset);
3596
3597	for_each_possible_cpu(cpu) {
3598		struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3599
3600		setup_pageset(pcp, zone_batchsize(zone));
3601
3602		if (percpu_pagelist_fraction)
3603			setup_pagelist_highmark(pcp,
3604				(zone->present_pages /
3605					percpu_pagelist_fraction));
3606	}
3607}
3608
3609/*
3610 * Allocate per cpu pagesets and initialize them.
3611 * Before this call only boot pagesets were available.
3612 */
3613void __init setup_per_cpu_pageset(void)
3614{
3615	struct zone *zone;
3616
3617	for_each_populated_zone(zone)
3618		setup_zone_pageset(zone);
3619}
3620
3621static noinline __init_refok
3622int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3623{
3624	int i;
3625	struct pglist_data *pgdat = zone->zone_pgdat;
3626	size_t alloc_size;
3627
3628	/*
3629	 * The per-page waitqueue mechanism uses hashed waitqueues
3630	 * per zone.
3631	 */
3632	zone->wait_table_hash_nr_entries =
3633		 wait_table_hash_nr_entries(zone_size_pages);
3634	zone->wait_table_bits =
3635		wait_table_bits(zone->wait_table_hash_nr_entries);
3636	alloc_size = zone->wait_table_hash_nr_entries
3637					* sizeof(wait_queue_head_t);
3638
3639	if (!slab_is_available()) {
3640		zone->wait_table = (wait_queue_head_t *)
3641			alloc_bootmem_node_nopanic(pgdat, alloc_size);
3642	} else {
3643		/*
3644		 * This case means that a zone whose size was 0 gets new memory
3645		 * via memory hot-add.
3646		 * But it may be the case that a new node was hot-added.  In
3647		 * this case vmalloc() will not be able to use this new node's
3648		 * memory - this wait_table must be initialized to use this new
3649		 * node itself as well.
3650		 * To use this new node's memory, further consideration will be
3651		 * necessary.
3652		 */
3653		zone->wait_table = vmalloc(alloc_size);
3654	}
3655	if (!zone->wait_table)
3656		return -ENOMEM;
3657
3658	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3659		init_waitqueue_head(zone->wait_table + i);
3660
3661	return 0;
3662}
3663
3664static int __zone_pcp_update(void *data)
3665{
3666	struct zone *zone = data;
3667	int cpu;
3668	unsigned long batch = zone_batchsize(zone), flags;
3669
3670	for_each_possible_cpu(cpu) {
3671		struct per_cpu_pageset *pset;
3672		struct per_cpu_pages *pcp;
3673
3674		pset = per_cpu_ptr(zone->pageset, cpu);
3675		pcp = &pset->pcp;
3676
3677		local_irq_save(flags);
3678		free_pcppages_bulk(zone, pcp->count, pcp);
3679		setup_pageset(pset, batch);
3680		local_irq_restore(flags);
3681	}
3682	return 0;
3683}
3684
3685void zone_pcp_update(struct zone *zone)
3686{
3687	stop_machine(__zone_pcp_update, zone, NULL);
3688}
3689
3690static __meminit void zone_pcp_init(struct zone *zone)
3691{
3692	/*
3693	 * per cpu subsystem is not up at this point. The following code
3694	 * relies on the ability of the linker to provide the
3695	 * offset of a (static) per cpu variable into the per cpu area.
3696	 */
3697	zone->pageset = &boot_pageset;
3698
3699	if (zone->present_pages)
3700		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
3701			zone->name, zone->present_pages,
3702					 zone_batchsize(zone));
3703}
3704
3705__meminit int init_currently_empty_zone(struct zone *zone,
3706					unsigned long zone_start_pfn,
3707					unsigned long size,
3708					enum memmap_context context)
3709{
3710	struct pglist_data *pgdat = zone->zone_pgdat;
3711	int ret;
3712	ret = zone_wait_table_init(zone, size);
3713	if (ret)
3714		return ret;
3715	pgdat->nr_zones = zone_idx(zone) + 1;
3716
3717	zone->zone_start_pfn = zone_start_pfn;
3718
3719	mminit_dprintk(MMINIT_TRACE, "memmap_init",
3720			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
3721			pgdat->node_id,
3722			(unsigned long)zone_idx(zone),
3723			zone_start_pfn, (zone_start_pfn + size));
3724
3725	zone_init_free_lists(zone);
3726
3727	return 0;
3728}
3729
3730#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3731/*
3732 * Basic iterator support. Return the first range of PFNs for a node
3733 * Note: nid == MAX_NUMNODES returns first region regardless of node
3734 */
3735static int __meminit first_active_region_index_in_nid(int nid)
3736{
3737	int i;
3738
3739	for (i = 0; i < nr_nodemap_entries; i++)
3740		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3741			return i;
3742
3743	return -1;
3744}
3745
3746/*
3747 * Basic iterator support. Return the next active range of PFNs for a node
3748 * Note: nid == MAX_NUMNODES returns next region regardless of node
3749 */
3750static int __meminit next_active_region_index_in_nid(int index, int nid)
3751{
3752	for (index = index + 1; index < nr_nodemap_entries; index++)
3753		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3754			return index;
3755
3756	return -1;
3757}
3758
3759#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3760/*
3761 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3762 * Architectures may implement their own version but if add_active_range()
3763 * was used and there are no special requirements, this is a convenient
3764 * alternative
3765 */
3766int __meminit __early_pfn_to_nid(unsigned long pfn)
3767{
3768	int i;
3769
3770	for (i = 0; i < nr_nodemap_entries; i++) {
3771		unsigned long start_pfn = early_node_map[i].start_pfn;
3772		unsigned long end_pfn = early_node_map[i].end_pfn;
3773
3774		if (start_pfn <= pfn && pfn < end_pfn)
3775			return early_node_map[i].nid;
3776	}
3777	/* This is a memory hole */
3778	return -1;
3779}
3780#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3781
3782int __meminit early_pfn_to_nid(unsigned long pfn)
3783{
3784	int nid;
3785
3786	nid = __early_pfn_to_nid(pfn);
3787	if (nid >= 0)
3788		return nid;
3789	/* just returns 0 */
3790	return 0;
3791}
3792
3793#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3794bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3795{
3796	int nid;
3797
3798	nid = __early_pfn_to_nid(pfn);
3799	if (nid >= 0 && nid != node)
3800		return false;
3801	return true;
3802}
3803#endif
3804
3805/* Basic iterator support to walk early_node_map[] */
3806#define for_each_active_range_index_in_nid(i, nid) \
3807	for (i = first_active_region_index_in_nid(nid); i != -1; \
3808				i = next_active_region_index_in_nid(i, nid))
3809
3810/**
3811 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3812 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3813 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3814 *
3815 * If an architecture guarantees that all ranges registered with
3816 * add_active_ranges() contain no holes and may be freed, this
3817 * this function may be used instead of calling free_bootmem() manually.
3818 */
3819void __init free_bootmem_with_active_regions(int nid,
3820						unsigned long max_low_pfn)
3821{
3822	int i;
3823
3824	for_each_active_range_index_in_nid(i, nid) {
3825		unsigned long size_pages = 0;
3826		unsigned long end_pfn = early_node_map[i].end_pfn;
3827
3828		if (early_node_map[i].start_pfn >= max_low_pfn)
3829			continue;
3830
3831		if (end_pfn > max_low_pfn)
3832			end_pfn = max_low_pfn;
3833
3834		size_pages = end_pfn - early_node_map[i].start_pfn;
3835		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3836				PFN_PHYS(early_node_map[i].start_pfn),
3837				size_pages << PAGE_SHIFT);
3838	}
3839}
3840
3841#ifdef CONFIG_HAVE_MEMBLOCK
3842/*
3843 * Basic iterator support. Return the last range of PFNs for a node
3844 * Note: nid == MAX_NUMNODES returns last region regardless of node
3845 */
3846static int __meminit last_active_region_index_in_nid(int nid)
3847{
3848	int i;
3849
3850	for (i = nr_nodemap_entries - 1; i >= 0; i--)
3851		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3852			return i;
3853
3854	return -1;
3855}
3856
3857/*
3858 * Basic iterator support. Return the previous active range of PFNs for a node
3859 * Note: nid == MAX_NUMNODES returns next region regardless of node
3860 */
3861static int __meminit previous_active_region_index_in_nid(int index, int nid)
3862{
3863	for (index = index - 1; index >= 0; index--)
3864		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3865			return index;
3866
3867	return -1;
3868}
3869
3870#define for_each_active_range_index_in_nid_reverse(i, nid) \
3871	for (i = last_active_region_index_in_nid(nid); i != -1; \
3872				i = previous_active_region_index_in_nid(i, nid))
3873
3874u64 __init find_memory_core_early(int nid, u64 size, u64 align,
3875					u64 goal, u64 limit)
3876{
3877	int i;
3878
3879	/* Need to go over early_node_map to find out good range for node */
3880	for_each_active_range_index_in_nid_reverse(i, nid) {
3881		u64 addr;
3882		u64 ei_start, ei_last;
3883		u64 final_start, final_end;
3884
3885		ei_last = early_node_map[i].end_pfn;
3886		ei_last <<= PAGE_SHIFT;
3887		ei_start = early_node_map[i].start_pfn;
3888		ei_start <<= PAGE_SHIFT;
3889
3890		final_start = max(ei_start, goal);
3891		final_end = min(ei_last, limit);
3892
3893		if (final_start >= final_end)
3894			continue;
3895
3896		addr = memblock_find_in_range(final_start, final_end, size, align);
3897
3898		if (addr == MEMBLOCK_ERROR)
3899			continue;
3900
3901		return addr;
3902	}
3903
3904	return MEMBLOCK_ERROR;
3905}
3906#endif
3907
3908int __init add_from_early_node_map(struct range *range, int az,
3909				   int nr_range, int nid)
3910{
3911	int i;
3912	u64 start, end;
3913
3914	/* need to go over early_node_map to find out good range for node */
3915	for_each_active_range_index_in_nid(i, nid) {
3916		start = early_node_map[i].start_pfn;
3917		end = early_node_map[i].end_pfn;
3918		nr_range = add_range(range, az, nr_range, start, end);
3919	}
3920	return nr_range;
3921}
3922
3923void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3924{
3925	int i;
3926	int ret;
3927
3928	for_each_active_range_index_in_nid(i, nid) {
3929		ret = work_fn(early_node_map[i].start_pfn,
3930			      early_node_map[i].end_pfn, data);
3931		if (ret)
3932			break;
3933	}
3934}
3935/**
3936 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3937 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3938 *
3939 * If an architecture guarantees that all ranges registered with
3940 * add_active_ranges() contain no holes and may be freed, this
3941 * function may be used instead of calling memory_present() manually.
3942 */
3943void __init sparse_memory_present_with_active_regions(int nid)
3944{
3945	int i;
3946
3947	for_each_active_range_index_in_nid(i, nid)
3948		memory_present(early_node_map[i].nid,
3949				early_node_map[i].start_pfn,
3950				early_node_map[i].end_pfn);
3951}
3952
3953/**
3954 * get_pfn_range_for_nid - Return the start and end page frames for a node
3955 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3956 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3957 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3958 *
3959 * It returns the start and end page frame of a node based on information
3960 * provided by an arch calling add_active_range(). If called for a node
3961 * with no available memory, a warning is printed and the start and end
3962 * PFNs will be 0.
3963 */
3964void __meminit get_pfn_range_for_nid(unsigned int nid,
3965			unsigned long *start_pfn, unsigned long *end_pfn)
3966{
3967	int i;
3968	*start_pfn = -1UL;
3969	*end_pfn = 0;
3970
3971	for_each_active_range_index_in_nid(i, nid) {
3972		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3973		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3974	}
3975
3976	if (*start_pfn == -1UL)
3977		*start_pfn = 0;
3978}
3979
3980/*
3981 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3982 * assumption is made that zones within a node are ordered in monotonic
3983 * increasing memory addresses so that the "highest" populated zone is used
3984 */
3985static void __init find_usable_zone_for_movable(void)
3986{
3987	int zone_index;
3988	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3989		if (zone_index == ZONE_MOVABLE)
3990			continue;
3991
3992		if (arch_zone_highest_possible_pfn[zone_index] >
3993				arch_zone_lowest_possible_pfn[zone_index])
3994			break;
3995	}
3996
3997	VM_BUG_ON(zone_index == -1);
3998	movable_zone = zone_index;
3999}
4000
4001/*
4002 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
4003 * because it is sized independent of architecture. Unlike the other zones,
4004 * the starting point for ZONE_MOVABLE is not fixed. It may be different
4005 * in each node depending on the size of each node and how evenly kernelcore
4006 * is distributed. This helper function adjusts the zone ranges
4007 * provided by the architecture for a given node by using the end of the
4008 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4009 * zones within a node are in order of monotonic increases memory addresses
4010 */
4011static void __meminit adjust_zone_range_for_zone_movable(int nid,
4012					unsigned long zone_type,
4013					unsigned long node_start_pfn,
4014					unsigned long node_end_pfn,
4015					unsigned long *zone_start_pfn,
4016					unsigned long *zone_end_pfn)
4017{
4018	/* Only adjust if ZONE_MOVABLE is on this node */
4019	if (zone_movable_pfn[nid]) {
4020		/* Size ZONE_MOVABLE */
4021		if (zone_type == ZONE_MOVABLE) {
4022			*zone_start_pfn = zone_movable_pfn[nid];
4023			*zone_end_pfn = min(node_end_pfn,
4024				arch_zone_highest_possible_pfn[movable_zone]);
4025
4026		/* Adjust for ZONE_MOVABLE starting within this range */
4027		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4028				*zone_end_pfn > zone_movable_pfn[nid]) {
4029			*zone_end_pfn = zone_movable_pfn[nid];
4030
4031		/* Check if this whole range is within ZONE_MOVABLE */
4032		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
4033			*zone_start_pfn = *zone_end_pfn;
4034	}
4035}
4036
4037/*
4038 * Return the number of pages a zone spans in a node, including holes
4039 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4040 */
4041static unsigned long __meminit zone_spanned_pages_in_node(int nid,
4042					unsigned long zone_type,
4043					unsigned long *ignored)
4044{
4045	unsigned long node_start_pfn, node_end_pfn;
4046	unsigned long zone_start_pfn, zone_end_pfn;
4047
4048	/* Get the start and end of the node and zone */
4049	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4050	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4051	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
4052	adjust_zone_range_for_zone_movable(nid, zone_type,
4053				node_start_pfn, node_end_pfn,
4054				&zone_start_pfn, &zone_end_pfn);
4055
4056	/* Check that this node has pages within the zone's required range */
4057	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4058		return 0;
4059
4060	/* Move the zone boundaries inside the node if necessary */
4061	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4062	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4063
4064	/* Return the spanned pages */
4065	return zone_end_pfn - zone_start_pfn;
4066}
4067
4068/*
4069 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
4070 * then all holes in the requested range will be accounted for.
4071 */
4072unsigned long __meminit __absent_pages_in_range(int nid,
4073				unsigned long range_start_pfn,
4074				unsigned long range_end_pfn)
4075{
4076	int i = 0;
4077	unsigned long prev_end_pfn = 0, hole_pages = 0;
4078	unsigned long start_pfn;
4079
4080	/* Find the end_pfn of the first active range of pfns in the node */
4081	i = first_active_region_index_in_nid(nid);
4082	if (i == -1)
4083		return 0;
4084
4085	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
4086
4087	/* Account for ranges before physical memory on this node */
4088	if (early_node_map[i].start_pfn > range_start_pfn)
4089		hole_pages = prev_end_pfn - range_start_pfn;
4090
4091	/* Find all holes for the zone within the node */
4092	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
4093
4094		/* No need to continue if prev_end_pfn is outside the zone */
4095		if (prev_end_pfn >= range_end_pfn)
4096			break;
4097
4098		/* Make sure the end of the zone is not within the hole */
4099		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
4100		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
4101
4102		/* Update the hole size cound and move on */
4103		if (start_pfn > range_start_pfn) {
4104			BUG_ON(prev_end_pfn > start_pfn);
4105			hole_pages += start_pfn - prev_end_pfn;
4106		}
4107		prev_end_pfn = early_node_map[i].end_pfn;
4108	}
4109
4110	/* Account for ranges past physical memory on this node */
4111	if (range_end_pfn > prev_end_pfn)
4112		hole_pages += range_end_pfn -
4113				max(range_start_pfn, prev_end_pfn);
4114
4115	return hole_pages;
4116}
4117
4118/**
4119 * absent_pages_in_range - Return number of page frames in holes within a range
4120 * @start_pfn: The start PFN to start searching for holes
4121 * @end_pfn: The end PFN to stop searching for holes
4122 *
4123 * It returns the number of pages frames in memory holes within a range.
4124 */
4125unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4126							unsigned long end_pfn)
4127{
4128	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4129}
4130
4131/* Return the number of page frames in holes in a zone on a node */
4132static unsigned long __meminit zone_absent_pages_in_node(int nid,
4133					unsigned long zone_type,
4134					unsigned long *ignored)
4135{
4136	unsigned long node_start_pfn, node_end_pfn;
4137	unsigned long zone_start_pfn, zone_end_pfn;
4138
4139	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4140	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
4141							node_start_pfn);
4142	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
4143							node_end_pfn);
4144
4145	adjust_zone_range_for_zone_movable(nid, zone_type,
4146			node_start_pfn, node_end_pfn,
4147			&zone_start_pfn, &zone_end_pfn);
4148	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4149}
4150
4151#else
4152static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4153					unsigned long zone_type,
4154					unsigned long *zones_size)
4155{
4156	return zones_size[zone_type];
4157}
4158
4159static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4160						unsigned long zone_type,
4161						unsigned long *zholes_size)
4162{
4163	if (!zholes_size)
4164		return 0;
4165
4166	return zholes_size[zone_type];
4167}
4168
4169#endif
4170
4171static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4172		unsigned long *zones_size, unsigned long *zholes_size)
4173{
4174	unsigned long realtotalpages, totalpages = 0;
4175	enum zone_type i;
4176
4177	for (i = 0; i < MAX_NR_ZONES; i++)
4178		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4179								zones_size);
4180	pgdat->node_spanned_pages = totalpages;
4181
4182	realtotalpages = totalpages;
4183	for (i = 0; i < MAX_NR_ZONES; i++)
4184		realtotalpages -=
4185			zone_absent_pages_in_node(pgdat->node_id, i,
4186								zholes_size);
4187	pgdat->node_present_pages = realtotalpages;
4188	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4189							realtotalpages);
4190}
4191
4192#ifndef CONFIG_SPARSEMEM
4193/*
4194 * Calculate the size of the zone->blockflags rounded to an unsigned long
4195 * Start by making sure zonesize is a multiple of pageblock_order by rounding
4196 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
4197 * round what is now in bits to nearest long in bits, then return it in
4198 * bytes.
4199 */
4200static unsigned long __init usemap_size(unsigned long zonesize)
4201{
4202	unsigned long usemapsize;
4203
4204	usemapsize = roundup(zonesize, pageblock_nr_pages);
4205	usemapsize = usemapsize >> pageblock_order;
4206	usemapsize *= NR_PAGEBLOCK_BITS;
4207	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4208
4209	return usemapsize / 8;
4210}
4211
4212static void __init setup_usemap(struct pglist_data *pgdat,
4213				struct zone *zone, unsigned long zonesize)
4214{
4215	unsigned long usemapsize = usemap_size(zonesize);
4216	zone->pageblock_flags = NULL;
4217	if (usemapsize)
4218		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
4219								   usemapsize);
4220}
4221#else
4222static inline void setup_usemap(struct pglist_data *pgdat,
4223				struct zone *zone, unsigned long zonesize) {}
4224#endif /* CONFIG_SPARSEMEM */
4225
4226#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4227
4228/* Return a sensible default order for the pageblock size. */
4229static inline int pageblock_default_order(void)
4230{
4231	if (HPAGE_SHIFT > PAGE_SHIFT)
4232		return HUGETLB_PAGE_ORDER;
4233
4234	return MAX_ORDER-1;
4235}
4236
4237/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4238static inline void __init set_pageblock_order(unsigned int order)
4239{
4240	/* Check that pageblock_nr_pages has not already been setup */
4241	if (pageblock_order)
4242		return;
4243
4244	/*
4245	 * Assume the largest contiguous order of interest is a huge page.
4246	 * This value may be variable depending on boot parameters on IA64
4247	 */
4248	pageblock_order = order;
4249}
4250#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4251
4252/*
4253 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4254 * and pageblock_default_order() are unused as pageblock_order is set
4255 * at compile-time. See include/linux/pageblock-flags.h for the values of
4256 * pageblock_order based on the kernel config
4257 */
4258static inline int pageblock_default_order(unsigned int order)
4259{
4260	return MAX_ORDER-1;
4261}
4262#define set_pageblock_order(x)	do {} while (0)
4263
4264#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4265
4266/*
4267 * Set up the zone data structures:
4268 *   - mark all pages reserved
4269 *   - mark all memory queues empty
4270 *   - clear the memory bitmaps
4271 */
4272static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4273		unsigned long *zones_size, unsigned long *zholes_size)
4274{
4275	enum zone_type j;
4276	int nid = pgdat->node_id;
4277	unsigned long zone_start_pfn = pgdat->node_start_pfn;
4278	int ret;
4279
4280	pgdat_resize_init(pgdat);
4281	pgdat->nr_zones = 0;
4282	init_waitqueue_head(&pgdat->kswapd_wait);
4283	pgdat->kswapd_max_order = 0;
4284	pgdat_page_cgroup_init(pgdat);
4285
4286	for (j = 0; j < MAX_NR_ZONES; j++) {
4287		struct zone *zone = pgdat->node_zones + j;
4288		unsigned long size, realsize, memmap_pages;
4289		enum lru_list l;
4290
4291		size = zone_spanned_pages_in_node(nid, j, zones_size);
4292		realsize = size - zone_absent_pages_in_node(nid, j,
4293								zholes_size);
4294
4295		/*
4296		 * Adjust realsize so that it accounts for how much memory
4297		 * is used by this zone for memmap. This affects the watermark
4298		 * and per-cpu initialisations
4299		 */
4300		memmap_pages =
4301			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
4302		if (realsize >= memmap_pages) {
4303			realsize -= memmap_pages;
4304			if (memmap_pages)
4305				printk(KERN_DEBUG
4306				       "  %s zone: %lu pages used for memmap\n",
4307				       zone_names[j], memmap_pages);
4308		} else
4309			printk(KERN_WARNING
4310				"  %s zone: %lu pages exceeds realsize %lu\n",
4311				zone_names[j], memmap_pages, realsize);
4312
4313		/* Account for reserved pages */
4314		if (j == 0 && realsize > dma_reserve) {
4315			realsize -= dma_reserve;
4316			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
4317					zone_names[0], dma_reserve);
4318		}
4319
4320		if (!is_highmem_idx(j))
4321			nr_kernel_pages += realsize;
4322		nr_all_pages += realsize;
4323
4324		zone->spanned_pages = size;
4325		zone->present_pages = realsize;
4326#ifdef CONFIG_NUMA
4327		zone->node = nid;
4328		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
4329						/ 100;
4330		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
4331#endif
4332		zone->name = zone_names[j];
4333		spin_lock_init(&zone->lock);
4334		spin_lock_init(&zone->lru_lock);
4335		zone_seqlock_init(zone);
4336		zone->zone_pgdat = pgdat;
4337
4338		zone_pcp_init(zone);
4339		for_each_lru(l)
4340			INIT_LIST_HEAD(&zone->lru[l].list);
4341		zone->reclaim_stat.recent_rotated[0] = 0;
4342		zone->reclaim_stat.recent_rotated[1] = 0;
4343		zone->reclaim_stat.recent_scanned[0] = 0;
4344		zone->reclaim_stat.recent_scanned[1] = 0;
4345		zap_zone_vm_stats(zone);
4346		zone->flags = 0;
4347		if (!size)
4348			continue;
4349
4350		set_pageblock_order(pageblock_default_order());
4351		setup_usemap(pgdat, zone, size);
4352		ret = init_currently_empty_zone(zone, zone_start_pfn,
4353						size, MEMMAP_EARLY);
4354		BUG_ON(ret);
4355		memmap_init(size, nid, j, zone_start_pfn);
4356		zone_start_pfn += size;
4357	}
4358}
4359
4360static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4361{
4362	/* Skip empty nodes */
4363	if (!pgdat->node_spanned_pages)
4364		return;
4365
4366#ifdef CONFIG_FLAT_NODE_MEM_MAP
4367	/* ia64 gets its own node_mem_map, before this, without bootmem */
4368	if (!pgdat->node_mem_map) {
4369		unsigned long size, start, end;
4370		struct page *map;
4371
4372		/*
4373		 * The zone's endpoints aren't required to be MAX_ORDER
4374		 * aligned but the node_mem_map endpoints must be in order
4375		 * for the buddy allocator to function correctly.
4376		 */
4377		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4378		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4379		end = ALIGN(end, MAX_ORDER_NR_PAGES);
4380		size =  (end - start) * sizeof(struct page);
4381		map = alloc_remap(pgdat->node_id, size);
4382		if (!map)
4383			map = alloc_bootmem_node_nopanic(pgdat, size);
4384		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4385	}
4386#ifndef CONFIG_NEED_MULTIPLE_NODES
4387	/*
4388	 * With no DISCONTIG, the global mem_map is just set as node 0's
4389	 */
4390	if (pgdat == NODE_DATA(0)) {
4391		mem_map = NODE_DATA(0)->node_mem_map;
4392#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4393		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4394			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4395#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4396	}
4397#endif
4398#endif /* CONFIG_FLAT_NODE_MEM_MAP */
4399}
4400
4401void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4402		unsigned long node_start_pfn, unsigned long *zholes_size)
4403{
4404	pg_data_t *pgdat = NODE_DATA(nid);
4405
4406	pgdat->node_id = nid;
4407	pgdat->node_start_pfn = node_start_pfn;
4408	calculate_node_totalpages(pgdat, zones_size, zholes_size);
4409
4410	alloc_node_mem_map(pgdat);
4411#ifdef CONFIG_FLAT_NODE_MEM_MAP
4412	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4413		nid, (unsigned long)pgdat,
4414		(unsigned long)pgdat->node_mem_map);
4415#endif
4416
4417	free_area_init_core(pgdat, zones_size, zholes_size);
4418}
4419
4420#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4421
4422#if MAX_NUMNODES > 1
4423/*
4424 * Figure out the number of possible node ids.
4425 */
4426static void __init setup_nr_node_ids(void)
4427{
4428	unsigned int node;
4429	unsigned int highest = 0;
4430
4431	for_each_node_mask(node, node_possible_map)
4432		highest = node;
4433	nr_node_ids = highest + 1;
4434}
4435#else
4436static inline void setup_nr_node_ids(void)
4437{
4438}
4439#endif
4440
4441/**
4442 * add_active_range - Register a range of PFNs backed by physical memory
4443 * @nid: The node ID the range resides on
4444 * @start_pfn: The start PFN of the available physical memory
4445 * @end_pfn: The end PFN of the available physical memory
4446 *
4447 * These ranges are stored in an early_node_map[] and later used by
4448 * free_area_init_nodes() to calculate zone sizes and holes. If the
4449 * range spans a memory hole, it is up to the architecture to ensure
4450 * the memory is not freed by the bootmem allocator. If possible
4451 * the range being registered will be merged with existing ranges.
4452 */
4453void __init add_active_range(unsigned int nid, unsigned long start_pfn,
4454						unsigned long end_pfn)
4455{
4456	int i;
4457
4458	mminit_dprintk(MMINIT_TRACE, "memory_register",
4459			"Entering add_active_range(%d, %#lx, %#lx) "
4460			"%d entries of %d used\n",
4461			nid, start_pfn, end_pfn,
4462			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
4463
4464	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
4465
4466	/* Merge with existing active regions if possible */
4467	for (i = 0; i < nr_nodemap_entries; i++) {
4468		if (early_node_map[i].nid != nid)
4469			continue;
4470
4471		/* Skip if an existing region covers this new one */
4472		if (start_pfn >= early_node_map[i].start_pfn &&
4473				end_pfn <= early_node_map[i].end_pfn)
4474			return;
4475
4476		/* Merge forward if suitable */
4477		if (start_pfn <= early_node_map[i].end_pfn &&
4478				end_pfn > early_node_map[i].end_pfn) {
4479			early_node_map[i].end_pfn = end_pfn;
4480			return;
4481		}
4482
4483		/* Merge backward if suitable */
4484		if (start_pfn < early_node_map[i].start_pfn &&
4485				end_pfn >= early_node_map[i].start_pfn) {
4486			early_node_map[i].start_pfn = start_pfn;
4487			return;
4488		}
4489	}
4490
4491	/* Check that early_node_map is large enough */
4492	if (i >= MAX_ACTIVE_REGIONS) {
4493		printk(KERN_CRIT "More than %d memory regions, truncating\n",
4494							MAX_ACTIVE_REGIONS);
4495		return;
4496	}
4497
4498	early_node_map[i].nid = nid;
4499	early_node_map[i].start_pfn = start_pfn;
4500	early_node_map[i].end_pfn = end_pfn;
4501	nr_nodemap_entries = i + 1;
4502}
4503
4504/**
4505 * remove_active_range - Shrink an existing registered range of PFNs
4506 * @nid: The node id the range is on that should be shrunk
4507 * @start_pfn: The new PFN of the range
4508 * @end_pfn: The new PFN of the range
4509 *
4510 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
4511 * The map is kept near the end physical page range that has already been
4512 * registered. This function allows an arch to shrink an existing registered
4513 * range.
4514 */
4515void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4516				unsigned long end_pfn)
4517{
4518	int i, j;
4519	int removed = 0;
4520
4521	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4522			  nid, start_pfn, end_pfn);
4523
4524	/* Find the old active region end and shrink */
4525	for_each_active_range_index_in_nid(i, nid) {
4526		if (early_node_map[i].start_pfn >= start_pfn &&
4527		    early_node_map[i].end_pfn <= end_pfn) {
4528			/* clear it */
4529			early_node_map[i].start_pfn = 0;
4530			early_node_map[i].end_pfn = 0;
4531			removed = 1;
4532			continue;
4533		}
4534		if (early_node_map[i].start_pfn < start_pfn &&
4535		    early_node_map[i].end_pfn > start_pfn) {
4536			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4537			early_node_map[i].end_pfn = start_pfn;
4538			if (temp_end_pfn > end_pfn)
4539				add_active_range(nid, end_pfn, temp_end_pfn);
4540			continue;
4541		}
4542		if (early_node_map[i].start_pfn >= start_pfn &&
4543		    early_node_map[i].end_pfn > end_pfn &&
4544		    early_node_map[i].start_pfn < end_pfn) {
4545			early_node_map[i].start_pfn = end_pfn;
4546			continue;
4547		}
4548	}
4549
4550	if (!removed)
4551		return;
4552
4553	/* remove the blank ones */
4554	for (i = nr_nodemap_entries - 1; i > 0; i--) {
4555		if (early_node_map[i].nid != nid)
4556			continue;
4557		if (early_node_map[i].end_pfn)
4558			continue;
4559		/* we found it, get rid of it */
4560		for (j = i; j < nr_nodemap_entries - 1; j++)
4561			memcpy(&early_node_map[j], &early_node_map[j+1],
4562				sizeof(early_node_map[j]));
4563		j = nr_nodemap_entries - 1;
4564		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4565		nr_nodemap_entries--;
4566	}
4567}
4568
4569/**
4570 * remove_all_active_ranges - Remove all currently registered regions
4571 *
4572 * During discovery, it may be found that a table like SRAT is invalid
4573 * and an alternative discovery method must be used. This function removes
4574 * all currently registered regions.
4575 */
4576void __init remove_all_active_ranges(void)
4577{
4578	memset(early_node_map, 0, sizeof(early_node_map));
4579	nr_nodemap_entries = 0;
4580}
4581
4582/* Compare two active node_active_regions */
4583static int __init cmp_node_active_region(const void *a, const void *b)
4584{
4585	struct node_active_region *arange = (struct node_active_region *)a;
4586	struct node_active_region *brange = (struct node_active_region *)b;
4587
4588	/* Done this way to avoid overflows */
4589	if (arange->start_pfn > brange->start_pfn)
4590		return 1;
4591	if (arange->start_pfn < brange->start_pfn)
4592		return -1;
4593
4594	return 0;
4595}
4596
4597/* sort the node_map by start_pfn */
4598void __init sort_node_map(void)
4599{
4600	sort(early_node_map, (size_t)nr_nodemap_entries,
4601			sizeof(struct node_active_region),
4602			cmp_node_active_region, NULL);
4603}
4604
4605/**
4606 * node_map_pfn_alignment - determine the maximum internode alignment
4607 *
4608 * This function should be called after node map is populated and sorted.
4609 * It calculates the maximum power of two alignment which can distinguish
4610 * all the nodes.
4611 *
4612 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
4613 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
4614 * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
4615 * shifted, 1GiB is enough and this function will indicate so.
4616 *
4617 * This is used to test whether pfn -> nid mapping of the chosen memory
4618 * model has fine enough granularity to avoid incorrect mapping for the
4619 * populated node map.
4620 *
4621 * Returns the determined alignment in pfn's.  0 if there is no alignment
4622 * requirement (single node).
4623 */
4624unsigned long __init node_map_pfn_alignment(void)
4625{
4626	unsigned long accl_mask = 0, last_end = 0;
4627	int last_nid = -1;
4628	int i;
4629
4630	for_each_active_range_index_in_nid(i, MAX_NUMNODES) {
4631		int nid = early_node_map[i].nid;
4632		unsigned long start = early_node_map[i].start_pfn;
4633		unsigned long end = early_node_map[i].end_pfn;
4634		unsigned long mask;
4635
4636		if (!start || last_nid < 0 || last_nid == nid) {
4637			last_nid = nid;
4638			last_end = end;
4639			continue;
4640		}
4641
4642		/*
4643		 * Start with a mask granular enough to pin-point to the
4644		 * start pfn and tick off bits one-by-one until it becomes
4645		 * too coarse to separate the current node from the last.
4646		 */
4647		mask = ~((1 << __ffs(start)) - 1);
4648		while (mask && last_end <= (start & (mask << 1)))
4649			mask <<= 1;
4650
4651		/* accumulate all internode masks */
4652		accl_mask |= mask;
4653	}
4654
4655	/* convert mask to number of pages */
4656	return ~accl_mask + 1;
4657}
4658
4659/* Find the lowest pfn for a node */
4660static unsigned long __init find_min_pfn_for_node(int nid)
4661{
4662	int i;
4663	unsigned long min_pfn = ULONG_MAX;
4664
4665	/* Assuming a sorted map, the first range found has the starting pfn */
4666	for_each_active_range_index_in_nid(i, nid)
4667		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4668
4669	if (min_pfn == ULONG_MAX) {
4670		printk(KERN_WARNING
4671			"Could not find start_pfn for node %d\n", nid);
4672		return 0;
4673	}
4674
4675	return min_pfn;
4676}
4677
4678/**
4679 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4680 *
4681 * It returns the minimum PFN based on information provided via
4682 * add_active_range().
4683 */
4684unsigned long __init find_min_pfn_with_active_regions(void)
4685{
4686	return find_min_pfn_for_node(MAX_NUMNODES);
4687}
4688
4689/*
4690 * early_calculate_totalpages()
4691 * Sum pages in active regions for movable zone.
4692 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4693 */
4694static unsigned long __init early_calculate_totalpages(void)
4695{
4696	int i;
4697	unsigned long totalpages = 0;
4698
4699	for (i = 0; i < nr_nodemap_entries; i++) {
4700		unsigned long pages = early_node_map[i].end_pfn -
4701						early_node_map[i].start_pfn;
4702		totalpages += pages;
4703		if (pages)
4704			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4705	}
4706  	return totalpages;
4707}
4708
4709/*
4710 * Find the PFN the Movable zone begins in each node. Kernel memory
4711 * is spread evenly between nodes as long as the nodes have enough
4712 * memory. When they don't, some nodes will have more kernelcore than
4713 * others
4714 */
4715static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4716{
4717	int i, nid;
4718	unsigned long usable_startpfn;
4719	unsigned long kernelcore_node, kernelcore_remaining;
4720	/* save the state before borrow the nodemask */
4721	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4722	unsigned long totalpages = early_calculate_totalpages();
4723	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4724
4725	/*
4726	 * If movablecore was specified, calculate what size of
4727	 * kernelcore that corresponds so that memory usable for
4728	 * any allocation type is evenly spread. If both kernelcore
4729	 * and movablecore are specified, then the value of kernelcore
4730	 * will be used for required_kernelcore if it's greater than
4731	 * what movablecore would have allowed.
4732	 */
4733	if (required_movablecore) {
4734		unsigned long corepages;
4735
4736		/*
4737		 * Round-up so that ZONE_MOVABLE is at least as large as what
4738		 * was requested by the user
4739		 */
4740		required_movablecore =
4741			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4742		corepages = totalpages - required_movablecore;
4743
4744		required_kernelcore = max(required_kernelcore, corepages);
4745	}
4746
4747	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
4748	if (!required_kernelcore)
4749		goto out;
4750
4751	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4752	find_usable_zone_for_movable();
4753	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4754
4755restart:
4756	/* Spread kernelcore memory as evenly as possible throughout nodes */
4757	kernelcore_node = required_kernelcore / usable_nodes;
4758	for_each_node_state(nid, N_HIGH_MEMORY) {
4759		/*
4760		 * Recalculate kernelcore_node if the division per node
4761		 * now exceeds what is necessary to satisfy the requested
4762		 * amount of memory for the kernel
4763		 */
4764		if (required_kernelcore < kernelcore_node)
4765			kernelcore_node = required_kernelcore / usable_nodes;
4766
4767		/*
4768		 * As the map is walked, we track how much memory is usable
4769		 * by the kernel using kernelcore_remaining. When it is
4770		 * 0, the rest of the node is usable by ZONE_MOVABLE
4771		 */
4772		kernelcore_remaining = kernelcore_node;
4773
4774		/* Go through each range of PFNs within this node */
4775		for_each_active_range_index_in_nid(i, nid) {
4776			unsigned long start_pfn, end_pfn;
4777			unsigned long size_pages;
4778
4779			start_pfn = max(early_node_map[i].start_pfn,
4780						zone_movable_pfn[nid]);
4781			end_pfn = early_node_map[i].end_pfn;
4782			if (start_pfn >= end_pfn)
4783				continue;
4784
4785			/* Account for what is only usable for kernelcore */
4786			if (start_pfn < usable_startpfn) {
4787				unsigned long kernel_pages;
4788				kernel_pages = min(end_pfn, usable_startpfn)
4789								- start_pfn;
4790
4791				kernelcore_remaining -= min(kernel_pages,
4792							kernelcore_remaining);
4793				required_kernelcore -= min(kernel_pages,
4794							required_kernelcore);
4795
4796				/* Continue if range is now fully accounted */
4797				if (end_pfn <= usable_startpfn) {
4798
4799					/*
4800					 * Push zone_movable_pfn to the end so
4801					 * that if we have to rebalance
4802					 * kernelcore across nodes, we will
4803					 * not double account here
4804					 */
4805					zone_movable_pfn[nid] = end_pfn;
4806					continue;
4807				}
4808				start_pfn = usable_startpfn;
4809			}
4810
4811			/*
4812			 * The usable PFN range for ZONE_MOVABLE is from
4813			 * start_pfn->end_pfn. Calculate size_pages as the
4814			 * number of pages used as kernelcore
4815			 */
4816			size_pages = end_pfn - start_pfn;
4817			if (size_pages > kernelcore_remaining)
4818				size_pages = kernelcore_remaining;
4819			zone_movable_pfn[nid] = start_pfn + size_pages;
4820
4821			/*
4822			 * Some kernelcore has been met, update counts and
4823			 * break if the kernelcore for this node has been
4824			 * satisified
4825			 */
4826			required_kernelcore -= min(required_kernelcore,
4827								size_pages);
4828			kernelcore_remaining -= size_pages;
4829			if (!kernelcore_remaining)
4830				break;
4831		}
4832	}
4833
4834	/*
4835	 * If there is still required_kernelcore, we do another pass with one
4836	 * less node in the count. This will push zone_movable_pfn[nid] further
4837	 * along on the nodes that still have memory until kernelcore is
4838	 * satisified
4839	 */
4840	usable_nodes--;
4841	if (usable_nodes && required_kernelcore > usable_nodes)
4842		goto restart;
4843
4844	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4845	for (nid = 0; nid < MAX_NUMNODES; nid++)
4846		zone_movable_pfn[nid] =
4847			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4848
4849out:
4850	/* restore the node_state */
4851	node_states[N_HIGH_MEMORY] = saved_node_state;
4852}
4853
4854/* Any regular memory on that node ? */
4855static void check_for_regular_memory(pg_data_t *pgdat)
4856{
4857#ifdef CONFIG_HIGHMEM
4858	enum zone_type zone_type;
4859
4860	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4861		struct zone *zone = &pgdat->node_zones[zone_type];
4862		if (zone->present_pages)
4863			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4864	}
4865#endif
4866}
4867
4868/**
4869 * free_area_init_nodes - Initialise all pg_data_t and zone data
4870 * @max_zone_pfn: an array of max PFNs for each zone
4871 *
4872 * This will call free_area_init_node() for each active node in the system.
4873 * Using the page ranges provided by add_active_range(), the size of each
4874 * zone in each node and their holes is calculated. If the maximum PFN
4875 * between two adjacent zones match, it is assumed that the zone is empty.
4876 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4877 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4878 * starts where the previous one ended. For example, ZONE_DMA32 starts
4879 * at arch_max_dma_pfn.
4880 */
4881void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4882{
4883	unsigned long nid;
4884	int i;
4885
4886	/* Sort early_node_map as initialisation assumes it is sorted */
4887	sort_node_map();
4888
4889	/* Record where the zone boundaries are */
4890	memset(arch_zone_lowest_possible_pfn, 0,
4891				sizeof(arch_zone_lowest_possible_pfn));
4892	memset(arch_zone_highest_possible_pfn, 0,
4893				sizeof(arch_zone_highest_possible_pfn));
4894	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4895	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4896	for (i = 1; i < MAX_NR_ZONES; i++) {
4897		if (i == ZONE_MOVABLE)
4898			continue;
4899		arch_zone_lowest_possible_pfn[i] =
4900			arch_zone_highest_possible_pfn[i-1];
4901		arch_zone_highest_possible_pfn[i] =
4902			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4903	}
4904	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4905	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4906
4907	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4908	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4909	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4910
4911	/* Print out the zone ranges */
4912	printk("Zone PFN ranges:\n");
4913	for (i = 0; i < MAX_NR_ZONES; i++) {
4914		if (i == ZONE_MOVABLE)
4915			continue;
4916		printk("  %-8s ", zone_names[i]);
4917		if (arch_zone_lowest_possible_pfn[i] ==
4918				arch_zone_highest_possible_pfn[i])
4919			printk("empty\n");
4920		else
4921			printk("%0#10lx -> %0#10lx\n",
4922				arch_zone_lowest_possible_pfn[i],
4923				arch_zone_highest_possible_pfn[i]);
4924	}
4925
4926	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4927	printk("Movable zone start PFN for each node\n");
4928	for (i = 0; i < MAX_NUMNODES; i++) {
4929		if (zone_movable_pfn[i])
4930			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4931	}
4932
4933	/* Print out the early_node_map[] */
4934	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4935	for (i = 0; i < nr_nodemap_entries; i++)
4936		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4937						early_node_map[i].start_pfn,
4938						early_node_map[i].end_pfn);
4939
4940	/* Initialise every node */
4941	mminit_verify_pageflags_layout();
4942	setup_nr_node_ids();
4943	for_each_online_node(nid) {
4944		pg_data_t *pgdat = NODE_DATA(nid);
4945		free_area_init_node(nid, NULL,
4946				find_min_pfn_for_node(nid), NULL);
4947
4948		/* Any memory on that node */
4949		if (pgdat->node_present_pages)
4950			node_set_state(nid, N_HIGH_MEMORY);
4951		check_for_regular_memory(pgdat);
4952	}
4953}
4954
4955static int __init cmdline_parse_core(char *p, unsigned long *core)
4956{
4957	unsigned long long coremem;
4958	if (!p)
4959		return -EINVAL;
4960
4961	coremem = memparse(p, &p);
4962	*core = coremem >> PAGE_SHIFT;
4963
4964	/* Paranoid check that UL is enough for the coremem value */
4965	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4966
4967	return 0;
4968}
4969
4970/*
4971 * kernelcore=size sets the amount of memory for use for allocations that
4972 * cannot be reclaimed or migrated.
4973 */
4974static int __init cmdline_parse_kernelcore(char *p)
4975{
4976	return cmdline_parse_core(p, &required_kernelcore);
4977}
4978
4979/*
4980 * movablecore=size sets the amount of memory for use for allocations that
4981 * can be reclaimed or migrated.
4982 */
4983static int __init cmdline_parse_movablecore(char *p)
4984{
4985	return cmdline_parse_core(p, &required_movablecore);
4986}
4987
4988early_param("kernelcore", cmdline_parse_kernelcore);
4989early_param("movablecore", cmdline_parse_movablecore);
4990
4991#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4992
4993/**
4994 * set_dma_reserve - set the specified number of pages reserved in the first zone
4995 * @new_dma_reserve: The number of pages to mark reserved
4996 *
4997 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4998 * In the DMA zone, a significant percentage may be consumed by kernel image
4999 * and other unfreeable allocations which can skew the watermarks badly. This
5000 * function may optionally be used to account for unfreeable pages in the
5001 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
5002 * smaller per-cpu batchsize.
5003 */
5004void __init set_dma_reserve(unsigned long new_dma_reserve)
5005{
5006	dma_reserve = new_dma_reserve;
5007}
5008
5009void __init free_area_init(unsigned long *zones_size)
5010{
5011	free_area_init_node(0, zones_size,
5012			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
5013}
5014
5015static int page_alloc_cpu_notify(struct notifier_block *self,
5016				 unsigned long action, void *hcpu)
5017{
5018	int cpu = (unsigned long)hcpu;
5019
5020	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
5021		drain_pages(cpu);
5022
5023		/*
5024		 * Spill the event counters of the dead processor
5025		 * into the current processors event counters.
5026		 * This artificially elevates the count of the current
5027		 * processor.
5028		 */
5029		vm_events_fold_cpu(cpu);
5030
5031		/*
5032		 * Zero the differential counters of the dead processor
5033		 * so that the vm statistics are consistent.
5034		 *
5035		 * This is only okay since the processor is dead and cannot
5036		 * race with what we are doing.
5037		 */
5038		refresh_cpu_vm_stats(cpu);
5039	}
5040	return NOTIFY_OK;
5041}
5042
5043void __init page_alloc_init(void)
5044{
5045	hotcpu_notifier(page_alloc_cpu_notify, 0);
5046}
5047
5048/*
5049 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
5050 *	or min_free_kbytes changes.
5051 */
5052static void calculate_totalreserve_pages(void)
5053{
5054	struct pglist_data *pgdat;
5055	unsigned long reserve_pages = 0;
5056	enum zone_type i, j;
5057
5058	for_each_online_pgdat(pgdat) {
5059		for (i = 0; i < MAX_NR_ZONES; i++) {
5060			struct zone *zone = pgdat->node_zones + i;
5061			unsigned long max = 0;
5062
5063			/* Find valid and maximum lowmem_reserve in the zone */
5064			for (j = i; j < MAX_NR_ZONES; j++) {
5065				if (zone->lowmem_reserve[j] > max)
5066					max = zone->lowmem_reserve[j];
5067			}
5068
5069			/* we treat the high watermark as reserved pages. */
5070			max += high_wmark_pages(zone);
5071
5072			if (max > zone->present_pages)
5073				max = zone->present_pages;
5074			reserve_pages += max;
5075		}
5076	}
5077	totalreserve_pages = reserve_pages;
5078}
5079
5080/*
5081 * setup_per_zone_lowmem_reserve - called whenever
5082 *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
5083 *	has a correct pages reserved value, so an adequate number of
5084 *	pages are left in the zone after a successful __alloc_pages().
5085 */
5086static void setup_per_zone_lowmem_reserve(void)
5087{
5088	struct pglist_data *pgdat;
5089	enum zone_type j, idx;
5090
5091	for_each_online_pgdat(pgdat) {
5092		for (j = 0; j < MAX_NR_ZONES; j++) {
5093			struct zone *zone = pgdat->node_zones + j;
5094			unsigned long present_pages = zone->present_pages;
5095
5096			zone->lowmem_reserve[j] = 0;
5097
5098			idx = j;
5099			while (idx) {
5100				struct zone *lower_zone;
5101
5102				idx--;
5103
5104				if (sysctl_lowmem_reserve_ratio[idx] < 1)
5105					sysctl_lowmem_reserve_ratio[idx] = 1;
5106
5107				lower_zone = pgdat->node_zones + idx;
5108				lower_zone->lowmem_reserve[j] = present_pages /
5109					sysctl_lowmem_reserve_ratio[idx];
5110				present_pages += lower_zone->present_pages;
5111			}
5112		}
5113	}
5114
5115	/* update totalreserve_pages */
5116	calculate_totalreserve_pages();
5117}
5118
5119/**
5120 * setup_per_zone_wmarks - called when min_free_kbytes changes
5121 * or when memory is hot-{added|removed}
5122 *
5123 * Ensures that the watermark[min,low,high] values for each zone are set
5124 * correctly with respect to min_free_kbytes.
5125 */
5126void setup_per_zone_wmarks(void)
5127{
5128	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5129	unsigned long lowmem_pages = 0;
5130	struct zone *zone;
5131	unsigned long flags;
5132
5133	/* Calculate total number of !ZONE_HIGHMEM pages */
5134	for_each_zone(zone) {
5135		if (!is_highmem(zone))
5136			lowmem_pages += zone->present_pages;
5137	}
5138
5139	for_each_zone(zone) {
5140		u64 tmp;
5141
5142		spin_lock_irqsave(&zone->lock, flags);
5143		tmp = (u64)pages_min * zone->present_pages;
5144		do_div(tmp, lowmem_pages);
5145		if (is_highmem(zone)) {
5146			/*
5147			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5148			 * need highmem pages, so cap pages_min to a small
5149			 * value here.
5150			 *
5151			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
5152			 * deltas controls asynch page reclaim, and so should
5153			 * not be capped for highmem.
5154			 */
5155			int min_pages;
5156
5157			min_pages = zone->present_pages / 1024;
5158			if (min_pages < SWAP_CLUSTER_MAX)
5159				min_pages = SWAP_CLUSTER_MAX;
5160			if (min_pages > 128)
5161				min_pages = 128;
5162			zone->watermark[WMARK_MIN] = min_pages;
5163		} else {
5164			/*
5165			 * If it's a lowmem zone, reserve a number of pages
5166			 * proportionate to the zone's size.
5167			 */
5168			zone->watermark[WMARK_MIN] = tmp;
5169		}
5170
5171		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
5172		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
5173		setup_zone_migrate_reserve(zone);
5174		spin_unlock_irqrestore(&zone->lock, flags);
5175	}
5176
5177	/* update totalreserve_pages */
5178	calculate_totalreserve_pages();
5179}
5180
5181/*
5182 * The inactive anon list should be small enough that the VM never has to
5183 * do too much work, but large enough that each inactive page has a chance
5184 * to be referenced again before it is swapped out.
5185 *
5186 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5187 * INACTIVE_ANON pages on this zone's LRU, maintained by the
5188 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5189 * the anonymous pages are kept on the inactive list.
5190 *
5191 * total     target    max
5192 * memory    ratio     inactive anon
5193 * -------------------------------------
5194 *   10MB       1         5MB
5195 *  100MB       1        50MB
5196 *    1GB       3       250MB
5197 *   10GB      10       0.9GB
5198 *  100GB      31         3GB
5199 *    1TB     101        10GB
5200 *   10TB     320        32GB
5201 */
5202static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
5203{
5204	unsigned int gb, ratio;
5205
5206	/* Zone size in gigabytes */
5207	gb = zone->present_pages >> (30 - PAGE_SHIFT);
5208	if (gb)
5209		ratio = int_sqrt(10 * gb);
5210	else
5211		ratio = 1;
5212
5213	zone->inactive_ratio = ratio;
5214}
5215
5216static void __meminit setup_per_zone_inactive_ratio(void)
5217{
5218	struct zone *zone;
5219
5220	for_each_zone(zone)
5221		calculate_zone_inactive_ratio(zone);
5222}
5223
5224/*
5225 * Initialise min_free_kbytes.
5226 *
5227 * For small machines we want it small (128k min).  For large machines
5228 * we want it large (64MB max).  But it is not linear, because network
5229 * bandwidth does not increase linearly with machine size.  We use
5230 *
5231 * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5232 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
5233 *
5234 * which yields
5235 *
5236 * 16MB:	512k
5237 * 32MB:	724k
5238 * 64MB:	1024k
5239 * 128MB:	1448k
5240 * 256MB:	2048k
5241 * 512MB:	2896k
5242 * 1024MB:	4096k
5243 * 2048MB:	5792k
5244 * 4096MB:	8192k
5245 * 8192MB:	11584k
5246 * 16384MB:	16384k
5247 */
5248int __meminit init_per_zone_wmark_min(void)
5249{
5250	unsigned long lowmem_kbytes;
5251
5252	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5253
5254	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5255	if (min_free_kbytes < 128)
5256		min_free_kbytes = 128;
5257	if (min_free_kbytes > 65536)
5258		min_free_kbytes = 65536;
5259	setup_per_zone_wmarks();
5260	refresh_zone_stat_thresholds();
5261	setup_per_zone_lowmem_reserve();
5262	setup_per_zone_inactive_ratio();
5263	return 0;
5264}
5265module_init(init_per_zone_wmark_min)
5266
5267/*
5268 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5269 *	that we can call two helper functions whenever min_free_kbytes
5270 *	changes.
5271 */
5272int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
5273	void __user *buffer, size_t *length, loff_t *ppos)
5274{
5275	proc_dointvec(table, write, buffer, length, ppos);
5276	if (write)
5277		setup_per_zone_wmarks();
5278	return 0;
5279}
5280
5281#ifdef CONFIG_NUMA
5282int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
5283	void __user *buffer, size_t *length, loff_t *ppos)
5284{
5285	struct zone *zone;
5286	int rc;
5287
5288	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5289	if (rc)
5290		return rc;
5291
5292	for_each_zone(zone)
5293		zone->min_unmapped_pages = (zone->present_pages *
5294				sysctl_min_unmapped_ratio) / 100;
5295	return 0;
5296}
5297
5298int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
5299	void __user *buffer, size_t *length, loff_t *ppos)
5300{
5301	struct zone *zone;
5302	int rc;
5303
5304	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5305	if (rc)
5306		return rc;
5307
5308	for_each_zone(zone)
5309		zone->min_slab_pages = (zone->present_pages *
5310				sysctl_min_slab_ratio) / 100;
5311	return 0;
5312}
5313#endif
5314
5315/*
5316 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5317 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5318 *	whenever sysctl_lowmem_reserve_ratio changes.
5319 *
5320 * The reserve ratio obviously has absolutely no relation with the
5321 * minimum watermarks. The lowmem reserve ratio can only make sense
5322 * if in function of the boot time zone sizes.
5323 */
5324int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
5325	void __user *buffer, size_t *length, loff_t *ppos)
5326{
5327	proc_dointvec_minmax(table, write, buffer, length, ppos);
5328	setup_per_zone_lowmem_reserve();
5329	return 0;
5330}
5331
5332/*
5333 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
5334 * cpu.  It is the fraction of total pages in each zone that a hot per cpu pagelist
5335 * can have before it gets flushed back to buddy allocator.
5336 */
5337
5338int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
5339	void __user *buffer, size_t *length, loff_t *ppos)
5340{
5341	struct zone *zone;
5342	unsigned int cpu;
5343	int ret;
5344
5345	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5346	if (!write || (ret == -EINVAL))
5347		return ret;
5348	for_each_populated_zone(zone) {
5349		for_each_possible_cpu(cpu) {
5350			unsigned long  high;
5351			high = zone->present_pages / percpu_pagelist_fraction;
5352			setup_pagelist_highmark(
5353				per_cpu_ptr(zone->pageset, cpu), high);
5354		}
5355	}
5356	return 0;
5357}
5358
5359int hashdist = HASHDIST_DEFAULT;
5360
5361#ifdef CONFIG_NUMA
5362static int __init set_hashdist(char *str)
5363{
5364	if (!str)
5365		return 0;
5366	hashdist = simple_strtoul(str, &str, 0);
5367	return 1;
5368}
5369__setup("hashdist=", set_hashdist);
5370#endif
5371
5372/*
5373 * allocate a large system hash table from bootmem
5374 * - it is assumed that the hash table must contain an exact power-of-2
5375 *   quantity of entries
5376 * - limit is the number of hash buckets, not the total allocation size
5377 */
5378void *__init alloc_large_system_hash(const char *tablename,
5379				     unsigned long bucketsize,
5380				     unsigned long numentries,
5381				     int scale,
5382				     int flags,
5383				     unsigned int *_hash_shift,
5384				     unsigned int *_hash_mask,
5385				     unsigned long limit)
5386{
5387	unsigned long long max = limit;
5388	unsigned long log2qty, size;
5389	void *table = NULL;
5390
5391	/* allow the kernel cmdline to have a say */
5392	if (!numentries) {
5393		/* round applicable memory size up to nearest megabyte */
5394		numentries = nr_kernel_pages;
5395		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5396		numentries >>= 20 - PAGE_SHIFT;
5397		numentries <<= 20 - PAGE_SHIFT;
5398
5399		/* limit to 1 bucket per 2^scale bytes of low memory */
5400		if (scale > PAGE_SHIFT)
5401			numentries >>= (scale - PAGE_SHIFT);
5402		else
5403			numentries <<= (PAGE_SHIFT - scale);
5404
5405		/* Make sure we've got at least a 0-order allocation.. */
5406		if (unlikely(flags & HASH_SMALL)) {
5407			/* Makes no sense without HASH_EARLY */
5408			WARN_ON(!(flags & HASH_EARLY));
5409			if (!(numentries >> *_hash_shift)) {
5410				numentries = 1UL << *_hash_shift;
5411				BUG_ON(!numentries);
5412			}
5413		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
5414			numentries = PAGE_SIZE / bucketsize;
5415	}
5416	numentries = roundup_pow_of_two(numentries);
5417
5418	/* limit allocation size to 1/16 total memory by default */
5419	if (max == 0) {
5420		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5421		do_div(max, bucketsize);
5422	}
5423
5424	if (numentries > max)
5425		numentries = max;
5426
5427	log2qty = ilog2(numentries);
5428
5429	do {
5430		size = bucketsize << log2qty;
5431		if (flags & HASH_EARLY)
5432			table = alloc_bootmem_nopanic(size);
5433		else if (hashdist)
5434			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5435		else {
5436			/*
5437			 * If bucketsize is not a power-of-two, we may free
5438			 * some pages at the end of hash table which
5439			 * alloc_pages_exact() automatically does
5440			 */
5441			if (get_order(size) < MAX_ORDER) {
5442				table = alloc_pages_exact(size, GFP_ATOMIC);
5443				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5444			}
5445		}
5446	} while (!table && size > PAGE_SIZE && --log2qty);
5447
5448	if (!table)
5449		panic("Failed to allocate %s hash table\n", tablename);
5450
5451	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
5452	       tablename,
5453	       (1UL << log2qty),
5454	       ilog2(size) - PAGE_SHIFT,
5455	       size);
5456
5457	if (_hash_shift)
5458		*_hash_shift = log2qty;
5459	if (_hash_mask)
5460		*_hash_mask = (1 << log2qty) - 1;
5461
5462	return table;
5463}
5464
5465/* Return a pointer to the bitmap storing bits affecting a block of pages */
5466static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
5467							unsigned long pfn)
5468{
5469#ifdef CONFIG_SPARSEMEM
5470	return __pfn_to_section(pfn)->pageblock_flags;
5471#else
5472	return zone->pageblock_flags;
5473#endif /* CONFIG_SPARSEMEM */
5474}
5475
5476static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5477{
5478#ifdef CONFIG_SPARSEMEM
5479	pfn &= (PAGES_PER_SECTION-1);
5480	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5481#else
5482	pfn = pfn - zone->zone_start_pfn;
5483	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5484#endif /* CONFIG_SPARSEMEM */
5485}
5486
5487/**
5488 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
5489 * @page: The page within the block of interest
5490 * @start_bitidx: The first bit of interest to retrieve
5491 * @end_bitidx: The last bit of interest
5492 * returns pageblock_bits flags
5493 */
5494unsigned long get_pageblock_flags_group(struct page *page,
5495					int start_bitidx, int end_bitidx)
5496{
5497	struct zone *zone;
5498	unsigned long *bitmap;
5499	unsigned long pfn, bitidx;
5500	unsigned long flags = 0;
5501	unsigned long value = 1;
5502
5503	zone = page_zone(page);
5504	pfn = page_to_pfn(page);
5505	bitmap = get_pageblock_bitmap(zone, pfn);
5506	bitidx = pfn_to_bitidx(zone, pfn);
5507
5508	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5509		if (test_bit(bitidx + start_bitidx, bitmap))
5510			flags |= value;
5511
5512	return flags;
5513}
5514
5515/**
5516 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
5517 * @page: The page within the block of interest
5518 * @start_bitidx: The first bit of interest
5519 * @end_bitidx: The last bit of interest
5520 * @flags: The flags to set
5521 */
5522void set_pageblock_flags_group(struct page *page, unsigned long flags,
5523					int start_bitidx, int end_bitidx)
5524{
5525	struct zone *zone;
5526	unsigned long *bitmap;
5527	unsigned long pfn, bitidx;
5528	unsigned long value = 1;
5529
5530	zone = page_zone(page);
5531	pfn = page_to_pfn(page);
5532	bitmap = get_pageblock_bitmap(zone, pfn);
5533	bitidx = pfn_to_bitidx(zone, pfn);
5534	VM_BUG_ON(pfn < zone->zone_start_pfn);
5535	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
5536
5537	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5538		if (flags & value)
5539			__set_bit(bitidx + start_bitidx, bitmap);
5540		else
5541			__clear_bit(bitidx + start_bitidx, bitmap);
5542}
5543
5544/*
5545 * This is designed as sub function...plz see page_isolation.c also.
5546 * set/clear page block's type to be ISOLATE.
5547 * page allocater never alloc memory from ISOLATE block.
5548 */
5549
5550static int
5551__count_immobile_pages(struct zone *zone, struct page *page, int count)
5552{
5553	unsigned long pfn, iter, found;
5554	/*
5555	 * For avoiding noise data, lru_add_drain_all() should be called
5556	 * If ZONE_MOVABLE, the zone never contains immobile pages
5557	 */
5558	if (zone_idx(zone) == ZONE_MOVABLE)
5559		return true;
5560
5561	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
5562		return true;
5563
5564	pfn = page_to_pfn(page);
5565	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
5566		unsigned long check = pfn + iter;
5567
5568		if (!pfn_valid_within(check))
5569			continue;
5570
5571		page = pfn_to_page(check);
5572		if (!page_count(page)) {
5573			if (PageBuddy(page))
5574				iter += (1 << page_order(page)) - 1;
5575			continue;
5576		}
5577		if (!PageLRU(page))
5578			found++;
5579		/*
5580		 * If there are RECLAIMABLE pages, we need to check it.
5581		 * But now, memory offline itself doesn't call shrink_slab()
5582		 * and it still to be fixed.
5583		 */
5584		/*
5585		 * If the page is not RAM, page_count()should be 0.
5586		 * we don't need more check. This is an _used_ not-movable page.
5587		 *
5588		 * The problematic thing here is PG_reserved pages. PG_reserved
5589		 * is set to both of a memory hole page and a _used_ kernel
5590		 * page at boot.
5591		 */
5592		if (found > count)
5593			return false;
5594	}
5595	return true;
5596}
5597
5598bool is_pageblock_removable_nolock(struct page *page)
5599{
5600	struct zone *zone = page_zone(page);
5601	return __count_immobile_pages(zone, page, 0);
5602}
5603
5604int set_migratetype_isolate(struct page *page)
5605{
5606	struct zone *zone;
5607	unsigned long flags, pfn;
5608	struct memory_isolate_notify arg;
5609	int notifier_ret;
5610	int ret = -EBUSY;
5611
5612	zone = page_zone(page);
5613
5614	spin_lock_irqsave(&zone->lock, flags);
5615
5616	pfn = page_to_pfn(page);
5617	arg.start_pfn = pfn;
5618	arg.nr_pages = pageblock_nr_pages;
5619	arg.pages_found = 0;
5620
5621	/*
5622	 * It may be possible to isolate a pageblock even if the
5623	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
5624	 * notifier chain is used by balloon drivers to return the
5625	 * number of pages in a range that are held by the balloon
5626	 * driver to shrink memory. If all the pages are accounted for
5627	 * by balloons, are free, or on the LRU, isolation can continue.
5628	 * Later, for example, when memory hotplug notifier runs, these
5629	 * pages reported as "can be isolated" should be isolated(freed)
5630	 * by the balloon driver through the memory notifier chain.
5631	 */
5632	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
5633	notifier_ret = notifier_to_errno(notifier_ret);
5634	if (notifier_ret)
5635		goto out;
5636	/*
5637	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
5638	 * We just check MOVABLE pages.
5639	 */
5640	if (__count_immobile_pages(zone, page, arg.pages_found))
5641		ret = 0;
5642
5643	/*
5644	 * immobile means "not-on-lru" paes. If immobile is larger than
5645	 * removable-by-driver pages reported by notifier, we'll fail.
5646	 */
5647
5648out:
5649	if (!ret) {
5650		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5651		move_freepages_block(zone, page, MIGRATE_ISOLATE);
5652	}
5653
5654	spin_unlock_irqrestore(&zone->lock, flags);
5655	if (!ret)
5656		drain_all_pages();
5657	return ret;
5658}
5659
5660void unset_migratetype_isolate(struct page *page)
5661{
5662	struct zone *zone;
5663	unsigned long flags;
5664	zone = page_zone(page);
5665	spin_lock_irqsave(&zone->lock, flags);
5666	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5667		goto out;
5668	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5669	move_freepages_block(zone, page, MIGRATE_MOVABLE);
5670out:
5671	spin_unlock_irqrestore(&zone->lock, flags);
5672}
5673
5674#ifdef CONFIG_MEMORY_HOTREMOVE
5675/*
5676 * All pages in the range must be isolated before calling this.
5677 */
5678void
5679__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5680{
5681	struct page *page;
5682	struct zone *zone;
5683	int order, i;
5684	unsigned long pfn;
5685	unsigned long flags;
5686	/* find the first valid pfn */
5687	for (pfn = start_pfn; pfn < end_pfn; pfn++)
5688		if (pfn_valid(pfn))
5689			break;
5690	if (pfn == end_pfn)
5691		return;
5692	zone = page_zone(pfn_to_page(pfn));
5693	spin_lock_irqsave(&zone->lock, flags);
5694	pfn = start_pfn;
5695	while (pfn < end_pfn) {
5696		if (!pfn_valid(pfn)) {
5697			pfn++;
5698			continue;
5699		}
5700		page = pfn_to_page(pfn);
5701		BUG_ON(page_count(page));
5702		BUG_ON(!PageBuddy(page));
5703		order = page_order(page);
5704#ifdef CONFIG_DEBUG_VM
5705		printk(KERN_INFO "remove from free list %lx %d %lx\n",
5706		       pfn, 1 << order, end_pfn);
5707#endif
5708		list_del(&page->lru);
5709		rmv_page_order(page);
5710		zone->free_area[order].nr_free--;
5711		__mod_zone_page_state(zone, NR_FREE_PAGES,
5712				      - (1UL << order));
5713		for (i = 0; i < (1 << order); i++)
5714			SetPageReserved((page+i));
5715		pfn += (1 << order);
5716	}
5717	spin_unlock_irqrestore(&zone->lock, flags);
5718}
5719#endif
5720
5721#ifdef CONFIG_MEMORY_FAILURE
5722bool is_free_buddy_page(struct page *page)
5723{
5724	struct zone *zone = page_zone(page);
5725	unsigned long pfn = page_to_pfn(page);
5726	unsigned long flags;
5727	int order;
5728
5729	spin_lock_irqsave(&zone->lock, flags);
5730	for (order = 0; order < MAX_ORDER; order++) {
5731		struct page *page_head = page - (pfn & ((1 << order) - 1));
5732
5733		if (PageBuddy(page_head) && page_order(page_head) >= order)
5734			break;
5735	}
5736	spin_unlock_irqrestore(&zone->lock, flags);
5737
5738	return order < MAX_ORDER;
5739}
5740#endif
5741
5742static struct trace_print_flags pageflag_names[] = {
5743	{1UL << PG_locked,		"locked"	},
5744	{1UL << PG_error,		"error"		},
5745	{1UL << PG_referenced,		"referenced"	},
5746	{1UL << PG_uptodate,		"uptodate"	},
5747	{1UL << PG_dirty,		"dirty"		},
5748	{1UL << PG_lru,			"lru"		},
5749	{1UL << PG_active,		"active"	},
5750	{1UL << PG_slab,		"slab"		},
5751	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
5752	{1UL << PG_arch_1,		"arch_1"	},
5753	{1UL << PG_reserved,		"reserved"	},
5754	{1UL << PG_private,		"private"	},
5755	{1UL << PG_private_2,		"private_2"	},
5756	{1UL << PG_writeback,		"writeback"	},
5757#ifdef CONFIG_PAGEFLAGS_EXTENDED
5758	{1UL << PG_head,		"head"		},
5759	{1UL << PG_tail,		"tail"		},
5760#else
5761	{1UL << PG_compound,		"compound"	},
5762#endif
5763	{1UL << PG_swapcache,		"swapcache"	},
5764	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
5765	{1UL << PG_reclaim,		"reclaim"	},
5766	{1UL << PG_swapbacked,		"swapbacked"	},
5767	{1UL << PG_unevictable,		"unevictable"	},
5768#ifdef CONFIG_MMU
5769	{1UL << PG_mlocked,		"mlocked"	},
5770#endif
5771#ifdef CONFIG_ARCH_USES_PG_UNCACHED
5772	{1UL << PG_uncached,		"uncached"	},
5773#endif
5774#ifdef CONFIG_MEMORY_FAILURE
5775	{1UL << PG_hwpoison,		"hwpoison"	},
5776#endif
5777	{-1UL,				NULL		},
5778};
5779
5780static void dump_page_flags(unsigned long flags)
5781{
5782	const char *delim = "";
5783	unsigned long mask;
5784	int i;
5785
5786	printk(KERN_ALERT "page flags: %#lx(", flags);
5787
5788	/* remove zone id */
5789	flags &= (1UL << NR_PAGEFLAGS) - 1;
5790
5791	for (i = 0; pageflag_names[i].name && flags; i++) {
5792
5793		mask = pageflag_names[i].mask;
5794		if ((flags & mask) != mask)
5795			continue;
5796
5797		flags &= ~mask;
5798		printk("%s%s", delim, pageflag_names[i].name);
5799		delim = "|";
5800	}
5801
5802	/* check for left over flags */
5803	if (flags)
5804		printk("%s%#lx", delim, flags);
5805
5806	printk(")\n");
5807}
5808
5809void dump_page(struct page *page)
5810{
5811	printk(KERN_ALERT
5812	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
5813		page, atomic_read(&page->_count), page_mapcount(page),
5814		page->mapping, page->index);
5815	dump_page_flags(page->flags);
5816	mem_cgroup_print_bad_page(page);
5817}
5818