page_alloc.c revision 1f522509c77a5dea8dc384b735314f03908a6415
1/*
2 *  linux/mm/page_alloc.c
3 *
4 *  Manages the free list, the system allocates free pages here.
5 *  Note that kmalloc() lives in slab.c
6 *
7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8 *  Swap reorganised 29.12.95, Stephen Tweedie
9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
22#include <linux/jiffies.h>
23#include <linux/bootmem.h>
24#include <linux/compiler.h>
25#include <linux/kernel.h>
26#include <linux/kmemcheck.h>
27#include <linux/module.h>
28#include <linux/suspend.h>
29#include <linux/pagevec.h>
30#include <linux/blkdev.h>
31#include <linux/slab.h>
32#include <linux/oom.h>
33#include <linux/notifier.h>
34#include <linux/topology.h>
35#include <linux/sysctl.h>
36#include <linux/cpu.h>
37#include <linux/cpuset.h>
38#include <linux/memory_hotplug.h>
39#include <linux/nodemask.h>
40#include <linux/vmalloc.h>
41#include <linux/mempolicy.h>
42#include <linux/stop_machine.h>
43#include <linux/sort.h>
44#include <linux/pfn.h>
45#include <linux/backing-dev.h>
46#include <linux/fault-inject.h>
47#include <linux/page-isolation.h>
48#include <linux/page_cgroup.h>
49#include <linux/debugobjects.h>
50#include <linux/kmemleak.h>
51#include <linux/memory.h>
52#include <linux/compaction.h>
53#include <trace/events/kmem.h>
54#include <linux/ftrace_event.h>
55
56#include <asm/tlbflush.h>
57#include <asm/div64.h>
58#include "internal.h"
59
60/*
61 * Array of node states.
62 */
63nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
64	[N_POSSIBLE] = NODE_MASK_ALL,
65	[N_ONLINE] = { { [0] = 1UL } },
66#ifndef CONFIG_NUMA
67	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
68#ifdef CONFIG_HIGHMEM
69	[N_HIGH_MEMORY] = { { [0] = 1UL } },
70#endif
71	[N_CPU] = { { [0] = 1UL } },
72#endif	/* NUMA */
73};
74EXPORT_SYMBOL(node_states);
75
76unsigned long totalram_pages __read_mostly;
77unsigned long totalreserve_pages __read_mostly;
78int percpu_pagelist_fraction;
79gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
80
81#ifdef CONFIG_PM_SLEEP
82/*
83 * The following functions are used by the suspend/hibernate code to temporarily
84 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
85 * while devices are suspended.  To avoid races with the suspend/hibernate code,
86 * they should always be called with pm_mutex held (gfp_allowed_mask also should
87 * only be modified with pm_mutex held, unless the suspend/hibernate code is
88 * guaranteed not to run in parallel with that modification).
89 */
90void set_gfp_allowed_mask(gfp_t mask)
91{
92	WARN_ON(!mutex_is_locked(&pm_mutex));
93	gfp_allowed_mask = mask;
94}
95
96gfp_t clear_gfp_allowed_mask(gfp_t mask)
97{
98	gfp_t ret = gfp_allowed_mask;
99
100	WARN_ON(!mutex_is_locked(&pm_mutex));
101	gfp_allowed_mask &= ~mask;
102	return ret;
103}
104#endif /* CONFIG_PM_SLEEP */
105
106#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
107int pageblock_order __read_mostly;
108#endif
109
110static void __free_pages_ok(struct page *page, unsigned int order);
111
112/*
113 * results with 256, 32 in the lowmem_reserve sysctl:
114 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
115 *	1G machine -> (16M dma, 784M normal, 224M high)
116 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
117 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
118 *	HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
119 *
120 * TBD: should special case ZONE_DMA32 machines here - in those we normally
121 * don't need any ZONE_NORMAL reservation
122 */
123int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
124#ifdef CONFIG_ZONE_DMA
125	 256,
126#endif
127#ifdef CONFIG_ZONE_DMA32
128	 256,
129#endif
130#ifdef CONFIG_HIGHMEM
131	 32,
132#endif
133	 32,
134};
135
136EXPORT_SYMBOL(totalram_pages);
137
138static char * const zone_names[MAX_NR_ZONES] = {
139#ifdef CONFIG_ZONE_DMA
140	 "DMA",
141#endif
142#ifdef CONFIG_ZONE_DMA32
143	 "DMA32",
144#endif
145	 "Normal",
146#ifdef CONFIG_HIGHMEM
147	 "HighMem",
148#endif
149	 "Movable",
150};
151
152int min_free_kbytes = 1024;
153
154static unsigned long __meminitdata nr_kernel_pages;
155static unsigned long __meminitdata nr_all_pages;
156static unsigned long __meminitdata dma_reserve;
157
158#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
159  /*
160   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
161   * ranges of memory (RAM) that may be registered with add_active_range().
162   * Ranges passed to add_active_range() will be merged if possible
163   * so the number of times add_active_range() can be called is
164   * related to the number of nodes and the number of holes
165   */
166  #ifdef CONFIG_MAX_ACTIVE_REGIONS
167    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
168    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
169  #else
170    #if MAX_NUMNODES >= 32
171      /* If there can be many nodes, allow up to 50 holes per node */
172      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
173    #else
174      /* By default, allow up to 256 distinct regions */
175      #define MAX_ACTIVE_REGIONS 256
176    #endif
177  #endif
178
179  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
180  static int __meminitdata nr_nodemap_entries;
181  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
182  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
183  static unsigned long __initdata required_kernelcore;
184  static unsigned long __initdata required_movablecore;
185  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
186
187  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
188  int movable_zone;
189  EXPORT_SYMBOL(movable_zone);
190#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
191
192#if MAX_NUMNODES > 1
193int nr_node_ids __read_mostly = MAX_NUMNODES;
194int nr_online_nodes __read_mostly = 1;
195EXPORT_SYMBOL(nr_node_ids);
196EXPORT_SYMBOL(nr_online_nodes);
197#endif
198
199int page_group_by_mobility_disabled __read_mostly;
200
201static void set_pageblock_migratetype(struct page *page, int migratetype)
202{
203
204	if (unlikely(page_group_by_mobility_disabled))
205		migratetype = MIGRATE_UNMOVABLE;
206
207	set_pageblock_flags_group(page, (unsigned long)migratetype,
208					PB_migrate, PB_migrate_end);
209}
210
211bool oom_killer_disabled __read_mostly;
212
213#ifdef CONFIG_DEBUG_VM
214static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
215{
216	int ret = 0;
217	unsigned seq;
218	unsigned long pfn = page_to_pfn(page);
219
220	do {
221		seq = zone_span_seqbegin(zone);
222		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
223			ret = 1;
224		else if (pfn < zone->zone_start_pfn)
225			ret = 1;
226	} while (zone_span_seqretry(zone, seq));
227
228	return ret;
229}
230
231static int page_is_consistent(struct zone *zone, struct page *page)
232{
233	if (!pfn_valid_within(page_to_pfn(page)))
234		return 0;
235	if (zone != page_zone(page))
236		return 0;
237
238	return 1;
239}
240/*
241 * Temporary debugging check for pages not lying within a given zone.
242 */
243static int bad_range(struct zone *zone, struct page *page)
244{
245	if (page_outside_zone_boundaries(zone, page))
246		return 1;
247	if (!page_is_consistent(zone, page))
248		return 1;
249
250	return 0;
251}
252#else
253static inline int bad_range(struct zone *zone, struct page *page)
254{
255	return 0;
256}
257#endif
258
259static void bad_page(struct page *page)
260{
261	static unsigned long resume;
262	static unsigned long nr_shown;
263	static unsigned long nr_unshown;
264
265	/* Don't complain about poisoned pages */
266	if (PageHWPoison(page)) {
267		__ClearPageBuddy(page);
268		return;
269	}
270
271	/*
272	 * Allow a burst of 60 reports, then keep quiet for that minute;
273	 * or allow a steady drip of one report per second.
274	 */
275	if (nr_shown == 60) {
276		if (time_before(jiffies, resume)) {
277			nr_unshown++;
278			goto out;
279		}
280		if (nr_unshown) {
281			printk(KERN_ALERT
282			      "BUG: Bad page state: %lu messages suppressed\n",
283				nr_unshown);
284			nr_unshown = 0;
285		}
286		nr_shown = 0;
287	}
288	if (nr_shown++ == 0)
289		resume = jiffies + 60 * HZ;
290
291	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
292		current->comm, page_to_pfn(page));
293	dump_page(page);
294
295	dump_stack();
296out:
297	/* Leave bad fields for debug, except PageBuddy could make trouble */
298	__ClearPageBuddy(page);
299	add_taint(TAINT_BAD_PAGE);
300}
301
302/*
303 * Higher-order pages are called "compound pages".  They are structured thusly:
304 *
305 * The first PAGE_SIZE page is called the "head page".
306 *
307 * The remaining PAGE_SIZE pages are called "tail pages".
308 *
309 * All pages have PG_compound set.  All pages have their ->private pointing at
310 * the head page (even the head page has this).
311 *
312 * The first tail page's ->lru.next holds the address of the compound page's
313 * put_page() function.  Its ->lru.prev holds the order of allocation.
314 * This usage means that zero-order pages may not be compound.
315 */
316
317static void free_compound_page(struct page *page)
318{
319	__free_pages_ok(page, compound_order(page));
320}
321
322void prep_compound_page(struct page *page, unsigned long order)
323{
324	int i;
325	int nr_pages = 1 << order;
326
327	set_compound_page_dtor(page, free_compound_page);
328	set_compound_order(page, order);
329	__SetPageHead(page);
330	for (i = 1; i < nr_pages; i++) {
331		struct page *p = page + i;
332
333		__SetPageTail(p);
334		p->first_page = page;
335	}
336}
337
338static int destroy_compound_page(struct page *page, unsigned long order)
339{
340	int i;
341	int nr_pages = 1 << order;
342	int bad = 0;
343
344	if (unlikely(compound_order(page) != order) ||
345	    unlikely(!PageHead(page))) {
346		bad_page(page);
347		bad++;
348	}
349
350	__ClearPageHead(page);
351
352	for (i = 1; i < nr_pages; i++) {
353		struct page *p = page + i;
354
355		if (unlikely(!PageTail(p) || (p->first_page != page))) {
356			bad_page(page);
357			bad++;
358		}
359		__ClearPageTail(p);
360	}
361
362	return bad;
363}
364
365static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
366{
367	int i;
368
369	/*
370	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
371	 * and __GFP_HIGHMEM from hard or soft interrupt context.
372	 */
373	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
374	for (i = 0; i < (1 << order); i++)
375		clear_highpage(page + i);
376}
377
378static inline void set_page_order(struct page *page, int order)
379{
380	set_page_private(page, order);
381	__SetPageBuddy(page);
382}
383
384static inline void rmv_page_order(struct page *page)
385{
386	__ClearPageBuddy(page);
387	set_page_private(page, 0);
388}
389
390/*
391 * Locate the struct page for both the matching buddy in our
392 * pair (buddy1) and the combined O(n+1) page they form (page).
393 *
394 * 1) Any buddy B1 will have an order O twin B2 which satisfies
395 * the following equation:
396 *     B2 = B1 ^ (1 << O)
397 * For example, if the starting buddy (buddy2) is #8 its order
398 * 1 buddy is #10:
399 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
400 *
401 * 2) Any buddy B will have an order O+1 parent P which
402 * satisfies the following equation:
403 *     P = B & ~(1 << O)
404 *
405 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
406 */
407static inline struct page *
408__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
409{
410	unsigned long buddy_idx = page_idx ^ (1 << order);
411
412	return page + (buddy_idx - page_idx);
413}
414
415static inline unsigned long
416__find_combined_index(unsigned long page_idx, unsigned int order)
417{
418	return (page_idx & ~(1 << order));
419}
420
421/*
422 * This function checks whether a page is free && is the buddy
423 * we can do coalesce a page and its buddy if
424 * (a) the buddy is not in a hole &&
425 * (b) the buddy is in the buddy system &&
426 * (c) a page and its buddy have the same order &&
427 * (d) a page and its buddy are in the same zone.
428 *
429 * For recording whether a page is in the buddy system, we use PG_buddy.
430 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
431 *
432 * For recording page's order, we use page_private(page).
433 */
434static inline int page_is_buddy(struct page *page, struct page *buddy,
435								int order)
436{
437	if (!pfn_valid_within(page_to_pfn(buddy)))
438		return 0;
439
440	if (page_zone_id(page) != page_zone_id(buddy))
441		return 0;
442
443	if (PageBuddy(buddy) && page_order(buddy) == order) {
444		VM_BUG_ON(page_count(buddy) != 0);
445		return 1;
446	}
447	return 0;
448}
449
450/*
451 * Freeing function for a buddy system allocator.
452 *
453 * The concept of a buddy system is to maintain direct-mapped table
454 * (containing bit values) for memory blocks of various "orders".
455 * The bottom level table contains the map for the smallest allocatable
456 * units of memory (here, pages), and each level above it describes
457 * pairs of units from the levels below, hence, "buddies".
458 * At a high level, all that happens here is marking the table entry
459 * at the bottom level available, and propagating the changes upward
460 * as necessary, plus some accounting needed to play nicely with other
461 * parts of the VM system.
462 * At each level, we keep a list of pages, which are heads of continuous
463 * free pages of length of (1 << order) and marked with PG_buddy. Page's
464 * order is recorded in page_private(page) field.
465 * So when we are allocating or freeing one, we can derive the state of the
466 * other.  That is, if we allocate a small block, and both were
467 * free, the remainder of the region must be split into blocks.
468 * If a block is freed, and its buddy is also free, then this
469 * triggers coalescing into a block of larger size.
470 *
471 * -- wli
472 */
473
474static inline void __free_one_page(struct page *page,
475		struct zone *zone, unsigned int order,
476		int migratetype)
477{
478	unsigned long page_idx;
479	unsigned long combined_idx;
480	struct page *buddy;
481
482	if (unlikely(PageCompound(page)))
483		if (unlikely(destroy_compound_page(page, order)))
484			return;
485
486	VM_BUG_ON(migratetype == -1);
487
488	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
489
490	VM_BUG_ON(page_idx & ((1 << order) - 1));
491	VM_BUG_ON(bad_range(zone, page));
492
493	while (order < MAX_ORDER-1) {
494		buddy = __page_find_buddy(page, page_idx, order);
495		if (!page_is_buddy(page, buddy, order))
496			break;
497
498		/* Our buddy is free, merge with it and move up one order. */
499		list_del(&buddy->lru);
500		zone->free_area[order].nr_free--;
501		rmv_page_order(buddy);
502		combined_idx = __find_combined_index(page_idx, order);
503		page = page + (combined_idx - page_idx);
504		page_idx = combined_idx;
505		order++;
506	}
507	set_page_order(page, order);
508
509	/*
510	 * If this is not the largest possible page, check if the buddy
511	 * of the next-highest order is free. If it is, it's possible
512	 * that pages are being freed that will coalesce soon. In case,
513	 * that is happening, add the free page to the tail of the list
514	 * so it's less likely to be used soon and more likely to be merged
515	 * as a higher order page
516	 */
517	if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) {
518		struct page *higher_page, *higher_buddy;
519		combined_idx = __find_combined_index(page_idx, order);
520		higher_page = page + combined_idx - page_idx;
521		higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1);
522		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
523			list_add_tail(&page->lru,
524				&zone->free_area[order].free_list[migratetype]);
525			goto out;
526		}
527	}
528
529	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
530out:
531	zone->free_area[order].nr_free++;
532}
533
534/*
535 * free_page_mlock() -- clean up attempts to free and mlocked() page.
536 * Page should not be on lru, so no need to fix that up.
537 * free_pages_check() will verify...
538 */
539static inline void free_page_mlock(struct page *page)
540{
541	__dec_zone_page_state(page, NR_MLOCK);
542	__count_vm_event(UNEVICTABLE_MLOCKFREED);
543}
544
545static inline int free_pages_check(struct page *page)
546{
547	if (unlikely(page_mapcount(page) |
548		(page->mapping != NULL)  |
549		(atomic_read(&page->_count) != 0) |
550		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
551		bad_page(page);
552		return 1;
553	}
554	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
555		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
556	return 0;
557}
558
559/*
560 * Frees a number of pages from the PCP lists
561 * Assumes all pages on list are in same zone, and of same order.
562 * count is the number of pages to free.
563 *
564 * If the zone was previously in an "all pages pinned" state then look to
565 * see if this freeing clears that state.
566 *
567 * And clear the zone's pages_scanned counter, to hold off the "all pages are
568 * pinned" detection logic.
569 */
570static void free_pcppages_bulk(struct zone *zone, int count,
571					struct per_cpu_pages *pcp)
572{
573	int migratetype = 0;
574	int batch_free = 0;
575
576	spin_lock(&zone->lock);
577	zone->all_unreclaimable = 0;
578	zone->pages_scanned = 0;
579
580	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
581	while (count) {
582		struct page *page;
583		struct list_head *list;
584
585		/*
586		 * Remove pages from lists in a round-robin fashion. A
587		 * batch_free count is maintained that is incremented when an
588		 * empty list is encountered.  This is so more pages are freed
589		 * off fuller lists instead of spinning excessively around empty
590		 * lists
591		 */
592		do {
593			batch_free++;
594			if (++migratetype == MIGRATE_PCPTYPES)
595				migratetype = 0;
596			list = &pcp->lists[migratetype];
597		} while (list_empty(list));
598
599		do {
600			page = list_entry(list->prev, struct page, lru);
601			/* must delete as __free_one_page list manipulates */
602			list_del(&page->lru);
603			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
604			__free_one_page(page, zone, 0, page_private(page));
605			trace_mm_page_pcpu_drain(page, 0, page_private(page));
606		} while (--count && --batch_free && !list_empty(list));
607	}
608	spin_unlock(&zone->lock);
609}
610
611static void free_one_page(struct zone *zone, struct page *page, int order,
612				int migratetype)
613{
614	spin_lock(&zone->lock);
615	zone->all_unreclaimable = 0;
616	zone->pages_scanned = 0;
617
618	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
619	__free_one_page(page, zone, order, migratetype);
620	spin_unlock(&zone->lock);
621}
622
623static bool free_pages_prepare(struct page *page, unsigned int order)
624{
625	int i;
626	int bad = 0;
627
628	trace_mm_page_free_direct(page, order);
629	kmemcheck_free_shadow(page, order);
630
631	for (i = 0; i < (1 << order); i++) {
632		struct page *pg = page + i;
633
634		if (PageAnon(pg))
635			pg->mapping = NULL;
636		bad += free_pages_check(pg);
637	}
638	if (bad)
639		return false;
640
641	if (!PageHighMem(page)) {
642		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
643		debug_check_no_obj_freed(page_address(page),
644					   PAGE_SIZE << order);
645	}
646	arch_free_page(page, order);
647	kernel_map_pages(page, 1 << order, 0);
648
649	return true;
650}
651
652static void __free_pages_ok(struct page *page, unsigned int order)
653{
654	unsigned long flags;
655	int wasMlocked = __TestClearPageMlocked(page);
656
657	if (!free_pages_prepare(page, order))
658		return;
659
660	local_irq_save(flags);
661	if (unlikely(wasMlocked))
662		free_page_mlock(page);
663	__count_vm_events(PGFREE, 1 << order);
664	free_one_page(page_zone(page), page, order,
665					get_pageblock_migratetype(page));
666	local_irq_restore(flags);
667}
668
669/*
670 * permit the bootmem allocator to evade page validation on high-order frees
671 */
672void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
673{
674	if (order == 0) {
675		__ClearPageReserved(page);
676		set_page_count(page, 0);
677		set_page_refcounted(page);
678		__free_page(page);
679	} else {
680		int loop;
681
682		prefetchw(page);
683		for (loop = 0; loop < BITS_PER_LONG; loop++) {
684			struct page *p = &page[loop];
685
686			if (loop + 1 < BITS_PER_LONG)
687				prefetchw(p + 1);
688			__ClearPageReserved(p);
689			set_page_count(p, 0);
690		}
691
692		set_page_refcounted(page);
693		__free_pages(page, order);
694	}
695}
696
697
698/*
699 * The order of subdivision here is critical for the IO subsystem.
700 * Please do not alter this order without good reasons and regression
701 * testing. Specifically, as large blocks of memory are subdivided,
702 * the order in which smaller blocks are delivered depends on the order
703 * they're subdivided in this function. This is the primary factor
704 * influencing the order in which pages are delivered to the IO
705 * subsystem according to empirical testing, and this is also justified
706 * by considering the behavior of a buddy system containing a single
707 * large block of memory acted on by a series of small allocations.
708 * This behavior is a critical factor in sglist merging's success.
709 *
710 * -- wli
711 */
712static inline void expand(struct zone *zone, struct page *page,
713	int low, int high, struct free_area *area,
714	int migratetype)
715{
716	unsigned long size = 1 << high;
717
718	while (high > low) {
719		area--;
720		high--;
721		size >>= 1;
722		VM_BUG_ON(bad_range(zone, &page[size]));
723		list_add(&page[size].lru, &area->free_list[migratetype]);
724		area->nr_free++;
725		set_page_order(&page[size], high);
726	}
727}
728
729/*
730 * This page is about to be returned from the page allocator
731 */
732static inline int check_new_page(struct page *page)
733{
734	if (unlikely(page_mapcount(page) |
735		(page->mapping != NULL)  |
736		(atomic_read(&page->_count) != 0)  |
737		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
738		bad_page(page);
739		return 1;
740	}
741	return 0;
742}
743
744static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
745{
746	int i;
747
748	for (i = 0; i < (1 << order); i++) {
749		struct page *p = page + i;
750		if (unlikely(check_new_page(p)))
751			return 1;
752	}
753
754	set_page_private(page, 0);
755	set_page_refcounted(page);
756
757	arch_alloc_page(page, order);
758	kernel_map_pages(page, 1 << order, 1);
759
760	if (gfp_flags & __GFP_ZERO)
761		prep_zero_page(page, order, gfp_flags);
762
763	if (order && (gfp_flags & __GFP_COMP))
764		prep_compound_page(page, order);
765
766	return 0;
767}
768
769/*
770 * Go through the free lists for the given migratetype and remove
771 * the smallest available page from the freelists
772 */
773static inline
774struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
775						int migratetype)
776{
777	unsigned int current_order;
778	struct free_area * area;
779	struct page *page;
780
781	/* Find a page of the appropriate size in the preferred list */
782	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
783		area = &(zone->free_area[current_order]);
784		if (list_empty(&area->free_list[migratetype]))
785			continue;
786
787		page = list_entry(area->free_list[migratetype].next,
788							struct page, lru);
789		list_del(&page->lru);
790		rmv_page_order(page);
791		area->nr_free--;
792		expand(zone, page, order, current_order, area, migratetype);
793		return page;
794	}
795
796	return NULL;
797}
798
799
800/*
801 * This array describes the order lists are fallen back to when
802 * the free lists for the desirable migrate type are depleted
803 */
804static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
805	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
806	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
807	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
808	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
809};
810
811/*
812 * Move the free pages in a range to the free lists of the requested type.
813 * Note that start_page and end_pages are not aligned on a pageblock
814 * boundary. If alignment is required, use move_freepages_block()
815 */
816static int move_freepages(struct zone *zone,
817			  struct page *start_page, struct page *end_page,
818			  int migratetype)
819{
820	struct page *page;
821	unsigned long order;
822	int pages_moved = 0;
823
824#ifndef CONFIG_HOLES_IN_ZONE
825	/*
826	 * page_zone is not safe to call in this context when
827	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
828	 * anyway as we check zone boundaries in move_freepages_block().
829	 * Remove at a later date when no bug reports exist related to
830	 * grouping pages by mobility
831	 */
832	BUG_ON(page_zone(start_page) != page_zone(end_page));
833#endif
834
835	for (page = start_page; page <= end_page;) {
836		/* Make sure we are not inadvertently changing nodes */
837		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
838
839		if (!pfn_valid_within(page_to_pfn(page))) {
840			page++;
841			continue;
842		}
843
844		if (!PageBuddy(page)) {
845			page++;
846			continue;
847		}
848
849		order = page_order(page);
850		list_del(&page->lru);
851		list_add(&page->lru,
852			&zone->free_area[order].free_list[migratetype]);
853		page += 1 << order;
854		pages_moved += 1 << order;
855	}
856
857	return pages_moved;
858}
859
860static int move_freepages_block(struct zone *zone, struct page *page,
861				int migratetype)
862{
863	unsigned long start_pfn, end_pfn;
864	struct page *start_page, *end_page;
865
866	start_pfn = page_to_pfn(page);
867	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
868	start_page = pfn_to_page(start_pfn);
869	end_page = start_page + pageblock_nr_pages - 1;
870	end_pfn = start_pfn + pageblock_nr_pages - 1;
871
872	/* Do not cross zone boundaries */
873	if (start_pfn < zone->zone_start_pfn)
874		start_page = page;
875	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
876		return 0;
877
878	return move_freepages(zone, start_page, end_page, migratetype);
879}
880
881static void change_pageblock_range(struct page *pageblock_page,
882					int start_order, int migratetype)
883{
884	int nr_pageblocks = 1 << (start_order - pageblock_order);
885
886	while (nr_pageblocks--) {
887		set_pageblock_migratetype(pageblock_page, migratetype);
888		pageblock_page += pageblock_nr_pages;
889	}
890}
891
892/* Remove an element from the buddy allocator from the fallback list */
893static inline struct page *
894__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
895{
896	struct free_area * area;
897	int current_order;
898	struct page *page;
899	int migratetype, i;
900
901	/* Find the largest possible block of pages in the other list */
902	for (current_order = MAX_ORDER-1; current_order >= order;
903						--current_order) {
904		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
905			migratetype = fallbacks[start_migratetype][i];
906
907			/* MIGRATE_RESERVE handled later if necessary */
908			if (migratetype == MIGRATE_RESERVE)
909				continue;
910
911			area = &(zone->free_area[current_order]);
912			if (list_empty(&area->free_list[migratetype]))
913				continue;
914
915			page = list_entry(area->free_list[migratetype].next,
916					struct page, lru);
917			area->nr_free--;
918
919			/*
920			 * If breaking a large block of pages, move all free
921			 * pages to the preferred allocation list. If falling
922			 * back for a reclaimable kernel allocation, be more
923			 * agressive about taking ownership of free pages
924			 */
925			if (unlikely(current_order >= (pageblock_order >> 1)) ||
926					start_migratetype == MIGRATE_RECLAIMABLE ||
927					page_group_by_mobility_disabled) {
928				unsigned long pages;
929				pages = move_freepages_block(zone, page,
930								start_migratetype);
931
932				/* Claim the whole block if over half of it is free */
933				if (pages >= (1 << (pageblock_order-1)) ||
934						page_group_by_mobility_disabled)
935					set_pageblock_migratetype(page,
936								start_migratetype);
937
938				migratetype = start_migratetype;
939			}
940
941			/* Remove the page from the freelists */
942			list_del(&page->lru);
943			rmv_page_order(page);
944
945			/* Take ownership for orders >= pageblock_order */
946			if (current_order >= pageblock_order)
947				change_pageblock_range(page, current_order,
948							start_migratetype);
949
950			expand(zone, page, order, current_order, area, migratetype);
951
952			trace_mm_page_alloc_extfrag(page, order, current_order,
953				start_migratetype, migratetype);
954
955			return page;
956		}
957	}
958
959	return NULL;
960}
961
962/*
963 * Do the hard work of removing an element from the buddy allocator.
964 * Call me with the zone->lock already held.
965 */
966static struct page *__rmqueue(struct zone *zone, unsigned int order,
967						int migratetype)
968{
969	struct page *page;
970
971retry_reserve:
972	page = __rmqueue_smallest(zone, order, migratetype);
973
974	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
975		page = __rmqueue_fallback(zone, order, migratetype);
976
977		/*
978		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
979		 * is used because __rmqueue_smallest is an inline function
980		 * and we want just one call site
981		 */
982		if (!page) {
983			migratetype = MIGRATE_RESERVE;
984			goto retry_reserve;
985		}
986	}
987
988	trace_mm_page_alloc_zone_locked(page, order, migratetype);
989	return page;
990}
991
992/*
993 * Obtain a specified number of elements from the buddy allocator, all under
994 * a single hold of the lock, for efficiency.  Add them to the supplied list.
995 * Returns the number of new pages which were placed at *list.
996 */
997static int rmqueue_bulk(struct zone *zone, unsigned int order,
998			unsigned long count, struct list_head *list,
999			int migratetype, int cold)
1000{
1001	int i;
1002
1003	spin_lock(&zone->lock);
1004	for (i = 0; i < count; ++i) {
1005		struct page *page = __rmqueue(zone, order, migratetype);
1006		if (unlikely(page == NULL))
1007			break;
1008
1009		/*
1010		 * Split buddy pages returned by expand() are received here
1011		 * in physical page order. The page is added to the callers and
1012		 * list and the list head then moves forward. From the callers
1013		 * perspective, the linked list is ordered by page number in
1014		 * some conditions. This is useful for IO devices that can
1015		 * merge IO requests if the physical pages are ordered
1016		 * properly.
1017		 */
1018		if (likely(cold == 0))
1019			list_add(&page->lru, list);
1020		else
1021			list_add_tail(&page->lru, list);
1022		set_page_private(page, migratetype);
1023		list = &page->lru;
1024	}
1025	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1026	spin_unlock(&zone->lock);
1027	return i;
1028}
1029
1030#ifdef CONFIG_NUMA
1031/*
1032 * Called from the vmstat counter updater to drain pagesets of this
1033 * currently executing processor on remote nodes after they have
1034 * expired.
1035 *
1036 * Note that this function must be called with the thread pinned to
1037 * a single processor.
1038 */
1039void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1040{
1041	unsigned long flags;
1042	int to_drain;
1043
1044	local_irq_save(flags);
1045	if (pcp->count >= pcp->batch)
1046		to_drain = pcp->batch;
1047	else
1048		to_drain = pcp->count;
1049	free_pcppages_bulk(zone, to_drain, pcp);
1050	pcp->count -= to_drain;
1051	local_irq_restore(flags);
1052}
1053#endif
1054
1055/*
1056 * Drain pages of the indicated processor.
1057 *
1058 * The processor must either be the current processor and the
1059 * thread pinned to the current processor or a processor that
1060 * is not online.
1061 */
1062static void drain_pages(unsigned int cpu)
1063{
1064	unsigned long flags;
1065	struct zone *zone;
1066
1067	for_each_populated_zone(zone) {
1068		struct per_cpu_pageset *pset;
1069		struct per_cpu_pages *pcp;
1070
1071		local_irq_save(flags);
1072		pset = per_cpu_ptr(zone->pageset, cpu);
1073
1074		pcp = &pset->pcp;
1075		free_pcppages_bulk(zone, pcp->count, pcp);
1076		pcp->count = 0;
1077		local_irq_restore(flags);
1078	}
1079}
1080
1081/*
1082 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1083 */
1084void drain_local_pages(void *arg)
1085{
1086	drain_pages(smp_processor_id());
1087}
1088
1089/*
1090 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
1091 */
1092void drain_all_pages(void)
1093{
1094	on_each_cpu(drain_local_pages, NULL, 1);
1095}
1096
1097#ifdef CONFIG_HIBERNATION
1098
1099void mark_free_pages(struct zone *zone)
1100{
1101	unsigned long pfn, max_zone_pfn;
1102	unsigned long flags;
1103	int order, t;
1104	struct list_head *curr;
1105
1106	if (!zone->spanned_pages)
1107		return;
1108
1109	spin_lock_irqsave(&zone->lock, flags);
1110
1111	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1112	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1113		if (pfn_valid(pfn)) {
1114			struct page *page = pfn_to_page(pfn);
1115
1116			if (!swsusp_page_is_forbidden(page))
1117				swsusp_unset_page_free(page);
1118		}
1119
1120	for_each_migratetype_order(order, t) {
1121		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1122			unsigned long i;
1123
1124			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1125			for (i = 0; i < (1UL << order); i++)
1126				swsusp_set_page_free(pfn_to_page(pfn + i));
1127		}
1128	}
1129	spin_unlock_irqrestore(&zone->lock, flags);
1130}
1131#endif /* CONFIG_PM */
1132
1133/*
1134 * Free a 0-order page
1135 * cold == 1 ? free a cold page : free a hot page
1136 */
1137void free_hot_cold_page(struct page *page, int cold)
1138{
1139	struct zone *zone = page_zone(page);
1140	struct per_cpu_pages *pcp;
1141	unsigned long flags;
1142	int migratetype;
1143	int wasMlocked = __TestClearPageMlocked(page);
1144
1145	if (!free_pages_prepare(page, 0))
1146		return;
1147
1148	migratetype = get_pageblock_migratetype(page);
1149	set_page_private(page, migratetype);
1150	local_irq_save(flags);
1151	if (unlikely(wasMlocked))
1152		free_page_mlock(page);
1153	__count_vm_event(PGFREE);
1154
1155	/*
1156	 * We only track unmovable, reclaimable and movable on pcp lists.
1157	 * Free ISOLATE pages back to the allocator because they are being
1158	 * offlined but treat RESERVE as movable pages so we can get those
1159	 * areas back if necessary. Otherwise, we may have to free
1160	 * excessively into the page allocator
1161	 */
1162	if (migratetype >= MIGRATE_PCPTYPES) {
1163		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1164			free_one_page(zone, page, 0, migratetype);
1165			goto out;
1166		}
1167		migratetype = MIGRATE_MOVABLE;
1168	}
1169
1170	pcp = &this_cpu_ptr(zone->pageset)->pcp;
1171	if (cold)
1172		list_add_tail(&page->lru, &pcp->lists[migratetype]);
1173	else
1174		list_add(&page->lru, &pcp->lists[migratetype]);
1175	pcp->count++;
1176	if (pcp->count >= pcp->high) {
1177		free_pcppages_bulk(zone, pcp->batch, pcp);
1178		pcp->count -= pcp->batch;
1179	}
1180
1181out:
1182	local_irq_restore(flags);
1183}
1184
1185/*
1186 * split_page takes a non-compound higher-order page, and splits it into
1187 * n (1<<order) sub-pages: page[0..n]
1188 * Each sub-page must be freed individually.
1189 *
1190 * Note: this is probably too low level an operation for use in drivers.
1191 * Please consult with lkml before using this in your driver.
1192 */
1193void split_page(struct page *page, unsigned int order)
1194{
1195	int i;
1196
1197	VM_BUG_ON(PageCompound(page));
1198	VM_BUG_ON(!page_count(page));
1199
1200#ifdef CONFIG_KMEMCHECK
1201	/*
1202	 * Split shadow pages too, because free(page[0]) would
1203	 * otherwise free the whole shadow.
1204	 */
1205	if (kmemcheck_page_is_tracked(page))
1206		split_page(virt_to_page(page[0].shadow), order);
1207#endif
1208
1209	for (i = 1; i < (1 << order); i++)
1210		set_page_refcounted(page + i);
1211}
1212
1213/*
1214 * Similar to split_page except the page is already free. As this is only
1215 * being used for migration, the migratetype of the block also changes.
1216 * As this is called with interrupts disabled, the caller is responsible
1217 * for calling arch_alloc_page() and kernel_map_page() after interrupts
1218 * are enabled.
1219 *
1220 * Note: this is probably too low level an operation for use in drivers.
1221 * Please consult with lkml before using this in your driver.
1222 */
1223int split_free_page(struct page *page)
1224{
1225	unsigned int order;
1226	unsigned long watermark;
1227	struct zone *zone;
1228
1229	BUG_ON(!PageBuddy(page));
1230
1231	zone = page_zone(page);
1232	order = page_order(page);
1233
1234	/* Obey watermarks as if the page was being allocated */
1235	watermark = low_wmark_pages(zone) + (1 << order);
1236	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1237		return 0;
1238
1239	/* Remove page from free list */
1240	list_del(&page->lru);
1241	zone->free_area[order].nr_free--;
1242	rmv_page_order(page);
1243	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
1244
1245	/* Split into individual pages */
1246	set_page_refcounted(page);
1247	split_page(page, order);
1248
1249	if (order >= pageblock_order - 1) {
1250		struct page *endpage = page + (1 << order) - 1;
1251		for (; page < endpage; page += pageblock_nr_pages)
1252			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1253	}
1254
1255	return 1 << order;
1256}
1257
1258/*
1259 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1260 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1261 * or two.
1262 */
1263static inline
1264struct page *buffered_rmqueue(struct zone *preferred_zone,
1265			struct zone *zone, int order, gfp_t gfp_flags,
1266			int migratetype)
1267{
1268	unsigned long flags;
1269	struct page *page;
1270	int cold = !!(gfp_flags & __GFP_COLD);
1271
1272again:
1273	if (likely(order == 0)) {
1274		struct per_cpu_pages *pcp;
1275		struct list_head *list;
1276
1277		local_irq_save(flags);
1278		pcp = &this_cpu_ptr(zone->pageset)->pcp;
1279		list = &pcp->lists[migratetype];
1280		if (list_empty(list)) {
1281			pcp->count += rmqueue_bulk(zone, 0,
1282					pcp->batch, list,
1283					migratetype, cold);
1284			if (unlikely(list_empty(list)))
1285				goto failed;
1286		}
1287
1288		if (cold)
1289			page = list_entry(list->prev, struct page, lru);
1290		else
1291			page = list_entry(list->next, struct page, lru);
1292
1293		list_del(&page->lru);
1294		pcp->count--;
1295	} else {
1296		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1297			/*
1298			 * __GFP_NOFAIL is not to be used in new code.
1299			 *
1300			 * All __GFP_NOFAIL callers should be fixed so that they
1301			 * properly detect and handle allocation failures.
1302			 *
1303			 * We most definitely don't want callers attempting to
1304			 * allocate greater than order-1 page units with
1305			 * __GFP_NOFAIL.
1306			 */
1307			WARN_ON_ONCE(order > 1);
1308		}
1309		spin_lock_irqsave(&zone->lock, flags);
1310		page = __rmqueue(zone, order, migratetype);
1311		spin_unlock(&zone->lock);
1312		if (!page)
1313			goto failed;
1314		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1315	}
1316
1317	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1318	zone_statistics(preferred_zone, zone);
1319	local_irq_restore(flags);
1320
1321	VM_BUG_ON(bad_range(zone, page));
1322	if (prep_new_page(page, order, gfp_flags))
1323		goto again;
1324	return page;
1325
1326failed:
1327	local_irq_restore(flags);
1328	return NULL;
1329}
1330
1331/* The ALLOC_WMARK bits are used as an index to zone->watermark */
1332#define ALLOC_WMARK_MIN		WMARK_MIN
1333#define ALLOC_WMARK_LOW		WMARK_LOW
1334#define ALLOC_WMARK_HIGH	WMARK_HIGH
1335#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1336
1337/* Mask to get the watermark bits */
1338#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1339
1340#define ALLOC_HARDER		0x10 /* try to alloc harder */
1341#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1342#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1343
1344#ifdef CONFIG_FAIL_PAGE_ALLOC
1345
1346static struct fail_page_alloc_attr {
1347	struct fault_attr attr;
1348
1349	u32 ignore_gfp_highmem;
1350	u32 ignore_gfp_wait;
1351	u32 min_order;
1352
1353#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1354
1355	struct dentry *ignore_gfp_highmem_file;
1356	struct dentry *ignore_gfp_wait_file;
1357	struct dentry *min_order_file;
1358
1359#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1360
1361} fail_page_alloc = {
1362	.attr = FAULT_ATTR_INITIALIZER,
1363	.ignore_gfp_wait = 1,
1364	.ignore_gfp_highmem = 1,
1365	.min_order = 1,
1366};
1367
1368static int __init setup_fail_page_alloc(char *str)
1369{
1370	return setup_fault_attr(&fail_page_alloc.attr, str);
1371}
1372__setup("fail_page_alloc=", setup_fail_page_alloc);
1373
1374static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1375{
1376	if (order < fail_page_alloc.min_order)
1377		return 0;
1378	if (gfp_mask & __GFP_NOFAIL)
1379		return 0;
1380	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1381		return 0;
1382	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1383		return 0;
1384
1385	return should_fail(&fail_page_alloc.attr, 1 << order);
1386}
1387
1388#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1389
1390static int __init fail_page_alloc_debugfs(void)
1391{
1392	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1393	struct dentry *dir;
1394	int err;
1395
1396	err = init_fault_attr_dentries(&fail_page_alloc.attr,
1397				       "fail_page_alloc");
1398	if (err)
1399		return err;
1400	dir = fail_page_alloc.attr.dentries.dir;
1401
1402	fail_page_alloc.ignore_gfp_wait_file =
1403		debugfs_create_bool("ignore-gfp-wait", mode, dir,
1404				      &fail_page_alloc.ignore_gfp_wait);
1405
1406	fail_page_alloc.ignore_gfp_highmem_file =
1407		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1408				      &fail_page_alloc.ignore_gfp_highmem);
1409	fail_page_alloc.min_order_file =
1410		debugfs_create_u32("min-order", mode, dir,
1411				   &fail_page_alloc.min_order);
1412
1413	if (!fail_page_alloc.ignore_gfp_wait_file ||
1414            !fail_page_alloc.ignore_gfp_highmem_file ||
1415            !fail_page_alloc.min_order_file) {
1416		err = -ENOMEM;
1417		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1418		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1419		debugfs_remove(fail_page_alloc.min_order_file);
1420		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1421	}
1422
1423	return err;
1424}
1425
1426late_initcall(fail_page_alloc_debugfs);
1427
1428#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1429
1430#else /* CONFIG_FAIL_PAGE_ALLOC */
1431
1432static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1433{
1434	return 0;
1435}
1436
1437#endif /* CONFIG_FAIL_PAGE_ALLOC */
1438
1439/*
1440 * Return 1 if free pages are above 'mark'. This takes into account the order
1441 * of the allocation.
1442 */
1443int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1444		      int classzone_idx, int alloc_flags)
1445{
1446	/* free_pages my go negative - that's OK */
1447	long min = mark;
1448	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1449	int o;
1450
1451	if (alloc_flags & ALLOC_HIGH)
1452		min -= min / 2;
1453	if (alloc_flags & ALLOC_HARDER)
1454		min -= min / 4;
1455
1456	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1457		return 0;
1458	for (o = 0; o < order; o++) {
1459		/* At the next order, this order's pages become unavailable */
1460		free_pages -= z->free_area[o].nr_free << o;
1461
1462		/* Require fewer higher order pages to be free */
1463		min >>= 1;
1464
1465		if (free_pages <= min)
1466			return 0;
1467	}
1468	return 1;
1469}
1470
1471#ifdef CONFIG_NUMA
1472/*
1473 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1474 * skip over zones that are not allowed by the cpuset, or that have
1475 * been recently (in last second) found to be nearly full.  See further
1476 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1477 * that have to skip over a lot of full or unallowed zones.
1478 *
1479 * If the zonelist cache is present in the passed in zonelist, then
1480 * returns a pointer to the allowed node mask (either the current
1481 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
1482 *
1483 * If the zonelist cache is not available for this zonelist, does
1484 * nothing and returns NULL.
1485 *
1486 * If the fullzones BITMAP in the zonelist cache is stale (more than
1487 * a second since last zap'd) then we zap it out (clear its bits.)
1488 *
1489 * We hold off even calling zlc_setup, until after we've checked the
1490 * first zone in the zonelist, on the theory that most allocations will
1491 * be satisfied from that first zone, so best to examine that zone as
1492 * quickly as we can.
1493 */
1494static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1495{
1496	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1497	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1498
1499	zlc = zonelist->zlcache_ptr;
1500	if (!zlc)
1501		return NULL;
1502
1503	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1504		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1505		zlc->last_full_zap = jiffies;
1506	}
1507
1508	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1509					&cpuset_current_mems_allowed :
1510					&node_states[N_HIGH_MEMORY];
1511	return allowednodes;
1512}
1513
1514/*
1515 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1516 * if it is worth looking at further for free memory:
1517 *  1) Check that the zone isn't thought to be full (doesn't have its
1518 *     bit set in the zonelist_cache fullzones BITMAP).
1519 *  2) Check that the zones node (obtained from the zonelist_cache
1520 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1521 * Return true (non-zero) if zone is worth looking at further, or
1522 * else return false (zero) if it is not.
1523 *
1524 * This check -ignores- the distinction between various watermarks,
1525 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1526 * found to be full for any variation of these watermarks, it will
1527 * be considered full for up to one second by all requests, unless
1528 * we are so low on memory on all allowed nodes that we are forced
1529 * into the second scan of the zonelist.
1530 *
1531 * In the second scan we ignore this zonelist cache and exactly
1532 * apply the watermarks to all zones, even it is slower to do so.
1533 * We are low on memory in the second scan, and should leave no stone
1534 * unturned looking for a free page.
1535 */
1536static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1537						nodemask_t *allowednodes)
1538{
1539	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1540	int i;				/* index of *z in zonelist zones */
1541	int n;				/* node that zone *z is on */
1542
1543	zlc = zonelist->zlcache_ptr;
1544	if (!zlc)
1545		return 1;
1546
1547	i = z - zonelist->_zonerefs;
1548	n = zlc->z_to_n[i];
1549
1550	/* This zone is worth trying if it is allowed but not full */
1551	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1552}
1553
1554/*
1555 * Given 'z' scanning a zonelist, set the corresponding bit in
1556 * zlc->fullzones, so that subsequent attempts to allocate a page
1557 * from that zone don't waste time re-examining it.
1558 */
1559static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1560{
1561	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1562	int i;				/* index of *z in zonelist zones */
1563
1564	zlc = zonelist->zlcache_ptr;
1565	if (!zlc)
1566		return;
1567
1568	i = z - zonelist->_zonerefs;
1569
1570	set_bit(i, zlc->fullzones);
1571}
1572
1573#else	/* CONFIG_NUMA */
1574
1575static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1576{
1577	return NULL;
1578}
1579
1580static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1581				nodemask_t *allowednodes)
1582{
1583	return 1;
1584}
1585
1586static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1587{
1588}
1589#endif	/* CONFIG_NUMA */
1590
1591/*
1592 * get_page_from_freelist goes through the zonelist trying to allocate
1593 * a page.
1594 */
1595static struct page *
1596get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1597		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1598		struct zone *preferred_zone, int migratetype)
1599{
1600	struct zoneref *z;
1601	struct page *page = NULL;
1602	int classzone_idx;
1603	struct zone *zone;
1604	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1605	int zlc_active = 0;		/* set if using zonelist_cache */
1606	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1607
1608	classzone_idx = zone_idx(preferred_zone);
1609zonelist_scan:
1610	/*
1611	 * Scan zonelist, looking for a zone with enough free.
1612	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1613	 */
1614	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1615						high_zoneidx, nodemask) {
1616		if (NUMA_BUILD && zlc_active &&
1617			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1618				continue;
1619		if ((alloc_flags & ALLOC_CPUSET) &&
1620			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1621				goto try_next_zone;
1622
1623		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1624		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1625			unsigned long mark;
1626			int ret;
1627
1628			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1629			if (zone_watermark_ok(zone, order, mark,
1630				    classzone_idx, alloc_flags))
1631				goto try_this_zone;
1632
1633			if (zone_reclaim_mode == 0)
1634				goto this_zone_full;
1635
1636			ret = zone_reclaim(zone, gfp_mask, order);
1637			switch (ret) {
1638			case ZONE_RECLAIM_NOSCAN:
1639				/* did not scan */
1640				goto try_next_zone;
1641			case ZONE_RECLAIM_FULL:
1642				/* scanned but unreclaimable */
1643				goto this_zone_full;
1644			default:
1645				/* did we reclaim enough */
1646				if (!zone_watermark_ok(zone, order, mark,
1647						classzone_idx, alloc_flags))
1648					goto this_zone_full;
1649			}
1650		}
1651
1652try_this_zone:
1653		page = buffered_rmqueue(preferred_zone, zone, order,
1654						gfp_mask, migratetype);
1655		if (page)
1656			break;
1657this_zone_full:
1658		if (NUMA_BUILD)
1659			zlc_mark_zone_full(zonelist, z);
1660try_next_zone:
1661		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1662			/*
1663			 * we do zlc_setup after the first zone is tried but only
1664			 * if there are multiple nodes make it worthwhile
1665			 */
1666			allowednodes = zlc_setup(zonelist, alloc_flags);
1667			zlc_active = 1;
1668			did_zlc_setup = 1;
1669		}
1670	}
1671
1672	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1673		/* Disable zlc cache for second zonelist scan */
1674		zlc_active = 0;
1675		goto zonelist_scan;
1676	}
1677	return page;
1678}
1679
1680static inline int
1681should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1682				unsigned long pages_reclaimed)
1683{
1684	/* Do not loop if specifically requested */
1685	if (gfp_mask & __GFP_NORETRY)
1686		return 0;
1687
1688	/*
1689	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1690	 * means __GFP_NOFAIL, but that may not be true in other
1691	 * implementations.
1692	 */
1693	if (order <= PAGE_ALLOC_COSTLY_ORDER)
1694		return 1;
1695
1696	/*
1697	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1698	 * specified, then we retry until we no longer reclaim any pages
1699	 * (above), or we've reclaimed an order of pages at least as
1700	 * large as the allocation's order. In both cases, if the
1701	 * allocation still fails, we stop retrying.
1702	 */
1703	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1704		return 1;
1705
1706	/*
1707	 * Don't let big-order allocations loop unless the caller
1708	 * explicitly requests that.
1709	 */
1710	if (gfp_mask & __GFP_NOFAIL)
1711		return 1;
1712
1713	return 0;
1714}
1715
1716static inline struct page *
1717__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1718	struct zonelist *zonelist, enum zone_type high_zoneidx,
1719	nodemask_t *nodemask, struct zone *preferred_zone,
1720	int migratetype)
1721{
1722	struct page *page;
1723
1724	/* Acquire the OOM killer lock for the zones in zonelist */
1725	if (!try_set_zone_oom(zonelist, gfp_mask)) {
1726		schedule_timeout_uninterruptible(1);
1727		return NULL;
1728	}
1729
1730	/*
1731	 * Go through the zonelist yet one more time, keep very high watermark
1732	 * here, this is only to catch a parallel oom killing, we must fail if
1733	 * we're still under heavy pressure.
1734	 */
1735	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1736		order, zonelist, high_zoneidx,
1737		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1738		preferred_zone, migratetype);
1739	if (page)
1740		goto out;
1741
1742	if (!(gfp_mask & __GFP_NOFAIL)) {
1743		/* The OOM killer will not help higher order allocs */
1744		if (order > PAGE_ALLOC_COSTLY_ORDER)
1745			goto out;
1746		/*
1747		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
1748		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
1749		 * The caller should handle page allocation failure by itself if
1750		 * it specifies __GFP_THISNODE.
1751		 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
1752		 */
1753		if (gfp_mask & __GFP_THISNODE)
1754			goto out;
1755	}
1756	/* Exhausted what can be done so it's blamo time */
1757	out_of_memory(zonelist, gfp_mask, order, nodemask);
1758
1759out:
1760	clear_zonelist_oom(zonelist, gfp_mask);
1761	return page;
1762}
1763
1764#ifdef CONFIG_COMPACTION
1765/* Try memory compaction for high-order allocations before reclaim */
1766static struct page *
1767__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1768	struct zonelist *zonelist, enum zone_type high_zoneidx,
1769	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1770	int migratetype, unsigned long *did_some_progress)
1771{
1772	struct page *page;
1773
1774	if (!order || compaction_deferred(preferred_zone))
1775		return NULL;
1776
1777	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
1778								nodemask);
1779	if (*did_some_progress != COMPACT_SKIPPED) {
1780
1781		/* Page migration frees to the PCP lists but we want merging */
1782		drain_pages(get_cpu());
1783		put_cpu();
1784
1785		page = get_page_from_freelist(gfp_mask, nodemask,
1786				order, zonelist, high_zoneidx,
1787				alloc_flags, preferred_zone,
1788				migratetype);
1789		if (page) {
1790			preferred_zone->compact_considered = 0;
1791			preferred_zone->compact_defer_shift = 0;
1792			count_vm_event(COMPACTSUCCESS);
1793			return page;
1794		}
1795
1796		/*
1797		 * It's bad if compaction run occurs and fails.
1798		 * The most likely reason is that pages exist,
1799		 * but not enough to satisfy watermarks.
1800		 */
1801		count_vm_event(COMPACTFAIL);
1802		defer_compaction(preferred_zone);
1803
1804		cond_resched();
1805	}
1806
1807	return NULL;
1808}
1809#else
1810static inline struct page *
1811__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1812	struct zonelist *zonelist, enum zone_type high_zoneidx,
1813	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1814	int migratetype, unsigned long *did_some_progress)
1815{
1816	return NULL;
1817}
1818#endif /* CONFIG_COMPACTION */
1819
1820/* The really slow allocator path where we enter direct reclaim */
1821static inline struct page *
1822__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1823	struct zonelist *zonelist, enum zone_type high_zoneidx,
1824	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1825	int migratetype, unsigned long *did_some_progress)
1826{
1827	struct page *page = NULL;
1828	struct reclaim_state reclaim_state;
1829	struct task_struct *p = current;
1830
1831	cond_resched();
1832
1833	/* We now go into synchronous reclaim */
1834	cpuset_memory_pressure_bump();
1835	p->flags |= PF_MEMALLOC;
1836	lockdep_set_current_reclaim_state(gfp_mask);
1837	reclaim_state.reclaimed_slab = 0;
1838	p->reclaim_state = &reclaim_state;
1839
1840	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1841
1842	p->reclaim_state = NULL;
1843	lockdep_clear_current_reclaim_state();
1844	p->flags &= ~PF_MEMALLOC;
1845
1846	cond_resched();
1847
1848	if (order != 0)
1849		drain_all_pages();
1850
1851	if (likely(*did_some_progress))
1852		page = get_page_from_freelist(gfp_mask, nodemask, order,
1853					zonelist, high_zoneidx,
1854					alloc_flags, preferred_zone,
1855					migratetype);
1856	return page;
1857}
1858
1859/*
1860 * This is called in the allocator slow-path if the allocation request is of
1861 * sufficient urgency to ignore watermarks and take other desperate measures
1862 */
1863static inline struct page *
1864__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1865	struct zonelist *zonelist, enum zone_type high_zoneidx,
1866	nodemask_t *nodemask, struct zone *preferred_zone,
1867	int migratetype)
1868{
1869	struct page *page;
1870
1871	do {
1872		page = get_page_from_freelist(gfp_mask, nodemask, order,
1873			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
1874			preferred_zone, migratetype);
1875
1876		if (!page && gfp_mask & __GFP_NOFAIL)
1877			congestion_wait(BLK_RW_ASYNC, HZ/50);
1878	} while (!page && (gfp_mask & __GFP_NOFAIL));
1879
1880	return page;
1881}
1882
1883static inline
1884void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1885						enum zone_type high_zoneidx)
1886{
1887	struct zoneref *z;
1888	struct zone *zone;
1889
1890	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1891		wakeup_kswapd(zone, order);
1892}
1893
1894static inline int
1895gfp_to_alloc_flags(gfp_t gfp_mask)
1896{
1897	struct task_struct *p = current;
1898	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1899	const gfp_t wait = gfp_mask & __GFP_WAIT;
1900
1901	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1902	BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
1903
1904	/*
1905	 * The caller may dip into page reserves a bit more if the caller
1906	 * cannot run direct reclaim, or if the caller has realtime scheduling
1907	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1908	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1909	 */
1910	alloc_flags |= (gfp_mask & __GFP_HIGH);
1911
1912	if (!wait) {
1913		alloc_flags |= ALLOC_HARDER;
1914		/*
1915		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1916		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1917		 */
1918		alloc_flags &= ~ALLOC_CPUSET;
1919	} else if (unlikely(rt_task(p)) && !in_interrupt())
1920		alloc_flags |= ALLOC_HARDER;
1921
1922	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1923		if (!in_interrupt() &&
1924		    ((p->flags & PF_MEMALLOC) ||
1925		     unlikely(test_thread_flag(TIF_MEMDIE))))
1926			alloc_flags |= ALLOC_NO_WATERMARKS;
1927	}
1928
1929	return alloc_flags;
1930}
1931
1932static inline struct page *
1933__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1934	struct zonelist *zonelist, enum zone_type high_zoneidx,
1935	nodemask_t *nodemask, struct zone *preferred_zone,
1936	int migratetype)
1937{
1938	const gfp_t wait = gfp_mask & __GFP_WAIT;
1939	struct page *page = NULL;
1940	int alloc_flags;
1941	unsigned long pages_reclaimed = 0;
1942	unsigned long did_some_progress;
1943	struct task_struct *p = current;
1944
1945	/*
1946	 * In the slowpath, we sanity check order to avoid ever trying to
1947	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
1948	 * be using allocators in order of preference for an area that is
1949	 * too large.
1950	 */
1951	if (order >= MAX_ORDER) {
1952		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
1953		return NULL;
1954	}
1955
1956	/*
1957	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1958	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1959	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
1960	 * using a larger set of nodes after it has established that the
1961	 * allowed per node queues are empty and that nodes are
1962	 * over allocated.
1963	 */
1964	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1965		goto nopage;
1966
1967restart:
1968	wake_all_kswapd(order, zonelist, high_zoneidx);
1969
1970	/*
1971	 * OK, we're below the kswapd watermark and have kicked background
1972	 * reclaim. Now things get more complex, so set up alloc_flags according
1973	 * to how we want to proceed.
1974	 */
1975	alloc_flags = gfp_to_alloc_flags(gfp_mask);
1976
1977	/* This is the last chance, in general, before the goto nopage. */
1978	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1979			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
1980			preferred_zone, migratetype);
1981	if (page)
1982		goto got_pg;
1983
1984rebalance:
1985	/* Allocate without watermarks if the context allows */
1986	if (alloc_flags & ALLOC_NO_WATERMARKS) {
1987		page = __alloc_pages_high_priority(gfp_mask, order,
1988				zonelist, high_zoneidx, nodemask,
1989				preferred_zone, migratetype);
1990		if (page)
1991			goto got_pg;
1992	}
1993
1994	/* Atomic allocations - we can't balance anything */
1995	if (!wait)
1996		goto nopage;
1997
1998	/* Avoid recursion of direct reclaim */
1999	if (p->flags & PF_MEMALLOC)
2000		goto nopage;
2001
2002	/* Avoid allocations with no watermarks from looping endlessly */
2003	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2004		goto nopage;
2005
2006	/* Try direct compaction */
2007	page = __alloc_pages_direct_compact(gfp_mask, order,
2008					zonelist, high_zoneidx,
2009					nodemask,
2010					alloc_flags, preferred_zone,
2011					migratetype, &did_some_progress);
2012	if (page)
2013		goto got_pg;
2014
2015	/* Try direct reclaim and then allocating */
2016	page = __alloc_pages_direct_reclaim(gfp_mask, order,
2017					zonelist, high_zoneidx,
2018					nodemask,
2019					alloc_flags, preferred_zone,
2020					migratetype, &did_some_progress);
2021	if (page)
2022		goto got_pg;
2023
2024	/*
2025	 * If we failed to make any progress reclaiming, then we are
2026	 * running out of options and have to consider going OOM
2027	 */
2028	if (!did_some_progress) {
2029		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2030			if (oom_killer_disabled)
2031				goto nopage;
2032			page = __alloc_pages_may_oom(gfp_mask, order,
2033					zonelist, high_zoneidx,
2034					nodemask, preferred_zone,
2035					migratetype);
2036			if (page)
2037				goto got_pg;
2038
2039			/*
2040			 * The OOM killer does not trigger for high-order
2041			 * ~__GFP_NOFAIL allocations so if no progress is being
2042			 * made, there are no other options and retrying is
2043			 * unlikely to help.
2044			 */
2045			if (order > PAGE_ALLOC_COSTLY_ORDER &&
2046						!(gfp_mask & __GFP_NOFAIL))
2047				goto nopage;
2048
2049			goto restart;
2050		}
2051	}
2052
2053	/* Check if we should retry the allocation */
2054	pages_reclaimed += did_some_progress;
2055	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
2056		/* Wait for some write requests to complete then retry */
2057		congestion_wait(BLK_RW_ASYNC, HZ/50);
2058		goto rebalance;
2059	}
2060
2061nopage:
2062	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
2063		printk(KERN_WARNING "%s: page allocation failure."
2064			" order:%d, mode:0x%x\n",
2065			p->comm, order, gfp_mask);
2066		dump_stack();
2067		show_mem();
2068	}
2069	return page;
2070got_pg:
2071	if (kmemcheck_enabled)
2072		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2073	return page;
2074
2075}
2076
2077/*
2078 * This is the 'heart' of the zoned buddy allocator.
2079 */
2080struct page *
2081__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2082			struct zonelist *zonelist, nodemask_t *nodemask)
2083{
2084	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2085	struct zone *preferred_zone;
2086	struct page *page;
2087	int migratetype = allocflags_to_migratetype(gfp_mask);
2088
2089	gfp_mask &= gfp_allowed_mask;
2090
2091	lockdep_trace_alloc(gfp_mask);
2092
2093	might_sleep_if(gfp_mask & __GFP_WAIT);
2094
2095	if (should_fail_alloc_page(gfp_mask, order))
2096		return NULL;
2097
2098	/*
2099	 * Check the zones suitable for the gfp_mask contain at least one
2100	 * valid zone. It's possible to have an empty zonelist as a result
2101	 * of GFP_THISNODE and a memoryless node
2102	 */
2103	if (unlikely(!zonelist->_zonerefs->zone))
2104		return NULL;
2105
2106	get_mems_allowed();
2107	/* The preferred zone is used for statistics later */
2108	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
2109	if (!preferred_zone) {
2110		put_mems_allowed();
2111		return NULL;
2112	}
2113
2114	/* First allocation attempt */
2115	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2116			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
2117			preferred_zone, migratetype);
2118	if (unlikely(!page))
2119		page = __alloc_pages_slowpath(gfp_mask, order,
2120				zonelist, high_zoneidx, nodemask,
2121				preferred_zone, migratetype);
2122	put_mems_allowed();
2123
2124	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2125	return page;
2126}
2127EXPORT_SYMBOL(__alloc_pages_nodemask);
2128
2129/*
2130 * Common helper functions.
2131 */
2132unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2133{
2134	struct page *page;
2135
2136	/*
2137	 * __get_free_pages() returns a 32-bit address, which cannot represent
2138	 * a highmem page
2139	 */
2140	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2141
2142	page = alloc_pages(gfp_mask, order);
2143	if (!page)
2144		return 0;
2145	return (unsigned long) page_address(page);
2146}
2147EXPORT_SYMBOL(__get_free_pages);
2148
2149unsigned long get_zeroed_page(gfp_t gfp_mask)
2150{
2151	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2152}
2153EXPORT_SYMBOL(get_zeroed_page);
2154
2155void __pagevec_free(struct pagevec *pvec)
2156{
2157	int i = pagevec_count(pvec);
2158
2159	while (--i >= 0) {
2160		trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
2161		free_hot_cold_page(pvec->pages[i], pvec->cold);
2162	}
2163}
2164
2165void __free_pages(struct page *page, unsigned int order)
2166{
2167	if (put_page_testzero(page)) {
2168		if (order == 0)
2169			free_hot_cold_page(page, 0);
2170		else
2171			__free_pages_ok(page, order);
2172	}
2173}
2174
2175EXPORT_SYMBOL(__free_pages);
2176
2177void free_pages(unsigned long addr, unsigned int order)
2178{
2179	if (addr != 0) {
2180		VM_BUG_ON(!virt_addr_valid((void *)addr));
2181		__free_pages(virt_to_page((void *)addr), order);
2182	}
2183}
2184
2185EXPORT_SYMBOL(free_pages);
2186
2187/**
2188 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
2189 * @size: the number of bytes to allocate
2190 * @gfp_mask: GFP flags for the allocation
2191 *
2192 * This function is similar to alloc_pages(), except that it allocates the
2193 * minimum number of pages to satisfy the request.  alloc_pages() can only
2194 * allocate memory in power-of-two pages.
2195 *
2196 * This function is also limited by MAX_ORDER.
2197 *
2198 * Memory allocated by this function must be released by free_pages_exact().
2199 */
2200void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2201{
2202	unsigned int order = get_order(size);
2203	unsigned long addr;
2204
2205	addr = __get_free_pages(gfp_mask, order);
2206	if (addr) {
2207		unsigned long alloc_end = addr + (PAGE_SIZE << order);
2208		unsigned long used = addr + PAGE_ALIGN(size);
2209
2210		split_page(virt_to_page((void *)addr), order);
2211		while (used < alloc_end) {
2212			free_page(used);
2213			used += PAGE_SIZE;
2214		}
2215	}
2216
2217	return (void *)addr;
2218}
2219EXPORT_SYMBOL(alloc_pages_exact);
2220
2221/**
2222 * free_pages_exact - release memory allocated via alloc_pages_exact()
2223 * @virt: the value returned by alloc_pages_exact.
2224 * @size: size of allocation, same value as passed to alloc_pages_exact().
2225 *
2226 * Release the memory allocated by a previous call to alloc_pages_exact.
2227 */
2228void free_pages_exact(void *virt, size_t size)
2229{
2230	unsigned long addr = (unsigned long)virt;
2231	unsigned long end = addr + PAGE_ALIGN(size);
2232
2233	while (addr < end) {
2234		free_page(addr);
2235		addr += PAGE_SIZE;
2236	}
2237}
2238EXPORT_SYMBOL(free_pages_exact);
2239
2240static unsigned int nr_free_zone_pages(int offset)
2241{
2242	struct zoneref *z;
2243	struct zone *zone;
2244
2245	/* Just pick one node, since fallback list is circular */
2246	unsigned int sum = 0;
2247
2248	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2249
2250	for_each_zone_zonelist(zone, z, zonelist, offset) {
2251		unsigned long size = zone->present_pages;
2252		unsigned long high = high_wmark_pages(zone);
2253		if (size > high)
2254			sum += size - high;
2255	}
2256
2257	return sum;
2258}
2259
2260/*
2261 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2262 */
2263unsigned int nr_free_buffer_pages(void)
2264{
2265	return nr_free_zone_pages(gfp_zone(GFP_USER));
2266}
2267EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2268
2269/*
2270 * Amount of free RAM allocatable within all zones
2271 */
2272unsigned int nr_free_pagecache_pages(void)
2273{
2274	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2275}
2276
2277static inline void show_node(struct zone *zone)
2278{
2279	if (NUMA_BUILD)
2280		printk("Node %d ", zone_to_nid(zone));
2281}
2282
2283void si_meminfo(struct sysinfo *val)
2284{
2285	val->totalram = totalram_pages;
2286	val->sharedram = 0;
2287	val->freeram = global_page_state(NR_FREE_PAGES);
2288	val->bufferram = nr_blockdev_pages();
2289	val->totalhigh = totalhigh_pages;
2290	val->freehigh = nr_free_highpages();
2291	val->mem_unit = PAGE_SIZE;
2292}
2293
2294EXPORT_SYMBOL(si_meminfo);
2295
2296#ifdef CONFIG_NUMA
2297void si_meminfo_node(struct sysinfo *val, int nid)
2298{
2299	pg_data_t *pgdat = NODE_DATA(nid);
2300
2301	val->totalram = pgdat->node_present_pages;
2302	val->freeram = node_page_state(nid, NR_FREE_PAGES);
2303#ifdef CONFIG_HIGHMEM
2304	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2305	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2306			NR_FREE_PAGES);
2307#else
2308	val->totalhigh = 0;
2309	val->freehigh = 0;
2310#endif
2311	val->mem_unit = PAGE_SIZE;
2312}
2313#endif
2314
2315#define K(x) ((x) << (PAGE_SHIFT-10))
2316
2317/*
2318 * Show free area list (used inside shift_scroll-lock stuff)
2319 * We also calculate the percentage fragmentation. We do this by counting the
2320 * memory on each free list with the exception of the first item on the list.
2321 */
2322void show_free_areas(void)
2323{
2324	int cpu;
2325	struct zone *zone;
2326
2327	for_each_populated_zone(zone) {
2328		show_node(zone);
2329		printk("%s per-cpu:\n", zone->name);
2330
2331		for_each_online_cpu(cpu) {
2332			struct per_cpu_pageset *pageset;
2333
2334			pageset = per_cpu_ptr(zone->pageset, cpu);
2335
2336			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2337			       cpu, pageset->pcp.high,
2338			       pageset->pcp.batch, pageset->pcp.count);
2339		}
2340	}
2341
2342	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2343		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2344		" unevictable:%lu"
2345		" dirty:%lu writeback:%lu unstable:%lu\n"
2346		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2347		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2348		global_page_state(NR_ACTIVE_ANON),
2349		global_page_state(NR_INACTIVE_ANON),
2350		global_page_state(NR_ISOLATED_ANON),
2351		global_page_state(NR_ACTIVE_FILE),
2352		global_page_state(NR_INACTIVE_FILE),
2353		global_page_state(NR_ISOLATED_FILE),
2354		global_page_state(NR_UNEVICTABLE),
2355		global_page_state(NR_FILE_DIRTY),
2356		global_page_state(NR_WRITEBACK),
2357		global_page_state(NR_UNSTABLE_NFS),
2358		global_page_state(NR_FREE_PAGES),
2359		global_page_state(NR_SLAB_RECLAIMABLE),
2360		global_page_state(NR_SLAB_UNRECLAIMABLE),
2361		global_page_state(NR_FILE_MAPPED),
2362		global_page_state(NR_SHMEM),
2363		global_page_state(NR_PAGETABLE),
2364		global_page_state(NR_BOUNCE));
2365
2366	for_each_populated_zone(zone) {
2367		int i;
2368
2369		show_node(zone);
2370		printk("%s"
2371			" free:%lukB"
2372			" min:%lukB"
2373			" low:%lukB"
2374			" high:%lukB"
2375			" active_anon:%lukB"
2376			" inactive_anon:%lukB"
2377			" active_file:%lukB"
2378			" inactive_file:%lukB"
2379			" unevictable:%lukB"
2380			" isolated(anon):%lukB"
2381			" isolated(file):%lukB"
2382			" present:%lukB"
2383			" mlocked:%lukB"
2384			" dirty:%lukB"
2385			" writeback:%lukB"
2386			" mapped:%lukB"
2387			" shmem:%lukB"
2388			" slab_reclaimable:%lukB"
2389			" slab_unreclaimable:%lukB"
2390			" kernel_stack:%lukB"
2391			" pagetables:%lukB"
2392			" unstable:%lukB"
2393			" bounce:%lukB"
2394			" writeback_tmp:%lukB"
2395			" pages_scanned:%lu"
2396			" all_unreclaimable? %s"
2397			"\n",
2398			zone->name,
2399			K(zone_page_state(zone, NR_FREE_PAGES)),
2400			K(min_wmark_pages(zone)),
2401			K(low_wmark_pages(zone)),
2402			K(high_wmark_pages(zone)),
2403			K(zone_page_state(zone, NR_ACTIVE_ANON)),
2404			K(zone_page_state(zone, NR_INACTIVE_ANON)),
2405			K(zone_page_state(zone, NR_ACTIVE_FILE)),
2406			K(zone_page_state(zone, NR_INACTIVE_FILE)),
2407			K(zone_page_state(zone, NR_UNEVICTABLE)),
2408			K(zone_page_state(zone, NR_ISOLATED_ANON)),
2409			K(zone_page_state(zone, NR_ISOLATED_FILE)),
2410			K(zone->present_pages),
2411			K(zone_page_state(zone, NR_MLOCK)),
2412			K(zone_page_state(zone, NR_FILE_DIRTY)),
2413			K(zone_page_state(zone, NR_WRITEBACK)),
2414			K(zone_page_state(zone, NR_FILE_MAPPED)),
2415			K(zone_page_state(zone, NR_SHMEM)),
2416			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2417			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2418			zone_page_state(zone, NR_KERNEL_STACK) *
2419				THREAD_SIZE / 1024,
2420			K(zone_page_state(zone, NR_PAGETABLE)),
2421			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2422			K(zone_page_state(zone, NR_BOUNCE)),
2423			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2424			zone->pages_scanned,
2425			(zone->all_unreclaimable ? "yes" : "no")
2426			);
2427		printk("lowmem_reserve[]:");
2428		for (i = 0; i < MAX_NR_ZONES; i++)
2429			printk(" %lu", zone->lowmem_reserve[i]);
2430		printk("\n");
2431	}
2432
2433	for_each_populated_zone(zone) {
2434 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
2435
2436		show_node(zone);
2437		printk("%s: ", zone->name);
2438
2439		spin_lock_irqsave(&zone->lock, flags);
2440		for (order = 0; order < MAX_ORDER; order++) {
2441			nr[order] = zone->free_area[order].nr_free;
2442			total += nr[order] << order;
2443		}
2444		spin_unlock_irqrestore(&zone->lock, flags);
2445		for (order = 0; order < MAX_ORDER; order++)
2446			printk("%lu*%lukB ", nr[order], K(1UL) << order);
2447		printk("= %lukB\n", K(total));
2448	}
2449
2450	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2451
2452	show_swap_cache_info();
2453}
2454
2455static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2456{
2457	zoneref->zone = zone;
2458	zoneref->zone_idx = zone_idx(zone);
2459}
2460
2461/*
2462 * Builds allocation fallback zone lists.
2463 *
2464 * Add all populated zones of a node to the zonelist.
2465 */
2466static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2467				int nr_zones, enum zone_type zone_type)
2468{
2469	struct zone *zone;
2470
2471	BUG_ON(zone_type >= MAX_NR_ZONES);
2472	zone_type++;
2473
2474	do {
2475		zone_type--;
2476		zone = pgdat->node_zones + zone_type;
2477		if (populated_zone(zone)) {
2478			zoneref_set_zone(zone,
2479				&zonelist->_zonerefs[nr_zones++]);
2480			check_highest_zone(zone_type);
2481		}
2482
2483	} while (zone_type);
2484	return nr_zones;
2485}
2486
2487
2488/*
2489 *  zonelist_order:
2490 *  0 = automatic detection of better ordering.
2491 *  1 = order by ([node] distance, -zonetype)
2492 *  2 = order by (-zonetype, [node] distance)
2493 *
2494 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2495 *  the same zonelist. So only NUMA can configure this param.
2496 */
2497#define ZONELIST_ORDER_DEFAULT  0
2498#define ZONELIST_ORDER_NODE     1
2499#define ZONELIST_ORDER_ZONE     2
2500
2501/* zonelist order in the kernel.
2502 * set_zonelist_order() will set this to NODE or ZONE.
2503 */
2504static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2505static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2506
2507
2508#ifdef CONFIG_NUMA
2509/* The value user specified ....changed by config */
2510static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2511/* string for sysctl */
2512#define NUMA_ZONELIST_ORDER_LEN	16
2513char numa_zonelist_order[16] = "default";
2514
2515/*
2516 * interface for configure zonelist ordering.
2517 * command line option "numa_zonelist_order"
2518 *	= "[dD]efault	- default, automatic configuration.
2519 *	= "[nN]ode 	- order by node locality, then by zone within node
2520 *	= "[zZ]one      - order by zone, then by locality within zone
2521 */
2522
2523static int __parse_numa_zonelist_order(char *s)
2524{
2525	if (*s == 'd' || *s == 'D') {
2526		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2527	} else if (*s == 'n' || *s == 'N') {
2528		user_zonelist_order = ZONELIST_ORDER_NODE;
2529	} else if (*s == 'z' || *s == 'Z') {
2530		user_zonelist_order = ZONELIST_ORDER_ZONE;
2531	} else {
2532		printk(KERN_WARNING
2533			"Ignoring invalid numa_zonelist_order value:  "
2534			"%s\n", s);
2535		return -EINVAL;
2536	}
2537	return 0;
2538}
2539
2540static __init int setup_numa_zonelist_order(char *s)
2541{
2542	if (s)
2543		return __parse_numa_zonelist_order(s);
2544	return 0;
2545}
2546early_param("numa_zonelist_order", setup_numa_zonelist_order);
2547
2548/*
2549 * sysctl handler for numa_zonelist_order
2550 */
2551int numa_zonelist_order_handler(ctl_table *table, int write,
2552		void __user *buffer, size_t *length,
2553		loff_t *ppos)
2554{
2555	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2556	int ret;
2557	static DEFINE_MUTEX(zl_order_mutex);
2558
2559	mutex_lock(&zl_order_mutex);
2560	if (write)
2561		strcpy(saved_string, (char*)table->data);
2562	ret = proc_dostring(table, write, buffer, length, ppos);
2563	if (ret)
2564		goto out;
2565	if (write) {
2566		int oldval = user_zonelist_order;
2567		if (__parse_numa_zonelist_order((char*)table->data)) {
2568			/*
2569			 * bogus value.  restore saved string
2570			 */
2571			strncpy((char*)table->data, saved_string,
2572				NUMA_ZONELIST_ORDER_LEN);
2573			user_zonelist_order = oldval;
2574		} else if (oldval != user_zonelist_order)
2575			build_all_zonelists(NULL);
2576	}
2577out:
2578	mutex_unlock(&zl_order_mutex);
2579	return ret;
2580}
2581
2582
2583#define MAX_NODE_LOAD (nr_online_nodes)
2584static int node_load[MAX_NUMNODES];
2585
2586/**
2587 * find_next_best_node - find the next node that should appear in a given node's fallback list
2588 * @node: node whose fallback list we're appending
2589 * @used_node_mask: nodemask_t of already used nodes
2590 *
2591 * We use a number of factors to determine which is the next node that should
2592 * appear on a given node's fallback list.  The node should not have appeared
2593 * already in @node's fallback list, and it should be the next closest node
2594 * according to the distance array (which contains arbitrary distance values
2595 * from each node to each node in the system), and should also prefer nodes
2596 * with no CPUs, since presumably they'll have very little allocation pressure
2597 * on them otherwise.
2598 * It returns -1 if no node is found.
2599 */
2600static int find_next_best_node(int node, nodemask_t *used_node_mask)
2601{
2602	int n, val;
2603	int min_val = INT_MAX;
2604	int best_node = -1;
2605	const struct cpumask *tmp = cpumask_of_node(0);
2606
2607	/* Use the local node if we haven't already */
2608	if (!node_isset(node, *used_node_mask)) {
2609		node_set(node, *used_node_mask);
2610		return node;
2611	}
2612
2613	for_each_node_state(n, N_HIGH_MEMORY) {
2614
2615		/* Don't want a node to appear more than once */
2616		if (node_isset(n, *used_node_mask))
2617			continue;
2618
2619		/* Use the distance array to find the distance */
2620		val = node_distance(node, n);
2621
2622		/* Penalize nodes under us ("prefer the next node") */
2623		val += (n < node);
2624
2625		/* Give preference to headless and unused nodes */
2626		tmp = cpumask_of_node(n);
2627		if (!cpumask_empty(tmp))
2628			val += PENALTY_FOR_NODE_WITH_CPUS;
2629
2630		/* Slight preference for less loaded node */
2631		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2632		val += node_load[n];
2633
2634		if (val < min_val) {
2635			min_val = val;
2636			best_node = n;
2637		}
2638	}
2639
2640	if (best_node >= 0)
2641		node_set(best_node, *used_node_mask);
2642
2643	return best_node;
2644}
2645
2646
2647/*
2648 * Build zonelists ordered by node and zones within node.
2649 * This results in maximum locality--normal zone overflows into local
2650 * DMA zone, if any--but risks exhausting DMA zone.
2651 */
2652static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2653{
2654	int j;
2655	struct zonelist *zonelist;
2656
2657	zonelist = &pgdat->node_zonelists[0];
2658	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2659		;
2660	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2661							MAX_NR_ZONES - 1);
2662	zonelist->_zonerefs[j].zone = NULL;
2663	zonelist->_zonerefs[j].zone_idx = 0;
2664}
2665
2666/*
2667 * Build gfp_thisnode zonelists
2668 */
2669static void build_thisnode_zonelists(pg_data_t *pgdat)
2670{
2671	int j;
2672	struct zonelist *zonelist;
2673
2674	zonelist = &pgdat->node_zonelists[1];
2675	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2676	zonelist->_zonerefs[j].zone = NULL;
2677	zonelist->_zonerefs[j].zone_idx = 0;
2678}
2679
2680/*
2681 * Build zonelists ordered by zone and nodes within zones.
2682 * This results in conserving DMA zone[s] until all Normal memory is
2683 * exhausted, but results in overflowing to remote node while memory
2684 * may still exist in local DMA zone.
2685 */
2686static int node_order[MAX_NUMNODES];
2687
2688static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2689{
2690	int pos, j, node;
2691	int zone_type;		/* needs to be signed */
2692	struct zone *z;
2693	struct zonelist *zonelist;
2694
2695	zonelist = &pgdat->node_zonelists[0];
2696	pos = 0;
2697	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2698		for (j = 0; j < nr_nodes; j++) {
2699			node = node_order[j];
2700			z = &NODE_DATA(node)->node_zones[zone_type];
2701			if (populated_zone(z)) {
2702				zoneref_set_zone(z,
2703					&zonelist->_zonerefs[pos++]);
2704				check_highest_zone(zone_type);
2705			}
2706		}
2707	}
2708	zonelist->_zonerefs[pos].zone = NULL;
2709	zonelist->_zonerefs[pos].zone_idx = 0;
2710}
2711
2712static int default_zonelist_order(void)
2713{
2714	int nid, zone_type;
2715	unsigned long low_kmem_size,total_size;
2716	struct zone *z;
2717	int average_size;
2718	/*
2719         * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
2720	 * If they are really small and used heavily, the system can fall
2721	 * into OOM very easily.
2722	 * This function detect ZONE_DMA/DMA32 size and configures zone order.
2723	 */
2724	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
2725	low_kmem_size = 0;
2726	total_size = 0;
2727	for_each_online_node(nid) {
2728		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2729			z = &NODE_DATA(nid)->node_zones[zone_type];
2730			if (populated_zone(z)) {
2731				if (zone_type < ZONE_NORMAL)
2732					low_kmem_size += z->present_pages;
2733				total_size += z->present_pages;
2734			} else if (zone_type == ZONE_NORMAL) {
2735				/*
2736				 * If any node has only lowmem, then node order
2737				 * is preferred to allow kernel allocations
2738				 * locally; otherwise, they can easily infringe
2739				 * on other nodes when there is an abundance of
2740				 * lowmem available to allocate from.
2741				 */
2742				return ZONELIST_ORDER_NODE;
2743			}
2744		}
2745	}
2746	if (!low_kmem_size ||  /* there are no DMA area. */
2747	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2748		return ZONELIST_ORDER_NODE;
2749	/*
2750	 * look into each node's config.
2751  	 * If there is a node whose DMA/DMA32 memory is very big area on
2752 	 * local memory, NODE_ORDER may be suitable.
2753         */
2754	average_size = total_size /
2755				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2756	for_each_online_node(nid) {
2757		low_kmem_size = 0;
2758		total_size = 0;
2759		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2760			z = &NODE_DATA(nid)->node_zones[zone_type];
2761			if (populated_zone(z)) {
2762				if (zone_type < ZONE_NORMAL)
2763					low_kmem_size += z->present_pages;
2764				total_size += z->present_pages;
2765			}
2766		}
2767		if (low_kmem_size &&
2768		    total_size > average_size && /* ignore small node */
2769		    low_kmem_size > total_size * 70/100)
2770			return ZONELIST_ORDER_NODE;
2771	}
2772	return ZONELIST_ORDER_ZONE;
2773}
2774
2775static void set_zonelist_order(void)
2776{
2777	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2778		current_zonelist_order = default_zonelist_order();
2779	else
2780		current_zonelist_order = user_zonelist_order;
2781}
2782
2783static void build_zonelists(pg_data_t *pgdat)
2784{
2785	int j, node, load;
2786	enum zone_type i;
2787	nodemask_t used_mask;
2788	int local_node, prev_node;
2789	struct zonelist *zonelist;
2790	int order = current_zonelist_order;
2791
2792	/* initialize zonelists */
2793	for (i = 0; i < MAX_ZONELISTS; i++) {
2794		zonelist = pgdat->node_zonelists + i;
2795		zonelist->_zonerefs[0].zone = NULL;
2796		zonelist->_zonerefs[0].zone_idx = 0;
2797	}
2798
2799	/* NUMA-aware ordering of nodes */
2800	local_node = pgdat->node_id;
2801	load = nr_online_nodes;
2802	prev_node = local_node;
2803	nodes_clear(used_mask);
2804
2805	memset(node_order, 0, sizeof(node_order));
2806	j = 0;
2807
2808	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2809		int distance = node_distance(local_node, node);
2810
2811		/*
2812		 * If another node is sufficiently far away then it is better
2813		 * to reclaim pages in a zone before going off node.
2814		 */
2815		if (distance > RECLAIM_DISTANCE)
2816			zone_reclaim_mode = 1;
2817
2818		/*
2819		 * We don't want to pressure a particular node.
2820		 * So adding penalty to the first node in same
2821		 * distance group to make it round-robin.
2822		 */
2823		if (distance != node_distance(local_node, prev_node))
2824			node_load[node] = load;
2825
2826		prev_node = node;
2827		load--;
2828		if (order == ZONELIST_ORDER_NODE)
2829			build_zonelists_in_node_order(pgdat, node);
2830		else
2831			node_order[j++] = node;	/* remember order */
2832	}
2833
2834	if (order == ZONELIST_ORDER_ZONE) {
2835		/* calculate node order -- i.e., DMA last! */
2836		build_zonelists_in_zone_order(pgdat, j);
2837	}
2838
2839	build_thisnode_zonelists(pgdat);
2840}
2841
2842/* Construct the zonelist performance cache - see further mmzone.h */
2843static void build_zonelist_cache(pg_data_t *pgdat)
2844{
2845	struct zonelist *zonelist;
2846	struct zonelist_cache *zlc;
2847	struct zoneref *z;
2848
2849	zonelist = &pgdat->node_zonelists[0];
2850	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2851	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2852	for (z = zonelist->_zonerefs; z->zone; z++)
2853		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2854}
2855
2856
2857#else	/* CONFIG_NUMA */
2858
2859static void set_zonelist_order(void)
2860{
2861	current_zonelist_order = ZONELIST_ORDER_ZONE;
2862}
2863
2864static void build_zonelists(pg_data_t *pgdat)
2865{
2866	int node, local_node;
2867	enum zone_type j;
2868	struct zonelist *zonelist;
2869
2870	local_node = pgdat->node_id;
2871
2872	zonelist = &pgdat->node_zonelists[0];
2873	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2874
2875	/*
2876	 * Now we build the zonelist so that it contains the zones
2877	 * of all the other nodes.
2878	 * We don't want to pressure a particular node, so when
2879	 * building the zones for node N, we make sure that the
2880	 * zones coming right after the local ones are those from
2881	 * node N+1 (modulo N)
2882	 */
2883	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2884		if (!node_online(node))
2885			continue;
2886		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2887							MAX_NR_ZONES - 1);
2888	}
2889	for (node = 0; node < local_node; node++) {
2890		if (!node_online(node))
2891			continue;
2892		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2893							MAX_NR_ZONES - 1);
2894	}
2895
2896	zonelist->_zonerefs[j].zone = NULL;
2897	zonelist->_zonerefs[j].zone_idx = 0;
2898}
2899
2900/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2901static void build_zonelist_cache(pg_data_t *pgdat)
2902{
2903	pgdat->node_zonelists[0].zlcache_ptr = NULL;
2904}
2905
2906#endif	/* CONFIG_NUMA */
2907
2908/*
2909 * Boot pageset table. One per cpu which is going to be used for all
2910 * zones and all nodes. The parameters will be set in such a way
2911 * that an item put on a list will immediately be handed over to
2912 * the buddy list. This is safe since pageset manipulation is done
2913 * with interrupts disabled.
2914 *
2915 * The boot_pagesets must be kept even after bootup is complete for
2916 * unused processors and/or zones. They do play a role for bootstrapping
2917 * hotplugged processors.
2918 *
2919 * zoneinfo_show() and maybe other functions do
2920 * not check if the processor is online before following the pageset pointer.
2921 * Other parts of the kernel may not check if the zone is available.
2922 */
2923static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
2924static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
2925static void setup_zone_pageset(struct zone *zone);
2926
2927/* return values int ....just for stop_machine() */
2928static __init_refok int __build_all_zonelists(void *data)
2929{
2930	int nid;
2931	int cpu;
2932
2933#ifdef CONFIG_NUMA
2934	memset(node_load, 0, sizeof(node_load));
2935#endif
2936	for_each_online_node(nid) {
2937		pg_data_t *pgdat = NODE_DATA(nid);
2938
2939		build_zonelists(pgdat);
2940		build_zonelist_cache(pgdat);
2941	}
2942
2943#ifdef CONFIG_MEMORY_HOTPLUG
2944	/* Setup real pagesets for the new zone */
2945	if (data) {
2946		struct zone *zone = data;
2947		setup_zone_pageset(zone);
2948	}
2949#endif
2950
2951	/*
2952	 * Initialize the boot_pagesets that are going to be used
2953	 * for bootstrapping processors. The real pagesets for
2954	 * each zone will be allocated later when the per cpu
2955	 * allocator is available.
2956	 *
2957	 * boot_pagesets are used also for bootstrapping offline
2958	 * cpus if the system is already booted because the pagesets
2959	 * are needed to initialize allocators on a specific cpu too.
2960	 * F.e. the percpu allocator needs the page allocator which
2961	 * needs the percpu allocator in order to allocate its pagesets
2962	 * (a chicken-egg dilemma).
2963	 */
2964	for_each_possible_cpu(cpu)
2965		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
2966
2967	return 0;
2968}
2969
2970void build_all_zonelists(void *data)
2971{
2972	set_zonelist_order();
2973
2974	if (system_state == SYSTEM_BOOTING) {
2975		__build_all_zonelists(NULL);
2976		mminit_verify_zonelist();
2977		cpuset_init_current_mems_allowed();
2978	} else {
2979		/* we have to stop all cpus to guarantee there is no user
2980		   of zonelist */
2981		stop_machine(__build_all_zonelists, data, NULL);
2982		/* cpuset refresh routine should be here */
2983	}
2984	vm_total_pages = nr_free_pagecache_pages();
2985	/*
2986	 * Disable grouping by mobility if the number of pages in the
2987	 * system is too low to allow the mechanism to work. It would be
2988	 * more accurate, but expensive to check per-zone. This check is
2989	 * made on memory-hotadd so a system can start with mobility
2990	 * disabled and enable it later
2991	 */
2992	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2993		page_group_by_mobility_disabled = 1;
2994	else
2995		page_group_by_mobility_disabled = 0;
2996
2997	printk("Built %i zonelists in %s order, mobility grouping %s.  "
2998		"Total pages: %ld\n",
2999			nr_online_nodes,
3000			zonelist_order_name[current_zonelist_order],
3001			page_group_by_mobility_disabled ? "off" : "on",
3002			vm_total_pages);
3003#ifdef CONFIG_NUMA
3004	printk("Policy zone: %s\n", zone_names[policy_zone]);
3005#endif
3006}
3007
3008/*
3009 * Helper functions to size the waitqueue hash table.
3010 * Essentially these want to choose hash table sizes sufficiently
3011 * large so that collisions trying to wait on pages are rare.
3012 * But in fact, the number of active page waitqueues on typical
3013 * systems is ridiculously low, less than 200. So this is even
3014 * conservative, even though it seems large.
3015 *
3016 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3017 * waitqueues, i.e. the size of the waitq table given the number of pages.
3018 */
3019#define PAGES_PER_WAITQUEUE	256
3020
3021#ifndef CONFIG_MEMORY_HOTPLUG
3022static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3023{
3024	unsigned long size = 1;
3025
3026	pages /= PAGES_PER_WAITQUEUE;
3027
3028	while (size < pages)
3029		size <<= 1;
3030
3031	/*
3032	 * Once we have dozens or even hundreds of threads sleeping
3033	 * on IO we've got bigger problems than wait queue collision.
3034	 * Limit the size of the wait table to a reasonable size.
3035	 */
3036	size = min(size, 4096UL);
3037
3038	return max(size, 4UL);
3039}
3040#else
3041/*
3042 * A zone's size might be changed by hot-add, so it is not possible to determine
3043 * a suitable size for its wait_table.  So we use the maximum size now.
3044 *
3045 * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
3046 *
3047 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
3048 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3049 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
3050 *
3051 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
3052 * or more by the traditional way. (See above).  It equals:
3053 *
3054 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
3055 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
3056 *    powerpc (64K page size)             : =  (32G +16M)byte.
3057 */
3058static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3059{
3060	return 4096UL;
3061}
3062#endif
3063
3064/*
3065 * This is an integer logarithm so that shifts can be used later
3066 * to extract the more random high bits from the multiplicative
3067 * hash function before the remainder is taken.
3068 */
3069static inline unsigned long wait_table_bits(unsigned long size)
3070{
3071	return ffz(~size);
3072}
3073
3074#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3075
3076/*
3077 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3078 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3079 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
3080 * higher will lead to a bigger reserve which will get freed as contiguous
3081 * blocks as reclaim kicks in
3082 */
3083static void setup_zone_migrate_reserve(struct zone *zone)
3084{
3085	unsigned long start_pfn, pfn, end_pfn;
3086	struct page *page;
3087	unsigned long block_migratetype;
3088	int reserve;
3089
3090	/* Get the start pfn, end pfn and the number of blocks to reserve */
3091	start_pfn = zone->zone_start_pfn;
3092	end_pfn = start_pfn + zone->spanned_pages;
3093	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3094							pageblock_order;
3095
3096	/*
3097	 * Reserve blocks are generally in place to help high-order atomic
3098	 * allocations that are short-lived. A min_free_kbytes value that
3099	 * would result in more than 2 reserve blocks for atomic allocations
3100	 * is assumed to be in place to help anti-fragmentation for the
3101	 * future allocation of hugepages at runtime.
3102	 */
3103	reserve = min(2, reserve);
3104
3105	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
3106		if (!pfn_valid(pfn))
3107			continue;
3108		page = pfn_to_page(pfn);
3109
3110		/* Watch out for overlapping nodes */
3111		if (page_to_nid(page) != zone_to_nid(zone))
3112			continue;
3113
3114		/* Blocks with reserved pages will never free, skip them. */
3115		if (PageReserved(page))
3116			continue;
3117
3118		block_migratetype = get_pageblock_migratetype(page);
3119
3120		/* If this block is reserved, account for it */
3121		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
3122			reserve--;
3123			continue;
3124		}
3125
3126		/* Suitable for reserving if this block is movable */
3127		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
3128			set_pageblock_migratetype(page, MIGRATE_RESERVE);
3129			move_freepages_block(zone, page, MIGRATE_RESERVE);
3130			reserve--;
3131			continue;
3132		}
3133
3134		/*
3135		 * If the reserve is met and this is a previous reserved block,
3136		 * take it back
3137		 */
3138		if (block_migratetype == MIGRATE_RESERVE) {
3139			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3140			move_freepages_block(zone, page, MIGRATE_MOVABLE);
3141		}
3142	}
3143}
3144
3145/*
3146 * Initially all pages are reserved - free ones are freed
3147 * up by free_all_bootmem() once the early boot process is
3148 * done. Non-atomic initialization, single-pass.
3149 */
3150void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3151		unsigned long start_pfn, enum memmap_context context)
3152{
3153	struct page *page;
3154	unsigned long end_pfn = start_pfn + size;
3155	unsigned long pfn;
3156	struct zone *z;
3157
3158	if (highest_memmap_pfn < end_pfn - 1)
3159		highest_memmap_pfn = end_pfn - 1;
3160
3161	z = &NODE_DATA(nid)->node_zones[zone];
3162	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3163		/*
3164		 * There can be holes in boot-time mem_map[]s
3165		 * handed to this function.  They do not
3166		 * exist on hotplugged memory.
3167		 */
3168		if (context == MEMMAP_EARLY) {
3169			if (!early_pfn_valid(pfn))
3170				continue;
3171			if (!early_pfn_in_nid(pfn, nid))
3172				continue;
3173		}
3174		page = pfn_to_page(pfn);
3175		set_page_links(page, zone, nid, pfn);
3176		mminit_verify_page_links(page, zone, nid, pfn);
3177		init_page_count(page);
3178		reset_page_mapcount(page);
3179		SetPageReserved(page);
3180		/*
3181		 * Mark the block movable so that blocks are reserved for
3182		 * movable at startup. This will force kernel allocations
3183		 * to reserve their blocks rather than leaking throughout
3184		 * the address space during boot when many long-lived
3185		 * kernel allocations are made. Later some blocks near
3186		 * the start are marked MIGRATE_RESERVE by
3187		 * setup_zone_migrate_reserve()
3188		 *
3189		 * bitmap is created for zone's valid pfn range. but memmap
3190		 * can be created for invalid pages (for alignment)
3191		 * check here not to call set_pageblock_migratetype() against
3192		 * pfn out of zone.
3193		 */
3194		if ((z->zone_start_pfn <= pfn)
3195		    && (pfn < z->zone_start_pfn + z->spanned_pages)
3196		    && !(pfn & (pageblock_nr_pages - 1)))
3197			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3198
3199		INIT_LIST_HEAD(&page->lru);
3200#ifdef WANT_PAGE_VIRTUAL
3201		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
3202		if (!is_highmem_idx(zone))
3203			set_page_address(page, __va(pfn << PAGE_SHIFT));
3204#endif
3205	}
3206}
3207
3208static void __meminit zone_init_free_lists(struct zone *zone)
3209{
3210	int order, t;
3211	for_each_migratetype_order(order, t) {
3212		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3213		zone->free_area[order].nr_free = 0;
3214	}
3215}
3216
3217#ifndef __HAVE_ARCH_MEMMAP_INIT
3218#define memmap_init(size, nid, zone, start_pfn) \
3219	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3220#endif
3221
3222static int zone_batchsize(struct zone *zone)
3223{
3224#ifdef CONFIG_MMU
3225	int batch;
3226
3227	/*
3228	 * The per-cpu-pages pools are set to around 1000th of the
3229	 * size of the zone.  But no more than 1/2 of a meg.
3230	 *
3231	 * OK, so we don't know how big the cache is.  So guess.
3232	 */
3233	batch = zone->present_pages / 1024;
3234	if (batch * PAGE_SIZE > 512 * 1024)
3235		batch = (512 * 1024) / PAGE_SIZE;
3236	batch /= 4;		/* We effectively *= 4 below */
3237	if (batch < 1)
3238		batch = 1;
3239
3240	/*
3241	 * Clamp the batch to a 2^n - 1 value. Having a power
3242	 * of 2 value was found to be more likely to have
3243	 * suboptimal cache aliasing properties in some cases.
3244	 *
3245	 * For example if 2 tasks are alternately allocating
3246	 * batches of pages, one task can end up with a lot
3247	 * of pages of one half of the possible page colors
3248	 * and the other with pages of the other colors.
3249	 */
3250	batch = rounddown_pow_of_two(batch + batch/2) - 1;
3251
3252	return batch;
3253
3254#else
3255	/* The deferral and batching of frees should be suppressed under NOMMU
3256	 * conditions.
3257	 *
3258	 * The problem is that NOMMU needs to be able to allocate large chunks
3259	 * of contiguous memory as there's no hardware page translation to
3260	 * assemble apparent contiguous memory from discontiguous pages.
3261	 *
3262	 * Queueing large contiguous runs of pages for batching, however,
3263	 * causes the pages to actually be freed in smaller chunks.  As there
3264	 * can be a significant delay between the individual batches being
3265	 * recycled, this leads to the once large chunks of space being
3266	 * fragmented and becoming unavailable for high-order allocations.
3267	 */
3268	return 0;
3269#endif
3270}
3271
3272static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3273{
3274	struct per_cpu_pages *pcp;
3275	int migratetype;
3276
3277	memset(p, 0, sizeof(*p));
3278
3279	pcp = &p->pcp;
3280	pcp->count = 0;
3281	pcp->high = 6 * batch;
3282	pcp->batch = max(1UL, 1 * batch);
3283	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3284		INIT_LIST_HEAD(&pcp->lists[migratetype]);
3285}
3286
3287/*
3288 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
3289 * to the value high for the pageset p.
3290 */
3291
3292static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3293				unsigned long high)
3294{
3295	struct per_cpu_pages *pcp;
3296
3297	pcp = &p->pcp;
3298	pcp->high = high;
3299	pcp->batch = max(1UL, high/4);
3300	if ((high/4) > (PAGE_SHIFT * 8))
3301		pcp->batch = PAGE_SHIFT * 8;
3302}
3303
3304static __meminit void setup_zone_pageset(struct zone *zone)
3305{
3306	int cpu;
3307
3308	zone->pageset = alloc_percpu(struct per_cpu_pageset);
3309
3310	for_each_possible_cpu(cpu) {
3311		struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3312
3313		setup_pageset(pcp, zone_batchsize(zone));
3314
3315		if (percpu_pagelist_fraction)
3316			setup_pagelist_highmark(pcp,
3317				(zone->present_pages /
3318					percpu_pagelist_fraction));
3319	}
3320}
3321
3322/*
3323 * Allocate per cpu pagesets and initialize them.
3324 * Before this call only boot pagesets were available.
3325 */
3326void __init setup_per_cpu_pageset(void)
3327{
3328	struct zone *zone;
3329
3330	for_each_populated_zone(zone)
3331		setup_zone_pageset(zone);
3332}
3333
3334static noinline __init_refok
3335int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3336{
3337	int i;
3338	struct pglist_data *pgdat = zone->zone_pgdat;
3339	size_t alloc_size;
3340
3341	/*
3342	 * The per-page waitqueue mechanism uses hashed waitqueues
3343	 * per zone.
3344	 */
3345	zone->wait_table_hash_nr_entries =
3346		 wait_table_hash_nr_entries(zone_size_pages);
3347	zone->wait_table_bits =
3348		wait_table_bits(zone->wait_table_hash_nr_entries);
3349	alloc_size = zone->wait_table_hash_nr_entries
3350					* sizeof(wait_queue_head_t);
3351
3352	if (!slab_is_available()) {
3353		zone->wait_table = (wait_queue_head_t *)
3354			alloc_bootmem_node(pgdat, alloc_size);
3355	} else {
3356		/*
3357		 * This case means that a zone whose size was 0 gets new memory
3358		 * via memory hot-add.
3359		 * But it may be the case that a new node was hot-added.  In
3360		 * this case vmalloc() will not be able to use this new node's
3361		 * memory - this wait_table must be initialized to use this new
3362		 * node itself as well.
3363		 * To use this new node's memory, further consideration will be
3364		 * necessary.
3365		 */
3366		zone->wait_table = vmalloc(alloc_size);
3367	}
3368	if (!zone->wait_table)
3369		return -ENOMEM;
3370
3371	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3372		init_waitqueue_head(zone->wait_table + i);
3373
3374	return 0;
3375}
3376
3377static int __zone_pcp_update(void *data)
3378{
3379	struct zone *zone = data;
3380	int cpu;
3381	unsigned long batch = zone_batchsize(zone), flags;
3382
3383	for_each_possible_cpu(cpu) {
3384		struct per_cpu_pageset *pset;
3385		struct per_cpu_pages *pcp;
3386
3387		pset = per_cpu_ptr(zone->pageset, cpu);
3388		pcp = &pset->pcp;
3389
3390		local_irq_save(flags);
3391		free_pcppages_bulk(zone, pcp->count, pcp);
3392		setup_pageset(pset, batch);
3393		local_irq_restore(flags);
3394	}
3395	return 0;
3396}
3397
3398void zone_pcp_update(struct zone *zone)
3399{
3400	stop_machine(__zone_pcp_update, zone, NULL);
3401}
3402
3403static __meminit void zone_pcp_init(struct zone *zone)
3404{
3405	/*
3406	 * per cpu subsystem is not up at this point. The following code
3407	 * relies on the ability of the linker to provide the
3408	 * offset of a (static) per cpu variable into the per cpu area.
3409	 */
3410	zone->pageset = &boot_pageset;
3411
3412	if (zone->present_pages)
3413		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
3414			zone->name, zone->present_pages,
3415					 zone_batchsize(zone));
3416}
3417
3418__meminit int init_currently_empty_zone(struct zone *zone,
3419					unsigned long zone_start_pfn,
3420					unsigned long size,
3421					enum memmap_context context)
3422{
3423	struct pglist_data *pgdat = zone->zone_pgdat;
3424	int ret;
3425	ret = zone_wait_table_init(zone, size);
3426	if (ret)
3427		return ret;
3428	pgdat->nr_zones = zone_idx(zone) + 1;
3429
3430	zone->zone_start_pfn = zone_start_pfn;
3431
3432	mminit_dprintk(MMINIT_TRACE, "memmap_init",
3433			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
3434			pgdat->node_id,
3435			(unsigned long)zone_idx(zone),
3436			zone_start_pfn, (zone_start_pfn + size));
3437
3438	zone_init_free_lists(zone);
3439
3440	return 0;
3441}
3442
3443#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3444/*
3445 * Basic iterator support. Return the first range of PFNs for a node
3446 * Note: nid == MAX_NUMNODES returns first region regardless of node
3447 */
3448static int __meminit first_active_region_index_in_nid(int nid)
3449{
3450	int i;
3451
3452	for (i = 0; i < nr_nodemap_entries; i++)
3453		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3454			return i;
3455
3456	return -1;
3457}
3458
3459/*
3460 * Basic iterator support. Return the next active range of PFNs for a node
3461 * Note: nid == MAX_NUMNODES returns next region regardless of node
3462 */
3463static int __meminit next_active_region_index_in_nid(int index, int nid)
3464{
3465	for (index = index + 1; index < nr_nodemap_entries; index++)
3466		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3467			return index;
3468
3469	return -1;
3470}
3471
3472#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3473/*
3474 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3475 * Architectures may implement their own version but if add_active_range()
3476 * was used and there are no special requirements, this is a convenient
3477 * alternative
3478 */
3479int __meminit __early_pfn_to_nid(unsigned long pfn)
3480{
3481	int i;
3482
3483	for (i = 0; i < nr_nodemap_entries; i++) {
3484		unsigned long start_pfn = early_node_map[i].start_pfn;
3485		unsigned long end_pfn = early_node_map[i].end_pfn;
3486
3487		if (start_pfn <= pfn && pfn < end_pfn)
3488			return early_node_map[i].nid;
3489	}
3490	/* This is a memory hole */
3491	return -1;
3492}
3493#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3494
3495int __meminit early_pfn_to_nid(unsigned long pfn)
3496{
3497	int nid;
3498
3499	nid = __early_pfn_to_nid(pfn);
3500	if (nid >= 0)
3501		return nid;
3502	/* just returns 0 */
3503	return 0;
3504}
3505
3506#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3507bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3508{
3509	int nid;
3510
3511	nid = __early_pfn_to_nid(pfn);
3512	if (nid >= 0 && nid != node)
3513		return false;
3514	return true;
3515}
3516#endif
3517
3518/* Basic iterator support to walk early_node_map[] */
3519#define for_each_active_range_index_in_nid(i, nid) \
3520	for (i = first_active_region_index_in_nid(nid); i != -1; \
3521				i = next_active_region_index_in_nid(i, nid))
3522
3523/**
3524 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3525 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3526 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3527 *
3528 * If an architecture guarantees that all ranges registered with
3529 * add_active_ranges() contain no holes and may be freed, this
3530 * this function may be used instead of calling free_bootmem() manually.
3531 */
3532void __init free_bootmem_with_active_regions(int nid,
3533						unsigned long max_low_pfn)
3534{
3535	int i;
3536
3537	for_each_active_range_index_in_nid(i, nid) {
3538		unsigned long size_pages = 0;
3539		unsigned long end_pfn = early_node_map[i].end_pfn;
3540
3541		if (early_node_map[i].start_pfn >= max_low_pfn)
3542			continue;
3543
3544		if (end_pfn > max_low_pfn)
3545			end_pfn = max_low_pfn;
3546
3547		size_pages = end_pfn - early_node_map[i].start_pfn;
3548		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3549				PFN_PHYS(early_node_map[i].start_pfn),
3550				size_pages << PAGE_SHIFT);
3551	}
3552}
3553
3554int __init add_from_early_node_map(struct range *range, int az,
3555				   int nr_range, int nid)
3556{
3557	int i;
3558	u64 start, end;
3559
3560	/* need to go over early_node_map to find out good range for node */
3561	for_each_active_range_index_in_nid(i, nid) {
3562		start = early_node_map[i].start_pfn;
3563		end = early_node_map[i].end_pfn;
3564		nr_range = add_range(range, az, nr_range, start, end);
3565	}
3566	return nr_range;
3567}
3568
3569#ifdef CONFIG_NO_BOOTMEM
3570void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
3571					u64 goal, u64 limit)
3572{
3573	int i;
3574	void *ptr;
3575
3576	/* need to go over early_node_map to find out good range for node */
3577	for_each_active_range_index_in_nid(i, nid) {
3578		u64 addr;
3579		u64 ei_start, ei_last;
3580
3581		ei_last = early_node_map[i].end_pfn;
3582		ei_last <<= PAGE_SHIFT;
3583		ei_start = early_node_map[i].start_pfn;
3584		ei_start <<= PAGE_SHIFT;
3585		addr = find_early_area(ei_start, ei_last,
3586					 goal, limit, size, align);
3587
3588		if (addr == -1ULL)
3589			continue;
3590
3591#if 0
3592		printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
3593				nid,
3594				ei_start, ei_last, goal, limit, size,
3595				align, addr);
3596#endif
3597
3598		ptr = phys_to_virt(addr);
3599		memset(ptr, 0, size);
3600		reserve_early_without_check(addr, addr + size, "BOOTMEM");
3601		return ptr;
3602	}
3603
3604	return NULL;
3605}
3606#endif
3607
3608
3609void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3610{
3611	int i;
3612	int ret;
3613
3614	for_each_active_range_index_in_nid(i, nid) {
3615		ret = work_fn(early_node_map[i].start_pfn,
3616			      early_node_map[i].end_pfn, data);
3617		if (ret)
3618			break;
3619	}
3620}
3621/**
3622 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3623 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3624 *
3625 * If an architecture guarantees that all ranges registered with
3626 * add_active_ranges() contain no holes and may be freed, this
3627 * function may be used instead of calling memory_present() manually.
3628 */
3629void __init sparse_memory_present_with_active_regions(int nid)
3630{
3631	int i;
3632
3633	for_each_active_range_index_in_nid(i, nid)
3634		memory_present(early_node_map[i].nid,
3635				early_node_map[i].start_pfn,
3636				early_node_map[i].end_pfn);
3637}
3638
3639/**
3640 * get_pfn_range_for_nid - Return the start and end page frames for a node
3641 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3642 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3643 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3644 *
3645 * It returns the start and end page frame of a node based on information
3646 * provided by an arch calling add_active_range(). If called for a node
3647 * with no available memory, a warning is printed and the start and end
3648 * PFNs will be 0.
3649 */
3650void __meminit get_pfn_range_for_nid(unsigned int nid,
3651			unsigned long *start_pfn, unsigned long *end_pfn)
3652{
3653	int i;
3654	*start_pfn = -1UL;
3655	*end_pfn = 0;
3656
3657	for_each_active_range_index_in_nid(i, nid) {
3658		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3659		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3660	}
3661
3662	if (*start_pfn == -1UL)
3663		*start_pfn = 0;
3664}
3665
3666/*
3667 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3668 * assumption is made that zones within a node are ordered in monotonic
3669 * increasing memory addresses so that the "highest" populated zone is used
3670 */
3671static void __init find_usable_zone_for_movable(void)
3672{
3673	int zone_index;
3674	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3675		if (zone_index == ZONE_MOVABLE)
3676			continue;
3677
3678		if (arch_zone_highest_possible_pfn[zone_index] >
3679				arch_zone_lowest_possible_pfn[zone_index])
3680			break;
3681	}
3682
3683	VM_BUG_ON(zone_index == -1);
3684	movable_zone = zone_index;
3685}
3686
3687/*
3688 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3689 * because it is sized independant of architecture. Unlike the other zones,
3690 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3691 * in each node depending on the size of each node and how evenly kernelcore
3692 * is distributed. This helper function adjusts the zone ranges
3693 * provided by the architecture for a given node by using the end of the
3694 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3695 * zones within a node are in order of monotonic increases memory addresses
3696 */
3697static void __meminit adjust_zone_range_for_zone_movable(int nid,
3698					unsigned long zone_type,
3699					unsigned long node_start_pfn,
3700					unsigned long node_end_pfn,
3701					unsigned long *zone_start_pfn,
3702					unsigned long *zone_end_pfn)
3703{
3704	/* Only adjust if ZONE_MOVABLE is on this node */
3705	if (zone_movable_pfn[nid]) {
3706		/* Size ZONE_MOVABLE */
3707		if (zone_type == ZONE_MOVABLE) {
3708			*zone_start_pfn = zone_movable_pfn[nid];
3709			*zone_end_pfn = min(node_end_pfn,
3710				arch_zone_highest_possible_pfn[movable_zone]);
3711
3712		/* Adjust for ZONE_MOVABLE starting within this range */
3713		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3714				*zone_end_pfn > zone_movable_pfn[nid]) {
3715			*zone_end_pfn = zone_movable_pfn[nid];
3716
3717		/* Check if this whole range is within ZONE_MOVABLE */
3718		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3719			*zone_start_pfn = *zone_end_pfn;
3720	}
3721}
3722
3723/*
3724 * Return the number of pages a zone spans in a node, including holes
3725 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3726 */
3727static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3728					unsigned long zone_type,
3729					unsigned long *ignored)
3730{
3731	unsigned long node_start_pfn, node_end_pfn;
3732	unsigned long zone_start_pfn, zone_end_pfn;
3733
3734	/* Get the start and end of the node and zone */
3735	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3736	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3737	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3738	adjust_zone_range_for_zone_movable(nid, zone_type,
3739				node_start_pfn, node_end_pfn,
3740				&zone_start_pfn, &zone_end_pfn);
3741
3742	/* Check that this node has pages within the zone's required range */
3743	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3744		return 0;
3745
3746	/* Move the zone boundaries inside the node if necessary */
3747	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3748	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3749
3750	/* Return the spanned pages */
3751	return zone_end_pfn - zone_start_pfn;
3752}
3753
3754/*
3755 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3756 * then all holes in the requested range will be accounted for.
3757 */
3758unsigned long __meminit __absent_pages_in_range(int nid,
3759				unsigned long range_start_pfn,
3760				unsigned long range_end_pfn)
3761{
3762	int i = 0;
3763	unsigned long prev_end_pfn = 0, hole_pages = 0;
3764	unsigned long start_pfn;
3765
3766	/* Find the end_pfn of the first active range of pfns in the node */
3767	i = first_active_region_index_in_nid(nid);
3768	if (i == -1)
3769		return 0;
3770
3771	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3772
3773	/* Account for ranges before physical memory on this node */
3774	if (early_node_map[i].start_pfn > range_start_pfn)
3775		hole_pages = prev_end_pfn - range_start_pfn;
3776
3777	/* Find all holes for the zone within the node */
3778	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3779
3780		/* No need to continue if prev_end_pfn is outside the zone */
3781		if (prev_end_pfn >= range_end_pfn)
3782			break;
3783
3784		/* Make sure the end of the zone is not within the hole */
3785		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3786		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3787
3788		/* Update the hole size cound and move on */
3789		if (start_pfn > range_start_pfn) {
3790			BUG_ON(prev_end_pfn > start_pfn);
3791			hole_pages += start_pfn - prev_end_pfn;
3792		}
3793		prev_end_pfn = early_node_map[i].end_pfn;
3794	}
3795
3796	/* Account for ranges past physical memory on this node */
3797	if (range_end_pfn > prev_end_pfn)
3798		hole_pages += range_end_pfn -
3799				max(range_start_pfn, prev_end_pfn);
3800
3801	return hole_pages;
3802}
3803
3804/**
3805 * absent_pages_in_range - Return number of page frames in holes within a range
3806 * @start_pfn: The start PFN to start searching for holes
3807 * @end_pfn: The end PFN to stop searching for holes
3808 *
3809 * It returns the number of pages frames in memory holes within a range.
3810 */
3811unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3812							unsigned long end_pfn)
3813{
3814	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3815}
3816
3817/* Return the number of page frames in holes in a zone on a node */
3818static unsigned long __meminit zone_absent_pages_in_node(int nid,
3819					unsigned long zone_type,
3820					unsigned long *ignored)
3821{
3822	unsigned long node_start_pfn, node_end_pfn;
3823	unsigned long zone_start_pfn, zone_end_pfn;
3824
3825	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3826	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3827							node_start_pfn);
3828	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3829							node_end_pfn);
3830
3831	adjust_zone_range_for_zone_movable(nid, zone_type,
3832			node_start_pfn, node_end_pfn,
3833			&zone_start_pfn, &zone_end_pfn);
3834	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3835}
3836
3837#else
3838static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3839					unsigned long zone_type,
3840					unsigned long *zones_size)
3841{
3842	return zones_size[zone_type];
3843}
3844
3845static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3846						unsigned long zone_type,
3847						unsigned long *zholes_size)
3848{
3849	if (!zholes_size)
3850		return 0;
3851
3852	return zholes_size[zone_type];
3853}
3854
3855#endif
3856
3857static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3858		unsigned long *zones_size, unsigned long *zholes_size)
3859{
3860	unsigned long realtotalpages, totalpages = 0;
3861	enum zone_type i;
3862
3863	for (i = 0; i < MAX_NR_ZONES; i++)
3864		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3865								zones_size);
3866	pgdat->node_spanned_pages = totalpages;
3867
3868	realtotalpages = totalpages;
3869	for (i = 0; i < MAX_NR_ZONES; i++)
3870		realtotalpages -=
3871			zone_absent_pages_in_node(pgdat->node_id, i,
3872								zholes_size);
3873	pgdat->node_present_pages = realtotalpages;
3874	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3875							realtotalpages);
3876}
3877
3878#ifndef CONFIG_SPARSEMEM
3879/*
3880 * Calculate the size of the zone->blockflags rounded to an unsigned long
3881 * Start by making sure zonesize is a multiple of pageblock_order by rounding
3882 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
3883 * round what is now in bits to nearest long in bits, then return it in
3884 * bytes.
3885 */
3886static unsigned long __init usemap_size(unsigned long zonesize)
3887{
3888	unsigned long usemapsize;
3889
3890	usemapsize = roundup(zonesize, pageblock_nr_pages);
3891	usemapsize = usemapsize >> pageblock_order;
3892	usemapsize *= NR_PAGEBLOCK_BITS;
3893	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3894
3895	return usemapsize / 8;
3896}
3897
3898static void __init setup_usemap(struct pglist_data *pgdat,
3899				struct zone *zone, unsigned long zonesize)
3900{
3901	unsigned long usemapsize = usemap_size(zonesize);
3902	zone->pageblock_flags = NULL;
3903	if (usemapsize)
3904		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3905}
3906#else
3907static void inline setup_usemap(struct pglist_data *pgdat,
3908				struct zone *zone, unsigned long zonesize) {}
3909#endif /* CONFIG_SPARSEMEM */
3910
3911#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3912
3913/* Return a sensible default order for the pageblock size. */
3914static inline int pageblock_default_order(void)
3915{
3916	if (HPAGE_SHIFT > PAGE_SHIFT)
3917		return HUGETLB_PAGE_ORDER;
3918
3919	return MAX_ORDER-1;
3920}
3921
3922/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3923static inline void __init set_pageblock_order(unsigned int order)
3924{
3925	/* Check that pageblock_nr_pages has not already been setup */
3926	if (pageblock_order)
3927		return;
3928
3929	/*
3930	 * Assume the largest contiguous order of interest is a huge page.
3931	 * This value may be variable depending on boot parameters on IA64
3932	 */
3933	pageblock_order = order;
3934}
3935#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3936
3937/*
3938 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3939 * and pageblock_default_order() are unused as pageblock_order is set
3940 * at compile-time. See include/linux/pageblock-flags.h for the values of
3941 * pageblock_order based on the kernel config
3942 */
3943static inline int pageblock_default_order(unsigned int order)
3944{
3945	return MAX_ORDER-1;
3946}
3947#define set_pageblock_order(x)	do {} while (0)
3948
3949#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3950
3951/*
3952 * Set up the zone data structures:
3953 *   - mark all pages reserved
3954 *   - mark all memory queues empty
3955 *   - clear the memory bitmaps
3956 */
3957static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3958		unsigned long *zones_size, unsigned long *zholes_size)
3959{
3960	enum zone_type j;
3961	int nid = pgdat->node_id;
3962	unsigned long zone_start_pfn = pgdat->node_start_pfn;
3963	int ret;
3964
3965	pgdat_resize_init(pgdat);
3966	pgdat->nr_zones = 0;
3967	init_waitqueue_head(&pgdat->kswapd_wait);
3968	pgdat->kswapd_max_order = 0;
3969	pgdat_page_cgroup_init(pgdat);
3970
3971	for (j = 0; j < MAX_NR_ZONES; j++) {
3972		struct zone *zone = pgdat->node_zones + j;
3973		unsigned long size, realsize, memmap_pages;
3974		enum lru_list l;
3975
3976		size = zone_spanned_pages_in_node(nid, j, zones_size);
3977		realsize = size - zone_absent_pages_in_node(nid, j,
3978								zholes_size);
3979
3980		/*
3981		 * Adjust realsize so that it accounts for how much memory
3982		 * is used by this zone for memmap. This affects the watermark
3983		 * and per-cpu initialisations
3984		 */
3985		memmap_pages =
3986			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3987		if (realsize >= memmap_pages) {
3988			realsize -= memmap_pages;
3989			if (memmap_pages)
3990				printk(KERN_DEBUG
3991				       "  %s zone: %lu pages used for memmap\n",
3992				       zone_names[j], memmap_pages);
3993		} else
3994			printk(KERN_WARNING
3995				"  %s zone: %lu pages exceeds realsize %lu\n",
3996				zone_names[j], memmap_pages, realsize);
3997
3998		/* Account for reserved pages */
3999		if (j == 0 && realsize > dma_reserve) {
4000			realsize -= dma_reserve;
4001			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
4002					zone_names[0], dma_reserve);
4003		}
4004
4005		if (!is_highmem_idx(j))
4006			nr_kernel_pages += realsize;
4007		nr_all_pages += realsize;
4008
4009		zone->spanned_pages = size;
4010		zone->present_pages = realsize;
4011#ifdef CONFIG_NUMA
4012		zone->node = nid;
4013		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
4014						/ 100;
4015		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
4016#endif
4017		zone->name = zone_names[j];
4018		spin_lock_init(&zone->lock);
4019		spin_lock_init(&zone->lru_lock);
4020		zone_seqlock_init(zone);
4021		zone->zone_pgdat = pgdat;
4022
4023		zone->prev_priority = DEF_PRIORITY;
4024
4025		zone_pcp_init(zone);
4026		for_each_lru(l) {
4027			INIT_LIST_HEAD(&zone->lru[l].list);
4028			zone->reclaim_stat.nr_saved_scan[l] = 0;
4029		}
4030		zone->reclaim_stat.recent_rotated[0] = 0;
4031		zone->reclaim_stat.recent_rotated[1] = 0;
4032		zone->reclaim_stat.recent_scanned[0] = 0;
4033		zone->reclaim_stat.recent_scanned[1] = 0;
4034		zap_zone_vm_stats(zone);
4035		zone->flags = 0;
4036		if (!size)
4037			continue;
4038
4039		set_pageblock_order(pageblock_default_order());
4040		setup_usemap(pgdat, zone, size);
4041		ret = init_currently_empty_zone(zone, zone_start_pfn,
4042						size, MEMMAP_EARLY);
4043		BUG_ON(ret);
4044		memmap_init(size, nid, j, zone_start_pfn);
4045		zone_start_pfn += size;
4046	}
4047}
4048
4049static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4050{
4051	/* Skip empty nodes */
4052	if (!pgdat->node_spanned_pages)
4053		return;
4054
4055#ifdef CONFIG_FLAT_NODE_MEM_MAP
4056	/* ia64 gets its own node_mem_map, before this, without bootmem */
4057	if (!pgdat->node_mem_map) {
4058		unsigned long size, start, end;
4059		struct page *map;
4060
4061		/*
4062		 * The zone's endpoints aren't required to be MAX_ORDER
4063		 * aligned but the node_mem_map endpoints must be in order
4064		 * for the buddy allocator to function correctly.
4065		 */
4066		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4067		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4068		end = ALIGN(end, MAX_ORDER_NR_PAGES);
4069		size =  (end - start) * sizeof(struct page);
4070		map = alloc_remap(pgdat->node_id, size);
4071		if (!map)
4072			map = alloc_bootmem_node(pgdat, size);
4073		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4074	}
4075#ifndef CONFIG_NEED_MULTIPLE_NODES
4076	/*
4077	 * With no DISCONTIG, the global mem_map is just set as node 0's
4078	 */
4079	if (pgdat == NODE_DATA(0)) {
4080		mem_map = NODE_DATA(0)->node_mem_map;
4081#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4082		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4083			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4084#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4085	}
4086#endif
4087#endif /* CONFIG_FLAT_NODE_MEM_MAP */
4088}
4089
4090void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4091		unsigned long node_start_pfn, unsigned long *zholes_size)
4092{
4093	pg_data_t *pgdat = NODE_DATA(nid);
4094
4095	pgdat->node_id = nid;
4096	pgdat->node_start_pfn = node_start_pfn;
4097	calculate_node_totalpages(pgdat, zones_size, zholes_size);
4098
4099	alloc_node_mem_map(pgdat);
4100#ifdef CONFIG_FLAT_NODE_MEM_MAP
4101	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4102		nid, (unsigned long)pgdat,
4103		(unsigned long)pgdat->node_mem_map);
4104#endif
4105
4106	free_area_init_core(pgdat, zones_size, zholes_size);
4107}
4108
4109#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4110
4111#if MAX_NUMNODES > 1
4112/*
4113 * Figure out the number of possible node ids.
4114 */
4115static void __init setup_nr_node_ids(void)
4116{
4117	unsigned int node;
4118	unsigned int highest = 0;
4119
4120	for_each_node_mask(node, node_possible_map)
4121		highest = node;
4122	nr_node_ids = highest + 1;
4123}
4124#else
4125static inline void setup_nr_node_ids(void)
4126{
4127}
4128#endif
4129
4130/**
4131 * add_active_range - Register a range of PFNs backed by physical memory
4132 * @nid: The node ID the range resides on
4133 * @start_pfn: The start PFN of the available physical memory
4134 * @end_pfn: The end PFN of the available physical memory
4135 *
4136 * These ranges are stored in an early_node_map[] and later used by
4137 * free_area_init_nodes() to calculate zone sizes and holes. If the
4138 * range spans a memory hole, it is up to the architecture to ensure
4139 * the memory is not freed by the bootmem allocator. If possible
4140 * the range being registered will be merged with existing ranges.
4141 */
4142void __init add_active_range(unsigned int nid, unsigned long start_pfn,
4143						unsigned long end_pfn)
4144{
4145	int i;
4146
4147	mminit_dprintk(MMINIT_TRACE, "memory_register",
4148			"Entering add_active_range(%d, %#lx, %#lx) "
4149			"%d entries of %d used\n",
4150			nid, start_pfn, end_pfn,
4151			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
4152
4153	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
4154
4155	/* Merge with existing active regions if possible */
4156	for (i = 0; i < nr_nodemap_entries; i++) {
4157		if (early_node_map[i].nid != nid)
4158			continue;
4159
4160		/* Skip if an existing region covers this new one */
4161		if (start_pfn >= early_node_map[i].start_pfn &&
4162				end_pfn <= early_node_map[i].end_pfn)
4163			return;
4164
4165		/* Merge forward if suitable */
4166		if (start_pfn <= early_node_map[i].end_pfn &&
4167				end_pfn > early_node_map[i].end_pfn) {
4168			early_node_map[i].end_pfn = end_pfn;
4169			return;
4170		}
4171
4172		/* Merge backward if suitable */
4173		if (start_pfn < early_node_map[i].start_pfn &&
4174				end_pfn >= early_node_map[i].start_pfn) {
4175			early_node_map[i].start_pfn = start_pfn;
4176			return;
4177		}
4178	}
4179
4180	/* Check that early_node_map is large enough */
4181	if (i >= MAX_ACTIVE_REGIONS) {
4182		printk(KERN_CRIT "More than %d memory regions, truncating\n",
4183							MAX_ACTIVE_REGIONS);
4184		return;
4185	}
4186
4187	early_node_map[i].nid = nid;
4188	early_node_map[i].start_pfn = start_pfn;
4189	early_node_map[i].end_pfn = end_pfn;
4190	nr_nodemap_entries = i + 1;
4191}
4192
4193/**
4194 * remove_active_range - Shrink an existing registered range of PFNs
4195 * @nid: The node id the range is on that should be shrunk
4196 * @start_pfn: The new PFN of the range
4197 * @end_pfn: The new PFN of the range
4198 *
4199 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
4200 * The map is kept near the end physical page range that has already been
4201 * registered. This function allows an arch to shrink an existing registered
4202 * range.
4203 */
4204void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4205				unsigned long end_pfn)
4206{
4207	int i, j;
4208	int removed = 0;
4209
4210	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4211			  nid, start_pfn, end_pfn);
4212
4213	/* Find the old active region end and shrink */
4214	for_each_active_range_index_in_nid(i, nid) {
4215		if (early_node_map[i].start_pfn >= start_pfn &&
4216		    early_node_map[i].end_pfn <= end_pfn) {
4217			/* clear it */
4218			early_node_map[i].start_pfn = 0;
4219			early_node_map[i].end_pfn = 0;
4220			removed = 1;
4221			continue;
4222		}
4223		if (early_node_map[i].start_pfn < start_pfn &&
4224		    early_node_map[i].end_pfn > start_pfn) {
4225			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4226			early_node_map[i].end_pfn = start_pfn;
4227			if (temp_end_pfn > end_pfn)
4228				add_active_range(nid, end_pfn, temp_end_pfn);
4229			continue;
4230		}
4231		if (early_node_map[i].start_pfn >= start_pfn &&
4232		    early_node_map[i].end_pfn > end_pfn &&
4233		    early_node_map[i].start_pfn < end_pfn) {
4234			early_node_map[i].start_pfn = end_pfn;
4235			continue;
4236		}
4237	}
4238
4239	if (!removed)
4240		return;
4241
4242	/* remove the blank ones */
4243	for (i = nr_nodemap_entries - 1; i > 0; i--) {
4244		if (early_node_map[i].nid != nid)
4245			continue;
4246		if (early_node_map[i].end_pfn)
4247			continue;
4248		/* we found it, get rid of it */
4249		for (j = i; j < nr_nodemap_entries - 1; j++)
4250			memcpy(&early_node_map[j], &early_node_map[j+1],
4251				sizeof(early_node_map[j]));
4252		j = nr_nodemap_entries - 1;
4253		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4254		nr_nodemap_entries--;
4255	}
4256}
4257
4258/**
4259 * remove_all_active_ranges - Remove all currently registered regions
4260 *
4261 * During discovery, it may be found that a table like SRAT is invalid
4262 * and an alternative discovery method must be used. This function removes
4263 * all currently registered regions.
4264 */
4265void __init remove_all_active_ranges(void)
4266{
4267	memset(early_node_map, 0, sizeof(early_node_map));
4268	nr_nodemap_entries = 0;
4269}
4270
4271/* Compare two active node_active_regions */
4272static int __init cmp_node_active_region(const void *a, const void *b)
4273{
4274	struct node_active_region *arange = (struct node_active_region *)a;
4275	struct node_active_region *brange = (struct node_active_region *)b;
4276
4277	/* Done this way to avoid overflows */
4278	if (arange->start_pfn > brange->start_pfn)
4279		return 1;
4280	if (arange->start_pfn < brange->start_pfn)
4281		return -1;
4282
4283	return 0;
4284}
4285
4286/* sort the node_map by start_pfn */
4287void __init sort_node_map(void)
4288{
4289	sort(early_node_map, (size_t)nr_nodemap_entries,
4290			sizeof(struct node_active_region),
4291			cmp_node_active_region, NULL);
4292}
4293
4294/* Find the lowest pfn for a node */
4295static unsigned long __init find_min_pfn_for_node(int nid)
4296{
4297	int i;
4298	unsigned long min_pfn = ULONG_MAX;
4299
4300	/* Assuming a sorted map, the first range found has the starting pfn */
4301	for_each_active_range_index_in_nid(i, nid)
4302		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4303
4304	if (min_pfn == ULONG_MAX) {
4305		printk(KERN_WARNING
4306			"Could not find start_pfn for node %d\n", nid);
4307		return 0;
4308	}
4309
4310	return min_pfn;
4311}
4312
4313/**
4314 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4315 *
4316 * It returns the minimum PFN based on information provided via
4317 * add_active_range().
4318 */
4319unsigned long __init find_min_pfn_with_active_regions(void)
4320{
4321	return find_min_pfn_for_node(MAX_NUMNODES);
4322}
4323
4324/*
4325 * early_calculate_totalpages()
4326 * Sum pages in active regions for movable zone.
4327 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4328 */
4329static unsigned long __init early_calculate_totalpages(void)
4330{
4331	int i;
4332	unsigned long totalpages = 0;
4333
4334	for (i = 0; i < nr_nodemap_entries; i++) {
4335		unsigned long pages = early_node_map[i].end_pfn -
4336						early_node_map[i].start_pfn;
4337		totalpages += pages;
4338		if (pages)
4339			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4340	}
4341  	return totalpages;
4342}
4343
4344/*
4345 * Find the PFN the Movable zone begins in each node. Kernel memory
4346 * is spread evenly between nodes as long as the nodes have enough
4347 * memory. When they don't, some nodes will have more kernelcore than
4348 * others
4349 */
4350static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4351{
4352	int i, nid;
4353	unsigned long usable_startpfn;
4354	unsigned long kernelcore_node, kernelcore_remaining;
4355	/* save the state before borrow the nodemask */
4356	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4357	unsigned long totalpages = early_calculate_totalpages();
4358	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4359
4360	/*
4361	 * If movablecore was specified, calculate what size of
4362	 * kernelcore that corresponds so that memory usable for
4363	 * any allocation type is evenly spread. If both kernelcore
4364	 * and movablecore are specified, then the value of kernelcore
4365	 * will be used for required_kernelcore if it's greater than
4366	 * what movablecore would have allowed.
4367	 */
4368	if (required_movablecore) {
4369		unsigned long corepages;
4370
4371		/*
4372		 * Round-up so that ZONE_MOVABLE is at least as large as what
4373		 * was requested by the user
4374		 */
4375		required_movablecore =
4376			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4377		corepages = totalpages - required_movablecore;
4378
4379		required_kernelcore = max(required_kernelcore, corepages);
4380	}
4381
4382	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
4383	if (!required_kernelcore)
4384		goto out;
4385
4386	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4387	find_usable_zone_for_movable();
4388	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4389
4390restart:
4391	/* Spread kernelcore memory as evenly as possible throughout nodes */
4392	kernelcore_node = required_kernelcore / usable_nodes;
4393	for_each_node_state(nid, N_HIGH_MEMORY) {
4394		/*
4395		 * Recalculate kernelcore_node if the division per node
4396		 * now exceeds what is necessary to satisfy the requested
4397		 * amount of memory for the kernel
4398		 */
4399		if (required_kernelcore < kernelcore_node)
4400			kernelcore_node = required_kernelcore / usable_nodes;
4401
4402		/*
4403		 * As the map is walked, we track how much memory is usable
4404		 * by the kernel using kernelcore_remaining. When it is
4405		 * 0, the rest of the node is usable by ZONE_MOVABLE
4406		 */
4407		kernelcore_remaining = kernelcore_node;
4408
4409		/* Go through each range of PFNs within this node */
4410		for_each_active_range_index_in_nid(i, nid) {
4411			unsigned long start_pfn, end_pfn;
4412			unsigned long size_pages;
4413
4414			start_pfn = max(early_node_map[i].start_pfn,
4415						zone_movable_pfn[nid]);
4416			end_pfn = early_node_map[i].end_pfn;
4417			if (start_pfn >= end_pfn)
4418				continue;
4419
4420			/* Account for what is only usable for kernelcore */
4421			if (start_pfn < usable_startpfn) {
4422				unsigned long kernel_pages;
4423				kernel_pages = min(end_pfn, usable_startpfn)
4424								- start_pfn;
4425
4426				kernelcore_remaining -= min(kernel_pages,
4427							kernelcore_remaining);
4428				required_kernelcore -= min(kernel_pages,
4429							required_kernelcore);
4430
4431				/* Continue if range is now fully accounted */
4432				if (end_pfn <= usable_startpfn) {
4433
4434					/*
4435					 * Push zone_movable_pfn to the end so
4436					 * that if we have to rebalance
4437					 * kernelcore across nodes, we will
4438					 * not double account here
4439					 */
4440					zone_movable_pfn[nid] = end_pfn;
4441					continue;
4442				}
4443				start_pfn = usable_startpfn;
4444			}
4445
4446			/*
4447			 * The usable PFN range for ZONE_MOVABLE is from
4448			 * start_pfn->end_pfn. Calculate size_pages as the
4449			 * number of pages used as kernelcore
4450			 */
4451			size_pages = end_pfn - start_pfn;
4452			if (size_pages > kernelcore_remaining)
4453				size_pages = kernelcore_remaining;
4454			zone_movable_pfn[nid] = start_pfn + size_pages;
4455
4456			/*
4457			 * Some kernelcore has been met, update counts and
4458			 * break if the kernelcore for this node has been
4459			 * satisified
4460			 */
4461			required_kernelcore -= min(required_kernelcore,
4462								size_pages);
4463			kernelcore_remaining -= size_pages;
4464			if (!kernelcore_remaining)
4465				break;
4466		}
4467	}
4468
4469	/*
4470	 * If there is still required_kernelcore, we do another pass with one
4471	 * less node in the count. This will push zone_movable_pfn[nid] further
4472	 * along on the nodes that still have memory until kernelcore is
4473	 * satisified
4474	 */
4475	usable_nodes--;
4476	if (usable_nodes && required_kernelcore > usable_nodes)
4477		goto restart;
4478
4479	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4480	for (nid = 0; nid < MAX_NUMNODES; nid++)
4481		zone_movable_pfn[nid] =
4482			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4483
4484out:
4485	/* restore the node_state */
4486	node_states[N_HIGH_MEMORY] = saved_node_state;
4487}
4488
4489/* Any regular memory on that node ? */
4490static void check_for_regular_memory(pg_data_t *pgdat)
4491{
4492#ifdef CONFIG_HIGHMEM
4493	enum zone_type zone_type;
4494
4495	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4496		struct zone *zone = &pgdat->node_zones[zone_type];
4497		if (zone->present_pages)
4498			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4499	}
4500#endif
4501}
4502
4503/**
4504 * free_area_init_nodes - Initialise all pg_data_t and zone data
4505 * @max_zone_pfn: an array of max PFNs for each zone
4506 *
4507 * This will call free_area_init_node() for each active node in the system.
4508 * Using the page ranges provided by add_active_range(), the size of each
4509 * zone in each node and their holes is calculated. If the maximum PFN
4510 * between two adjacent zones match, it is assumed that the zone is empty.
4511 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4512 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4513 * starts where the previous one ended. For example, ZONE_DMA32 starts
4514 * at arch_max_dma_pfn.
4515 */
4516void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4517{
4518	unsigned long nid;
4519	int i;
4520
4521	/* Sort early_node_map as initialisation assumes it is sorted */
4522	sort_node_map();
4523
4524	/* Record where the zone boundaries are */
4525	memset(arch_zone_lowest_possible_pfn, 0,
4526				sizeof(arch_zone_lowest_possible_pfn));
4527	memset(arch_zone_highest_possible_pfn, 0,
4528				sizeof(arch_zone_highest_possible_pfn));
4529	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4530	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4531	for (i = 1; i < MAX_NR_ZONES; i++) {
4532		if (i == ZONE_MOVABLE)
4533			continue;
4534		arch_zone_lowest_possible_pfn[i] =
4535			arch_zone_highest_possible_pfn[i-1];
4536		arch_zone_highest_possible_pfn[i] =
4537			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4538	}
4539	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4540	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4541
4542	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4543	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4544	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4545
4546	/* Print out the zone ranges */
4547	printk("Zone PFN ranges:\n");
4548	for (i = 0; i < MAX_NR_ZONES; i++) {
4549		if (i == ZONE_MOVABLE)
4550			continue;
4551		printk("  %-8s ", zone_names[i]);
4552		if (arch_zone_lowest_possible_pfn[i] ==
4553				arch_zone_highest_possible_pfn[i])
4554			printk("empty\n");
4555		else
4556			printk("%0#10lx -> %0#10lx\n",
4557				arch_zone_lowest_possible_pfn[i],
4558				arch_zone_highest_possible_pfn[i]);
4559	}
4560
4561	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4562	printk("Movable zone start PFN for each node\n");
4563	for (i = 0; i < MAX_NUMNODES; i++) {
4564		if (zone_movable_pfn[i])
4565			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4566	}
4567
4568	/* Print out the early_node_map[] */
4569	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4570	for (i = 0; i < nr_nodemap_entries; i++)
4571		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4572						early_node_map[i].start_pfn,
4573						early_node_map[i].end_pfn);
4574
4575	/* Initialise every node */
4576	mminit_verify_pageflags_layout();
4577	setup_nr_node_ids();
4578	for_each_online_node(nid) {
4579		pg_data_t *pgdat = NODE_DATA(nid);
4580		free_area_init_node(nid, NULL,
4581				find_min_pfn_for_node(nid), NULL);
4582
4583		/* Any memory on that node */
4584		if (pgdat->node_present_pages)
4585			node_set_state(nid, N_HIGH_MEMORY);
4586		check_for_regular_memory(pgdat);
4587	}
4588}
4589
4590static int __init cmdline_parse_core(char *p, unsigned long *core)
4591{
4592	unsigned long long coremem;
4593	if (!p)
4594		return -EINVAL;
4595
4596	coremem = memparse(p, &p);
4597	*core = coremem >> PAGE_SHIFT;
4598
4599	/* Paranoid check that UL is enough for the coremem value */
4600	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4601
4602	return 0;
4603}
4604
4605/*
4606 * kernelcore=size sets the amount of memory for use for allocations that
4607 * cannot be reclaimed or migrated.
4608 */
4609static int __init cmdline_parse_kernelcore(char *p)
4610{
4611	return cmdline_parse_core(p, &required_kernelcore);
4612}
4613
4614/*
4615 * movablecore=size sets the amount of memory for use for allocations that
4616 * can be reclaimed or migrated.
4617 */
4618static int __init cmdline_parse_movablecore(char *p)
4619{
4620	return cmdline_parse_core(p, &required_movablecore);
4621}
4622
4623early_param("kernelcore", cmdline_parse_kernelcore);
4624early_param("movablecore", cmdline_parse_movablecore);
4625
4626#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4627
4628/**
4629 * set_dma_reserve - set the specified number of pages reserved in the first zone
4630 * @new_dma_reserve: The number of pages to mark reserved
4631 *
4632 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4633 * In the DMA zone, a significant percentage may be consumed by kernel image
4634 * and other unfreeable allocations which can skew the watermarks badly. This
4635 * function may optionally be used to account for unfreeable pages in the
4636 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4637 * smaller per-cpu batchsize.
4638 */
4639void __init set_dma_reserve(unsigned long new_dma_reserve)
4640{
4641	dma_reserve = new_dma_reserve;
4642}
4643
4644#ifndef CONFIG_NEED_MULTIPLE_NODES
4645struct pglist_data __refdata contig_page_data = {
4646#ifndef CONFIG_NO_BOOTMEM
4647 .bdata = &bootmem_node_data[0]
4648#endif
4649 };
4650EXPORT_SYMBOL(contig_page_data);
4651#endif
4652
4653void __init free_area_init(unsigned long *zones_size)
4654{
4655	free_area_init_node(0, zones_size,
4656			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4657}
4658
4659static int page_alloc_cpu_notify(struct notifier_block *self,
4660				 unsigned long action, void *hcpu)
4661{
4662	int cpu = (unsigned long)hcpu;
4663
4664	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4665		drain_pages(cpu);
4666
4667		/*
4668		 * Spill the event counters of the dead processor
4669		 * into the current processors event counters.
4670		 * This artificially elevates the count of the current
4671		 * processor.
4672		 */
4673		vm_events_fold_cpu(cpu);
4674
4675		/*
4676		 * Zero the differential counters of the dead processor
4677		 * so that the vm statistics are consistent.
4678		 *
4679		 * This is only okay since the processor is dead and cannot
4680		 * race with what we are doing.
4681		 */
4682		refresh_cpu_vm_stats(cpu);
4683	}
4684	return NOTIFY_OK;
4685}
4686
4687void __init page_alloc_init(void)
4688{
4689	hotcpu_notifier(page_alloc_cpu_notify, 0);
4690}
4691
4692/*
4693 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
4694 *	or min_free_kbytes changes.
4695 */
4696static void calculate_totalreserve_pages(void)
4697{
4698	struct pglist_data *pgdat;
4699	unsigned long reserve_pages = 0;
4700	enum zone_type i, j;
4701
4702	for_each_online_pgdat(pgdat) {
4703		for (i = 0; i < MAX_NR_ZONES; i++) {
4704			struct zone *zone = pgdat->node_zones + i;
4705			unsigned long max = 0;
4706
4707			/* Find valid and maximum lowmem_reserve in the zone */
4708			for (j = i; j < MAX_NR_ZONES; j++) {
4709				if (zone->lowmem_reserve[j] > max)
4710					max = zone->lowmem_reserve[j];
4711			}
4712
4713			/* we treat the high watermark as reserved pages. */
4714			max += high_wmark_pages(zone);
4715
4716			if (max > zone->present_pages)
4717				max = zone->present_pages;
4718			reserve_pages += max;
4719		}
4720	}
4721	totalreserve_pages = reserve_pages;
4722}
4723
4724/*
4725 * setup_per_zone_lowmem_reserve - called whenever
4726 *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
4727 *	has a correct pages reserved value, so an adequate number of
4728 *	pages are left in the zone after a successful __alloc_pages().
4729 */
4730static void setup_per_zone_lowmem_reserve(void)
4731{
4732	struct pglist_data *pgdat;
4733	enum zone_type j, idx;
4734
4735	for_each_online_pgdat(pgdat) {
4736		for (j = 0; j < MAX_NR_ZONES; j++) {
4737			struct zone *zone = pgdat->node_zones + j;
4738			unsigned long present_pages = zone->present_pages;
4739
4740			zone->lowmem_reserve[j] = 0;
4741
4742			idx = j;
4743			while (idx) {
4744				struct zone *lower_zone;
4745
4746				idx--;
4747
4748				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4749					sysctl_lowmem_reserve_ratio[idx] = 1;
4750
4751				lower_zone = pgdat->node_zones + idx;
4752				lower_zone->lowmem_reserve[j] = present_pages /
4753					sysctl_lowmem_reserve_ratio[idx];
4754				present_pages += lower_zone->present_pages;
4755			}
4756		}
4757	}
4758
4759	/* update totalreserve_pages */
4760	calculate_totalreserve_pages();
4761}
4762
4763/**
4764 * setup_per_zone_wmarks - called when min_free_kbytes changes
4765 * or when memory is hot-{added|removed}
4766 *
4767 * Ensures that the watermark[min,low,high] values for each zone are set
4768 * correctly with respect to min_free_kbytes.
4769 */
4770void setup_per_zone_wmarks(void)
4771{
4772	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4773	unsigned long lowmem_pages = 0;
4774	struct zone *zone;
4775	unsigned long flags;
4776
4777	/* Calculate total number of !ZONE_HIGHMEM pages */
4778	for_each_zone(zone) {
4779		if (!is_highmem(zone))
4780			lowmem_pages += zone->present_pages;
4781	}
4782
4783	for_each_zone(zone) {
4784		u64 tmp;
4785
4786		spin_lock_irqsave(&zone->lock, flags);
4787		tmp = (u64)pages_min * zone->present_pages;
4788		do_div(tmp, lowmem_pages);
4789		if (is_highmem(zone)) {
4790			/*
4791			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4792			 * need highmem pages, so cap pages_min to a small
4793			 * value here.
4794			 *
4795			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
4796			 * deltas controls asynch page reclaim, and so should
4797			 * not be capped for highmem.
4798			 */
4799			int min_pages;
4800
4801			min_pages = zone->present_pages / 1024;
4802			if (min_pages < SWAP_CLUSTER_MAX)
4803				min_pages = SWAP_CLUSTER_MAX;
4804			if (min_pages > 128)
4805				min_pages = 128;
4806			zone->watermark[WMARK_MIN] = min_pages;
4807		} else {
4808			/*
4809			 * If it's a lowmem zone, reserve a number of pages
4810			 * proportionate to the zone's size.
4811			 */
4812			zone->watermark[WMARK_MIN] = tmp;
4813		}
4814
4815		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
4816		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4817		setup_zone_migrate_reserve(zone);
4818		spin_unlock_irqrestore(&zone->lock, flags);
4819	}
4820
4821	/* update totalreserve_pages */
4822	calculate_totalreserve_pages();
4823}
4824
4825/*
4826 * The inactive anon list should be small enough that the VM never has to
4827 * do too much work, but large enough that each inactive page has a chance
4828 * to be referenced again before it is swapped out.
4829 *
4830 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4831 * INACTIVE_ANON pages on this zone's LRU, maintained by the
4832 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4833 * the anonymous pages are kept on the inactive list.
4834 *
4835 * total     target    max
4836 * memory    ratio     inactive anon
4837 * -------------------------------------
4838 *   10MB       1         5MB
4839 *  100MB       1        50MB
4840 *    1GB       3       250MB
4841 *   10GB      10       0.9GB
4842 *  100GB      31         3GB
4843 *    1TB     101        10GB
4844 *   10TB     320        32GB
4845 */
4846void calculate_zone_inactive_ratio(struct zone *zone)
4847{
4848	unsigned int gb, ratio;
4849
4850	/* Zone size in gigabytes */
4851	gb = zone->present_pages >> (30 - PAGE_SHIFT);
4852	if (gb)
4853		ratio = int_sqrt(10 * gb);
4854	else
4855		ratio = 1;
4856
4857	zone->inactive_ratio = ratio;
4858}
4859
4860static void __init setup_per_zone_inactive_ratio(void)
4861{
4862	struct zone *zone;
4863
4864	for_each_zone(zone)
4865		calculate_zone_inactive_ratio(zone);
4866}
4867
4868/*
4869 * Initialise min_free_kbytes.
4870 *
4871 * For small machines we want it small (128k min).  For large machines
4872 * we want it large (64MB max).  But it is not linear, because network
4873 * bandwidth does not increase linearly with machine size.  We use
4874 *
4875 * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4876 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
4877 *
4878 * which yields
4879 *
4880 * 16MB:	512k
4881 * 32MB:	724k
4882 * 64MB:	1024k
4883 * 128MB:	1448k
4884 * 256MB:	2048k
4885 * 512MB:	2896k
4886 * 1024MB:	4096k
4887 * 2048MB:	5792k
4888 * 4096MB:	8192k
4889 * 8192MB:	11584k
4890 * 16384MB:	16384k
4891 */
4892static int __init init_per_zone_wmark_min(void)
4893{
4894	unsigned long lowmem_kbytes;
4895
4896	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4897
4898	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4899	if (min_free_kbytes < 128)
4900		min_free_kbytes = 128;
4901	if (min_free_kbytes > 65536)
4902		min_free_kbytes = 65536;
4903	setup_per_zone_wmarks();
4904	setup_per_zone_lowmem_reserve();
4905	setup_per_zone_inactive_ratio();
4906	return 0;
4907}
4908module_init(init_per_zone_wmark_min)
4909
4910/*
4911 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4912 *	that we can call two helper functions whenever min_free_kbytes
4913 *	changes.
4914 */
4915int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4916	void __user *buffer, size_t *length, loff_t *ppos)
4917{
4918	proc_dointvec(table, write, buffer, length, ppos);
4919	if (write)
4920		setup_per_zone_wmarks();
4921	return 0;
4922}
4923
4924#ifdef CONFIG_NUMA
4925int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4926	void __user *buffer, size_t *length, loff_t *ppos)
4927{
4928	struct zone *zone;
4929	int rc;
4930
4931	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4932	if (rc)
4933		return rc;
4934
4935	for_each_zone(zone)
4936		zone->min_unmapped_pages = (zone->present_pages *
4937				sysctl_min_unmapped_ratio) / 100;
4938	return 0;
4939}
4940
4941int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4942	void __user *buffer, size_t *length, loff_t *ppos)
4943{
4944	struct zone *zone;
4945	int rc;
4946
4947	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4948	if (rc)
4949		return rc;
4950
4951	for_each_zone(zone)
4952		zone->min_slab_pages = (zone->present_pages *
4953				sysctl_min_slab_ratio) / 100;
4954	return 0;
4955}
4956#endif
4957
4958/*
4959 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4960 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4961 *	whenever sysctl_lowmem_reserve_ratio changes.
4962 *
4963 * The reserve ratio obviously has absolutely no relation with the
4964 * minimum watermarks. The lowmem reserve ratio can only make sense
4965 * if in function of the boot time zone sizes.
4966 */
4967int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4968	void __user *buffer, size_t *length, loff_t *ppos)
4969{
4970	proc_dointvec_minmax(table, write, buffer, length, ppos);
4971	setup_per_zone_lowmem_reserve();
4972	return 0;
4973}
4974
4975/*
4976 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4977 * cpu.  It is the fraction of total pages in each zone that a hot per cpu pagelist
4978 * can have before it gets flushed back to buddy allocator.
4979 */
4980
4981int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4982	void __user *buffer, size_t *length, loff_t *ppos)
4983{
4984	struct zone *zone;
4985	unsigned int cpu;
4986	int ret;
4987
4988	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
4989	if (!write || (ret == -EINVAL))
4990		return ret;
4991	for_each_populated_zone(zone) {
4992		for_each_possible_cpu(cpu) {
4993			unsigned long  high;
4994			high = zone->present_pages / percpu_pagelist_fraction;
4995			setup_pagelist_highmark(
4996				per_cpu_ptr(zone->pageset, cpu), high);
4997		}
4998	}
4999	return 0;
5000}
5001
5002int hashdist = HASHDIST_DEFAULT;
5003
5004#ifdef CONFIG_NUMA
5005static int __init set_hashdist(char *str)
5006{
5007	if (!str)
5008		return 0;
5009	hashdist = simple_strtoul(str, &str, 0);
5010	return 1;
5011}
5012__setup("hashdist=", set_hashdist);
5013#endif
5014
5015/*
5016 * allocate a large system hash table from bootmem
5017 * - it is assumed that the hash table must contain an exact power-of-2
5018 *   quantity of entries
5019 * - limit is the number of hash buckets, not the total allocation size
5020 */
5021void *__init alloc_large_system_hash(const char *tablename,
5022				     unsigned long bucketsize,
5023				     unsigned long numentries,
5024				     int scale,
5025				     int flags,
5026				     unsigned int *_hash_shift,
5027				     unsigned int *_hash_mask,
5028				     unsigned long limit)
5029{
5030	unsigned long long max = limit;
5031	unsigned long log2qty, size;
5032	void *table = NULL;
5033
5034	/* allow the kernel cmdline to have a say */
5035	if (!numentries) {
5036		/* round applicable memory size up to nearest megabyte */
5037		numentries = nr_kernel_pages;
5038		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5039		numentries >>= 20 - PAGE_SHIFT;
5040		numentries <<= 20 - PAGE_SHIFT;
5041
5042		/* limit to 1 bucket per 2^scale bytes of low memory */
5043		if (scale > PAGE_SHIFT)
5044			numentries >>= (scale - PAGE_SHIFT);
5045		else
5046			numentries <<= (PAGE_SHIFT - scale);
5047
5048		/* Make sure we've got at least a 0-order allocation.. */
5049		if (unlikely(flags & HASH_SMALL)) {
5050			/* Makes no sense without HASH_EARLY */
5051			WARN_ON(!(flags & HASH_EARLY));
5052			if (!(numentries >> *_hash_shift)) {
5053				numentries = 1UL << *_hash_shift;
5054				BUG_ON(!numentries);
5055			}
5056		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
5057			numentries = PAGE_SIZE / bucketsize;
5058	}
5059	numentries = roundup_pow_of_two(numentries);
5060
5061	/* limit allocation size to 1/16 total memory by default */
5062	if (max == 0) {
5063		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5064		do_div(max, bucketsize);
5065	}
5066
5067	if (numentries > max)
5068		numentries = max;
5069
5070	log2qty = ilog2(numentries);
5071
5072	do {
5073		size = bucketsize << log2qty;
5074		if (flags & HASH_EARLY)
5075			table = alloc_bootmem_nopanic(size);
5076		else if (hashdist)
5077			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5078		else {
5079			/*
5080			 * If bucketsize is not a power-of-two, we may free
5081			 * some pages at the end of hash table which
5082			 * alloc_pages_exact() automatically does
5083			 */
5084			if (get_order(size) < MAX_ORDER) {
5085				table = alloc_pages_exact(size, GFP_ATOMIC);
5086				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5087			}
5088		}
5089	} while (!table && size > PAGE_SIZE && --log2qty);
5090
5091	if (!table)
5092		panic("Failed to allocate %s hash table\n", tablename);
5093
5094	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
5095	       tablename,
5096	       (1U << log2qty),
5097	       ilog2(size) - PAGE_SHIFT,
5098	       size);
5099
5100	if (_hash_shift)
5101		*_hash_shift = log2qty;
5102	if (_hash_mask)
5103		*_hash_mask = (1 << log2qty) - 1;
5104
5105	return table;
5106}
5107
5108/* Return a pointer to the bitmap storing bits affecting a block of pages */
5109static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
5110							unsigned long pfn)
5111{
5112#ifdef CONFIG_SPARSEMEM
5113	return __pfn_to_section(pfn)->pageblock_flags;
5114#else
5115	return zone->pageblock_flags;
5116#endif /* CONFIG_SPARSEMEM */
5117}
5118
5119static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5120{
5121#ifdef CONFIG_SPARSEMEM
5122	pfn &= (PAGES_PER_SECTION-1);
5123	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5124#else
5125	pfn = pfn - zone->zone_start_pfn;
5126	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5127#endif /* CONFIG_SPARSEMEM */
5128}
5129
5130/**
5131 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
5132 * @page: The page within the block of interest
5133 * @start_bitidx: The first bit of interest to retrieve
5134 * @end_bitidx: The last bit of interest
5135 * returns pageblock_bits flags
5136 */
5137unsigned long get_pageblock_flags_group(struct page *page,
5138					int start_bitidx, int end_bitidx)
5139{
5140	struct zone *zone;
5141	unsigned long *bitmap;
5142	unsigned long pfn, bitidx;
5143	unsigned long flags = 0;
5144	unsigned long value = 1;
5145
5146	zone = page_zone(page);
5147	pfn = page_to_pfn(page);
5148	bitmap = get_pageblock_bitmap(zone, pfn);
5149	bitidx = pfn_to_bitidx(zone, pfn);
5150
5151	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5152		if (test_bit(bitidx + start_bitidx, bitmap))
5153			flags |= value;
5154
5155	return flags;
5156}
5157
5158/**
5159 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
5160 * @page: The page within the block of interest
5161 * @start_bitidx: The first bit of interest
5162 * @end_bitidx: The last bit of interest
5163 * @flags: The flags to set
5164 */
5165void set_pageblock_flags_group(struct page *page, unsigned long flags,
5166					int start_bitidx, int end_bitidx)
5167{
5168	struct zone *zone;
5169	unsigned long *bitmap;
5170	unsigned long pfn, bitidx;
5171	unsigned long value = 1;
5172
5173	zone = page_zone(page);
5174	pfn = page_to_pfn(page);
5175	bitmap = get_pageblock_bitmap(zone, pfn);
5176	bitidx = pfn_to_bitidx(zone, pfn);
5177	VM_BUG_ON(pfn < zone->zone_start_pfn);
5178	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
5179
5180	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5181		if (flags & value)
5182			__set_bit(bitidx + start_bitidx, bitmap);
5183		else
5184			__clear_bit(bitidx + start_bitidx, bitmap);
5185}
5186
5187/*
5188 * This is designed as sub function...plz see page_isolation.c also.
5189 * set/clear page block's type to be ISOLATE.
5190 * page allocater never alloc memory from ISOLATE block.
5191 */
5192
5193int set_migratetype_isolate(struct page *page)
5194{
5195	struct zone *zone;
5196	struct page *curr_page;
5197	unsigned long flags, pfn, iter;
5198	unsigned long immobile = 0;
5199	struct memory_isolate_notify arg;
5200	int notifier_ret;
5201	int ret = -EBUSY;
5202	int zone_idx;
5203
5204	zone = page_zone(page);
5205	zone_idx = zone_idx(zone);
5206
5207	spin_lock_irqsave(&zone->lock, flags);
5208	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE ||
5209	    zone_idx == ZONE_MOVABLE) {
5210		ret = 0;
5211		goto out;
5212	}
5213
5214	pfn = page_to_pfn(page);
5215	arg.start_pfn = pfn;
5216	arg.nr_pages = pageblock_nr_pages;
5217	arg.pages_found = 0;
5218
5219	/*
5220	 * It may be possible to isolate a pageblock even if the
5221	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
5222	 * notifier chain is used by balloon drivers to return the
5223	 * number of pages in a range that are held by the balloon
5224	 * driver to shrink memory. If all the pages are accounted for
5225	 * by balloons, are free, or on the LRU, isolation can continue.
5226	 * Later, for example, when memory hotplug notifier runs, these
5227	 * pages reported as "can be isolated" should be isolated(freed)
5228	 * by the balloon driver through the memory notifier chain.
5229	 */
5230	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
5231	notifier_ret = notifier_to_errno(notifier_ret);
5232	if (notifier_ret || !arg.pages_found)
5233		goto out;
5234
5235	for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) {
5236		if (!pfn_valid_within(pfn))
5237			continue;
5238
5239		curr_page = pfn_to_page(iter);
5240		if (!page_count(curr_page) || PageLRU(curr_page))
5241			continue;
5242
5243		immobile++;
5244	}
5245
5246	if (arg.pages_found == immobile)
5247		ret = 0;
5248
5249out:
5250	if (!ret) {
5251		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5252		move_freepages_block(zone, page, MIGRATE_ISOLATE);
5253	}
5254
5255	spin_unlock_irqrestore(&zone->lock, flags);
5256	if (!ret)
5257		drain_all_pages();
5258	return ret;
5259}
5260
5261void unset_migratetype_isolate(struct page *page)
5262{
5263	struct zone *zone;
5264	unsigned long flags;
5265	zone = page_zone(page);
5266	spin_lock_irqsave(&zone->lock, flags);
5267	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5268		goto out;
5269	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5270	move_freepages_block(zone, page, MIGRATE_MOVABLE);
5271out:
5272	spin_unlock_irqrestore(&zone->lock, flags);
5273}
5274
5275#ifdef CONFIG_MEMORY_HOTREMOVE
5276/*
5277 * All pages in the range must be isolated before calling this.
5278 */
5279void
5280__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5281{
5282	struct page *page;
5283	struct zone *zone;
5284	int order, i;
5285	unsigned long pfn;
5286	unsigned long flags;
5287	/* find the first valid pfn */
5288	for (pfn = start_pfn; pfn < end_pfn; pfn++)
5289		if (pfn_valid(pfn))
5290			break;
5291	if (pfn == end_pfn)
5292		return;
5293	zone = page_zone(pfn_to_page(pfn));
5294	spin_lock_irqsave(&zone->lock, flags);
5295	pfn = start_pfn;
5296	while (pfn < end_pfn) {
5297		if (!pfn_valid(pfn)) {
5298			pfn++;
5299			continue;
5300		}
5301		page = pfn_to_page(pfn);
5302		BUG_ON(page_count(page));
5303		BUG_ON(!PageBuddy(page));
5304		order = page_order(page);
5305#ifdef CONFIG_DEBUG_VM
5306		printk(KERN_INFO "remove from free list %lx %d %lx\n",
5307		       pfn, 1 << order, end_pfn);
5308#endif
5309		list_del(&page->lru);
5310		rmv_page_order(page);
5311		zone->free_area[order].nr_free--;
5312		__mod_zone_page_state(zone, NR_FREE_PAGES,
5313				      - (1UL << order));
5314		for (i = 0; i < (1 << order); i++)
5315			SetPageReserved((page+i));
5316		pfn += (1 << order);
5317	}
5318	spin_unlock_irqrestore(&zone->lock, flags);
5319}
5320#endif
5321
5322#ifdef CONFIG_MEMORY_FAILURE
5323bool is_free_buddy_page(struct page *page)
5324{
5325	struct zone *zone = page_zone(page);
5326	unsigned long pfn = page_to_pfn(page);
5327	unsigned long flags;
5328	int order;
5329
5330	spin_lock_irqsave(&zone->lock, flags);
5331	for (order = 0; order < MAX_ORDER; order++) {
5332		struct page *page_head = page - (pfn & ((1 << order) - 1));
5333
5334		if (PageBuddy(page_head) && page_order(page_head) >= order)
5335			break;
5336	}
5337	spin_unlock_irqrestore(&zone->lock, flags);
5338
5339	return order < MAX_ORDER;
5340}
5341#endif
5342
5343static struct trace_print_flags pageflag_names[] = {
5344	{1UL << PG_locked,		"locked"	},
5345	{1UL << PG_error,		"error"		},
5346	{1UL << PG_referenced,		"referenced"	},
5347	{1UL << PG_uptodate,		"uptodate"	},
5348	{1UL << PG_dirty,		"dirty"		},
5349	{1UL << PG_lru,			"lru"		},
5350	{1UL << PG_active,		"active"	},
5351	{1UL << PG_slab,		"slab"		},
5352	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
5353	{1UL << PG_arch_1,		"arch_1"	},
5354	{1UL << PG_reserved,		"reserved"	},
5355	{1UL << PG_private,		"private"	},
5356	{1UL << PG_private_2,		"private_2"	},
5357	{1UL << PG_writeback,		"writeback"	},
5358#ifdef CONFIG_PAGEFLAGS_EXTENDED
5359	{1UL << PG_head,		"head"		},
5360	{1UL << PG_tail,		"tail"		},
5361#else
5362	{1UL << PG_compound,		"compound"	},
5363#endif
5364	{1UL << PG_swapcache,		"swapcache"	},
5365	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
5366	{1UL << PG_reclaim,		"reclaim"	},
5367	{1UL << PG_buddy,		"buddy"		},
5368	{1UL << PG_swapbacked,		"swapbacked"	},
5369	{1UL << PG_unevictable,		"unevictable"	},
5370#ifdef CONFIG_MMU
5371	{1UL << PG_mlocked,		"mlocked"	},
5372#endif
5373#ifdef CONFIG_ARCH_USES_PG_UNCACHED
5374	{1UL << PG_uncached,		"uncached"	},
5375#endif
5376#ifdef CONFIG_MEMORY_FAILURE
5377	{1UL << PG_hwpoison,		"hwpoison"	},
5378#endif
5379	{-1UL,				NULL		},
5380};
5381
5382static void dump_page_flags(unsigned long flags)
5383{
5384	const char *delim = "";
5385	unsigned long mask;
5386	int i;
5387
5388	printk(KERN_ALERT "page flags: %#lx(", flags);
5389
5390	/* remove zone id */
5391	flags &= (1UL << NR_PAGEFLAGS) - 1;
5392
5393	for (i = 0; pageflag_names[i].name && flags; i++) {
5394
5395		mask = pageflag_names[i].mask;
5396		if ((flags & mask) != mask)
5397			continue;
5398
5399		flags &= ~mask;
5400		printk("%s%s", delim, pageflag_names[i].name);
5401		delim = "|";
5402	}
5403
5404	/* check for left over flags */
5405	if (flags)
5406		printk("%s%#lx", delim, flags);
5407
5408	printk(")\n");
5409}
5410
5411void dump_page(struct page *page)
5412{
5413	printk(KERN_ALERT
5414	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
5415		page, page_count(page), page_mapcount(page),
5416		page->mapping, page->index);
5417	dump_page_flags(page->flags);
5418}
5419