page_alloc.c revision 8d65af789f3e2cf4cfbdbf71a0f7a61ebcd41d38
1/*
2 *  linux/mm/page_alloc.c
3 *
4 *  Manages the free list, the system allocates free pages here.
5 *  Note that kmalloc() lives in slab.c
6 *
7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8 *  Swap reorganised 29.12.95, Stephen Tweedie
9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
22#include <linux/jiffies.h>
23#include <linux/bootmem.h>
24#include <linux/compiler.h>
25#include <linux/kernel.h>
26#include <linux/kmemcheck.h>
27#include <linux/module.h>
28#include <linux/suspend.h>
29#include <linux/pagevec.h>
30#include <linux/blkdev.h>
31#include <linux/slab.h>
32#include <linux/oom.h>
33#include <linux/notifier.h>
34#include <linux/topology.h>
35#include <linux/sysctl.h>
36#include <linux/cpu.h>
37#include <linux/cpuset.h>
38#include <linux/memory_hotplug.h>
39#include <linux/nodemask.h>
40#include <linux/vmalloc.h>
41#include <linux/mempolicy.h>
42#include <linux/stop_machine.h>
43#include <linux/sort.h>
44#include <linux/pfn.h>
45#include <linux/backing-dev.h>
46#include <linux/fault-inject.h>
47#include <linux/page-isolation.h>
48#include <linux/page_cgroup.h>
49#include <linux/debugobjects.h>
50#include <linux/kmemleak.h>
51#include <trace/events/kmem.h>
52
53#include <asm/tlbflush.h>
54#include <asm/div64.h>
55#include "internal.h"
56
57/*
58 * Array of node states.
59 */
60nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
61	[N_POSSIBLE] = NODE_MASK_ALL,
62	[N_ONLINE] = { { [0] = 1UL } },
63#ifndef CONFIG_NUMA
64	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
65#ifdef CONFIG_HIGHMEM
66	[N_HIGH_MEMORY] = { { [0] = 1UL } },
67#endif
68	[N_CPU] = { { [0] = 1UL } },
69#endif	/* NUMA */
70};
71EXPORT_SYMBOL(node_states);
72
73unsigned long totalram_pages __read_mostly;
74unsigned long totalreserve_pages __read_mostly;
75int percpu_pagelist_fraction;
76gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
77
78#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
79int pageblock_order __read_mostly;
80#endif
81
82static void __free_pages_ok(struct page *page, unsigned int order);
83
84/*
85 * results with 256, 32 in the lowmem_reserve sysctl:
86 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
87 *	1G machine -> (16M dma, 784M normal, 224M high)
88 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
89 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
90 *	HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
91 *
92 * TBD: should special case ZONE_DMA32 machines here - in those we normally
93 * don't need any ZONE_NORMAL reservation
94 */
95int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
96#ifdef CONFIG_ZONE_DMA
97	 256,
98#endif
99#ifdef CONFIG_ZONE_DMA32
100	 256,
101#endif
102#ifdef CONFIG_HIGHMEM
103	 32,
104#endif
105	 32,
106};
107
108EXPORT_SYMBOL(totalram_pages);
109
110static char * const zone_names[MAX_NR_ZONES] = {
111#ifdef CONFIG_ZONE_DMA
112	 "DMA",
113#endif
114#ifdef CONFIG_ZONE_DMA32
115	 "DMA32",
116#endif
117	 "Normal",
118#ifdef CONFIG_HIGHMEM
119	 "HighMem",
120#endif
121	 "Movable",
122};
123
124int min_free_kbytes = 1024;
125
126static unsigned long __meminitdata nr_kernel_pages;
127static unsigned long __meminitdata nr_all_pages;
128static unsigned long __meminitdata dma_reserve;
129
130#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
131  /*
132   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
133   * ranges of memory (RAM) that may be registered with add_active_range().
134   * Ranges passed to add_active_range() will be merged if possible
135   * so the number of times add_active_range() can be called is
136   * related to the number of nodes and the number of holes
137   */
138  #ifdef CONFIG_MAX_ACTIVE_REGIONS
139    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
140    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
141  #else
142    #if MAX_NUMNODES >= 32
143      /* If there can be many nodes, allow up to 50 holes per node */
144      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
145    #else
146      /* By default, allow up to 256 distinct regions */
147      #define MAX_ACTIVE_REGIONS 256
148    #endif
149  #endif
150
151  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
152  static int __meminitdata nr_nodemap_entries;
153  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
154  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
155  static unsigned long __initdata required_kernelcore;
156  static unsigned long __initdata required_movablecore;
157  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
158
159  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
160  int movable_zone;
161  EXPORT_SYMBOL(movable_zone);
162#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
163
164#if MAX_NUMNODES > 1
165int nr_node_ids __read_mostly = MAX_NUMNODES;
166int nr_online_nodes __read_mostly = 1;
167EXPORT_SYMBOL(nr_node_ids);
168EXPORT_SYMBOL(nr_online_nodes);
169#endif
170
171int page_group_by_mobility_disabled __read_mostly;
172
173static void set_pageblock_migratetype(struct page *page, int migratetype)
174{
175
176	if (unlikely(page_group_by_mobility_disabled))
177		migratetype = MIGRATE_UNMOVABLE;
178
179	set_pageblock_flags_group(page, (unsigned long)migratetype,
180					PB_migrate, PB_migrate_end);
181}
182
183bool oom_killer_disabled __read_mostly;
184
185#ifdef CONFIG_DEBUG_VM
186static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
187{
188	int ret = 0;
189	unsigned seq;
190	unsigned long pfn = page_to_pfn(page);
191
192	do {
193		seq = zone_span_seqbegin(zone);
194		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
195			ret = 1;
196		else if (pfn < zone->zone_start_pfn)
197			ret = 1;
198	} while (zone_span_seqretry(zone, seq));
199
200	return ret;
201}
202
203static int page_is_consistent(struct zone *zone, struct page *page)
204{
205	if (!pfn_valid_within(page_to_pfn(page)))
206		return 0;
207	if (zone != page_zone(page))
208		return 0;
209
210	return 1;
211}
212/*
213 * Temporary debugging check for pages not lying within a given zone.
214 */
215static int bad_range(struct zone *zone, struct page *page)
216{
217	if (page_outside_zone_boundaries(zone, page))
218		return 1;
219	if (!page_is_consistent(zone, page))
220		return 1;
221
222	return 0;
223}
224#else
225static inline int bad_range(struct zone *zone, struct page *page)
226{
227	return 0;
228}
229#endif
230
231static void bad_page(struct page *page)
232{
233	static unsigned long resume;
234	static unsigned long nr_shown;
235	static unsigned long nr_unshown;
236
237	/*
238	 * Allow a burst of 60 reports, then keep quiet for that minute;
239	 * or allow a steady drip of one report per second.
240	 */
241	if (nr_shown == 60) {
242		if (time_before(jiffies, resume)) {
243			nr_unshown++;
244			goto out;
245		}
246		if (nr_unshown) {
247			printk(KERN_ALERT
248			      "BUG: Bad page state: %lu messages suppressed\n",
249				nr_unshown);
250			nr_unshown = 0;
251		}
252		nr_shown = 0;
253	}
254	if (nr_shown++ == 0)
255		resume = jiffies + 60 * HZ;
256
257	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
258		current->comm, page_to_pfn(page));
259	printk(KERN_ALERT
260		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
261		page, (void *)page->flags, page_count(page),
262		page_mapcount(page), page->mapping, page->index);
263
264	dump_stack();
265out:
266	/* Leave bad fields for debug, except PageBuddy could make trouble */
267	__ClearPageBuddy(page);
268	add_taint(TAINT_BAD_PAGE);
269}
270
271/*
272 * Higher-order pages are called "compound pages".  They are structured thusly:
273 *
274 * The first PAGE_SIZE page is called the "head page".
275 *
276 * The remaining PAGE_SIZE pages are called "tail pages".
277 *
278 * All pages have PG_compound set.  All pages have their ->private pointing at
279 * the head page (even the head page has this).
280 *
281 * The first tail page's ->lru.next holds the address of the compound page's
282 * put_page() function.  Its ->lru.prev holds the order of allocation.
283 * This usage means that zero-order pages may not be compound.
284 */
285
286static void free_compound_page(struct page *page)
287{
288	__free_pages_ok(page, compound_order(page));
289}
290
291void prep_compound_page(struct page *page, unsigned long order)
292{
293	int i;
294	int nr_pages = 1 << order;
295
296	set_compound_page_dtor(page, free_compound_page);
297	set_compound_order(page, order);
298	__SetPageHead(page);
299	for (i = 1; i < nr_pages; i++) {
300		struct page *p = page + i;
301
302		__SetPageTail(p);
303		p->first_page = page;
304	}
305}
306
307static int destroy_compound_page(struct page *page, unsigned long order)
308{
309	int i;
310	int nr_pages = 1 << order;
311	int bad = 0;
312
313	if (unlikely(compound_order(page) != order) ||
314	    unlikely(!PageHead(page))) {
315		bad_page(page);
316		bad++;
317	}
318
319	__ClearPageHead(page);
320
321	for (i = 1; i < nr_pages; i++) {
322		struct page *p = page + i;
323
324		if (unlikely(!PageTail(p) || (p->first_page != page))) {
325			bad_page(page);
326			bad++;
327		}
328		__ClearPageTail(p);
329	}
330
331	return bad;
332}
333
334static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
335{
336	int i;
337
338	/*
339	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
340	 * and __GFP_HIGHMEM from hard or soft interrupt context.
341	 */
342	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
343	for (i = 0; i < (1 << order); i++)
344		clear_highpage(page + i);
345}
346
347static inline void set_page_order(struct page *page, int order)
348{
349	set_page_private(page, order);
350	__SetPageBuddy(page);
351}
352
353static inline void rmv_page_order(struct page *page)
354{
355	__ClearPageBuddy(page);
356	set_page_private(page, 0);
357}
358
359/*
360 * Locate the struct page for both the matching buddy in our
361 * pair (buddy1) and the combined O(n+1) page they form (page).
362 *
363 * 1) Any buddy B1 will have an order O twin B2 which satisfies
364 * the following equation:
365 *     B2 = B1 ^ (1 << O)
366 * For example, if the starting buddy (buddy2) is #8 its order
367 * 1 buddy is #10:
368 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
369 *
370 * 2) Any buddy B will have an order O+1 parent P which
371 * satisfies the following equation:
372 *     P = B & ~(1 << O)
373 *
374 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
375 */
376static inline struct page *
377__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
378{
379	unsigned long buddy_idx = page_idx ^ (1 << order);
380
381	return page + (buddy_idx - page_idx);
382}
383
384static inline unsigned long
385__find_combined_index(unsigned long page_idx, unsigned int order)
386{
387	return (page_idx & ~(1 << order));
388}
389
390/*
391 * This function checks whether a page is free && is the buddy
392 * we can do coalesce a page and its buddy if
393 * (a) the buddy is not in a hole &&
394 * (b) the buddy is in the buddy system &&
395 * (c) a page and its buddy have the same order &&
396 * (d) a page and its buddy are in the same zone.
397 *
398 * For recording whether a page is in the buddy system, we use PG_buddy.
399 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
400 *
401 * For recording page's order, we use page_private(page).
402 */
403static inline int page_is_buddy(struct page *page, struct page *buddy,
404								int order)
405{
406	if (!pfn_valid_within(page_to_pfn(buddy)))
407		return 0;
408
409	if (page_zone_id(page) != page_zone_id(buddy))
410		return 0;
411
412	if (PageBuddy(buddy) && page_order(buddy) == order) {
413		VM_BUG_ON(page_count(buddy) != 0);
414		return 1;
415	}
416	return 0;
417}
418
419/*
420 * Freeing function for a buddy system allocator.
421 *
422 * The concept of a buddy system is to maintain direct-mapped table
423 * (containing bit values) for memory blocks of various "orders".
424 * The bottom level table contains the map for the smallest allocatable
425 * units of memory (here, pages), and each level above it describes
426 * pairs of units from the levels below, hence, "buddies".
427 * At a high level, all that happens here is marking the table entry
428 * at the bottom level available, and propagating the changes upward
429 * as necessary, plus some accounting needed to play nicely with other
430 * parts of the VM system.
431 * At each level, we keep a list of pages, which are heads of continuous
432 * free pages of length of (1 << order) and marked with PG_buddy. Page's
433 * order is recorded in page_private(page) field.
434 * So when we are allocating or freeing one, we can derive the state of the
435 * other.  That is, if we allocate a small block, and both were
436 * free, the remainder of the region must be split into blocks.
437 * If a block is freed, and its buddy is also free, then this
438 * triggers coalescing into a block of larger size.
439 *
440 * -- wli
441 */
442
443static inline void __free_one_page(struct page *page,
444		struct zone *zone, unsigned int order,
445		int migratetype)
446{
447	unsigned long page_idx;
448
449	if (unlikely(PageCompound(page)))
450		if (unlikely(destroy_compound_page(page, order)))
451			return;
452
453	VM_BUG_ON(migratetype == -1);
454
455	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
456
457	VM_BUG_ON(page_idx & ((1 << order) - 1));
458	VM_BUG_ON(bad_range(zone, page));
459
460	while (order < MAX_ORDER-1) {
461		unsigned long combined_idx;
462		struct page *buddy;
463
464		buddy = __page_find_buddy(page, page_idx, order);
465		if (!page_is_buddy(page, buddy, order))
466			break;
467
468		/* Our buddy is free, merge with it and move up one order. */
469		list_del(&buddy->lru);
470		zone->free_area[order].nr_free--;
471		rmv_page_order(buddy);
472		combined_idx = __find_combined_index(page_idx, order);
473		page = page + (combined_idx - page_idx);
474		page_idx = combined_idx;
475		order++;
476	}
477	set_page_order(page, order);
478	list_add(&page->lru,
479		&zone->free_area[order].free_list[migratetype]);
480	zone->free_area[order].nr_free++;
481}
482
483#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
484/*
485 * free_page_mlock() -- clean up attempts to free and mlocked() page.
486 * Page should not be on lru, so no need to fix that up.
487 * free_pages_check() will verify...
488 */
489static inline void free_page_mlock(struct page *page)
490{
491	__dec_zone_page_state(page, NR_MLOCK);
492	__count_vm_event(UNEVICTABLE_MLOCKFREED);
493}
494#else
495static void free_page_mlock(struct page *page) { }
496#endif
497
498static inline int free_pages_check(struct page *page)
499{
500	if (unlikely(page_mapcount(page) |
501		(page->mapping != NULL)  |
502		(atomic_read(&page->_count) != 0) |
503		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
504		bad_page(page);
505		return 1;
506	}
507	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
508		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
509	return 0;
510}
511
512/*
513 * Frees a number of pages from the PCP lists
514 * Assumes all pages on list are in same zone, and of same order.
515 * count is the number of pages to free.
516 *
517 * If the zone was previously in an "all pages pinned" state then look to
518 * see if this freeing clears that state.
519 *
520 * And clear the zone's pages_scanned counter, to hold off the "all pages are
521 * pinned" detection logic.
522 */
523static void free_pcppages_bulk(struct zone *zone, int count,
524					struct per_cpu_pages *pcp)
525{
526	int migratetype = 0;
527	int batch_free = 0;
528
529	spin_lock(&zone->lock);
530	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
531	zone->pages_scanned = 0;
532
533	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
534	while (count) {
535		struct page *page;
536		struct list_head *list;
537
538		/*
539		 * Remove pages from lists in a round-robin fashion. A
540		 * batch_free count is maintained that is incremented when an
541		 * empty list is encountered.  This is so more pages are freed
542		 * off fuller lists instead of spinning excessively around empty
543		 * lists
544		 */
545		do {
546			batch_free++;
547			if (++migratetype == MIGRATE_PCPTYPES)
548				migratetype = 0;
549			list = &pcp->lists[migratetype];
550		} while (list_empty(list));
551
552		do {
553			page = list_entry(list->prev, struct page, lru);
554			/* must delete as __free_one_page list manipulates */
555			list_del(&page->lru);
556			__free_one_page(page, zone, 0, migratetype);
557			trace_mm_page_pcpu_drain(page, 0, migratetype);
558		} while (--count && --batch_free && !list_empty(list));
559	}
560	spin_unlock(&zone->lock);
561}
562
563static void free_one_page(struct zone *zone, struct page *page, int order,
564				int migratetype)
565{
566	spin_lock(&zone->lock);
567	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
568	zone->pages_scanned = 0;
569
570	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
571	__free_one_page(page, zone, order, migratetype);
572	spin_unlock(&zone->lock);
573}
574
575static void __free_pages_ok(struct page *page, unsigned int order)
576{
577	unsigned long flags;
578	int i;
579	int bad = 0;
580	int wasMlocked = __TestClearPageMlocked(page);
581
582	kmemcheck_free_shadow(page, order);
583
584	for (i = 0 ; i < (1 << order) ; ++i)
585		bad += free_pages_check(page + i);
586	if (bad)
587		return;
588
589	if (!PageHighMem(page)) {
590		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
591		debug_check_no_obj_freed(page_address(page),
592					   PAGE_SIZE << order);
593	}
594	arch_free_page(page, order);
595	kernel_map_pages(page, 1 << order, 0);
596
597	local_irq_save(flags);
598	if (unlikely(wasMlocked))
599		free_page_mlock(page);
600	__count_vm_events(PGFREE, 1 << order);
601	free_one_page(page_zone(page), page, order,
602					get_pageblock_migratetype(page));
603	local_irq_restore(flags);
604}
605
606/*
607 * permit the bootmem allocator to evade page validation on high-order frees
608 */
609void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
610{
611	if (order == 0) {
612		__ClearPageReserved(page);
613		set_page_count(page, 0);
614		set_page_refcounted(page);
615		__free_page(page);
616	} else {
617		int loop;
618
619		prefetchw(page);
620		for (loop = 0; loop < BITS_PER_LONG; loop++) {
621			struct page *p = &page[loop];
622
623			if (loop + 1 < BITS_PER_LONG)
624				prefetchw(p + 1);
625			__ClearPageReserved(p);
626			set_page_count(p, 0);
627		}
628
629		set_page_refcounted(page);
630		__free_pages(page, order);
631	}
632}
633
634
635/*
636 * The order of subdivision here is critical for the IO subsystem.
637 * Please do not alter this order without good reasons and regression
638 * testing. Specifically, as large blocks of memory are subdivided,
639 * the order in which smaller blocks are delivered depends on the order
640 * they're subdivided in this function. This is the primary factor
641 * influencing the order in which pages are delivered to the IO
642 * subsystem according to empirical testing, and this is also justified
643 * by considering the behavior of a buddy system containing a single
644 * large block of memory acted on by a series of small allocations.
645 * This behavior is a critical factor in sglist merging's success.
646 *
647 * -- wli
648 */
649static inline void expand(struct zone *zone, struct page *page,
650	int low, int high, struct free_area *area,
651	int migratetype)
652{
653	unsigned long size = 1 << high;
654
655	while (high > low) {
656		area--;
657		high--;
658		size >>= 1;
659		VM_BUG_ON(bad_range(zone, &page[size]));
660		list_add(&page[size].lru, &area->free_list[migratetype]);
661		area->nr_free++;
662		set_page_order(&page[size], high);
663	}
664}
665
666/*
667 * This page is about to be returned from the page allocator
668 */
669static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
670{
671	if (unlikely(page_mapcount(page) |
672		(page->mapping != NULL)  |
673		(atomic_read(&page->_count) != 0)  |
674		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
675		bad_page(page);
676		return 1;
677	}
678
679	set_page_private(page, 0);
680	set_page_refcounted(page);
681
682	arch_alloc_page(page, order);
683	kernel_map_pages(page, 1 << order, 1);
684
685	if (gfp_flags & __GFP_ZERO)
686		prep_zero_page(page, order, gfp_flags);
687
688	if (order && (gfp_flags & __GFP_COMP))
689		prep_compound_page(page, order);
690
691	return 0;
692}
693
694/*
695 * Go through the free lists for the given migratetype and remove
696 * the smallest available page from the freelists
697 */
698static inline
699struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
700						int migratetype)
701{
702	unsigned int current_order;
703	struct free_area * area;
704	struct page *page;
705
706	/* Find a page of the appropriate size in the preferred list */
707	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
708		area = &(zone->free_area[current_order]);
709		if (list_empty(&area->free_list[migratetype]))
710			continue;
711
712		page = list_entry(area->free_list[migratetype].next,
713							struct page, lru);
714		list_del(&page->lru);
715		rmv_page_order(page);
716		area->nr_free--;
717		expand(zone, page, order, current_order, area, migratetype);
718		return page;
719	}
720
721	return NULL;
722}
723
724
725/*
726 * This array describes the order lists are fallen back to when
727 * the free lists for the desirable migrate type are depleted
728 */
729static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
730	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
731	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
732	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
733	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
734};
735
736/*
737 * Move the free pages in a range to the free lists of the requested type.
738 * Note that start_page and end_pages are not aligned on a pageblock
739 * boundary. If alignment is required, use move_freepages_block()
740 */
741static int move_freepages(struct zone *zone,
742			  struct page *start_page, struct page *end_page,
743			  int migratetype)
744{
745	struct page *page;
746	unsigned long order;
747	int pages_moved = 0;
748
749#ifndef CONFIG_HOLES_IN_ZONE
750	/*
751	 * page_zone is not safe to call in this context when
752	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
753	 * anyway as we check zone boundaries in move_freepages_block().
754	 * Remove at a later date when no bug reports exist related to
755	 * grouping pages by mobility
756	 */
757	BUG_ON(page_zone(start_page) != page_zone(end_page));
758#endif
759
760	for (page = start_page; page <= end_page;) {
761		/* Make sure we are not inadvertently changing nodes */
762		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
763
764		if (!pfn_valid_within(page_to_pfn(page))) {
765			page++;
766			continue;
767		}
768
769		if (!PageBuddy(page)) {
770			page++;
771			continue;
772		}
773
774		order = page_order(page);
775		list_del(&page->lru);
776		list_add(&page->lru,
777			&zone->free_area[order].free_list[migratetype]);
778		page += 1 << order;
779		pages_moved += 1 << order;
780	}
781
782	return pages_moved;
783}
784
785static int move_freepages_block(struct zone *zone, struct page *page,
786				int migratetype)
787{
788	unsigned long start_pfn, end_pfn;
789	struct page *start_page, *end_page;
790
791	start_pfn = page_to_pfn(page);
792	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
793	start_page = pfn_to_page(start_pfn);
794	end_page = start_page + pageblock_nr_pages - 1;
795	end_pfn = start_pfn + pageblock_nr_pages - 1;
796
797	/* Do not cross zone boundaries */
798	if (start_pfn < zone->zone_start_pfn)
799		start_page = page;
800	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
801		return 0;
802
803	return move_freepages(zone, start_page, end_page, migratetype);
804}
805
806static void change_pageblock_range(struct page *pageblock_page,
807					int start_order, int migratetype)
808{
809	int nr_pageblocks = 1 << (start_order - pageblock_order);
810
811	while (nr_pageblocks--) {
812		set_pageblock_migratetype(pageblock_page, migratetype);
813		pageblock_page += pageblock_nr_pages;
814	}
815}
816
817/* Remove an element from the buddy allocator from the fallback list */
818static inline struct page *
819__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
820{
821	struct free_area * area;
822	int current_order;
823	struct page *page;
824	int migratetype, i;
825
826	/* Find the largest possible block of pages in the other list */
827	for (current_order = MAX_ORDER-1; current_order >= order;
828						--current_order) {
829		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
830			migratetype = fallbacks[start_migratetype][i];
831
832			/* MIGRATE_RESERVE handled later if necessary */
833			if (migratetype == MIGRATE_RESERVE)
834				continue;
835
836			area = &(zone->free_area[current_order]);
837			if (list_empty(&area->free_list[migratetype]))
838				continue;
839
840			page = list_entry(area->free_list[migratetype].next,
841					struct page, lru);
842			area->nr_free--;
843
844			/*
845			 * If breaking a large block of pages, move all free
846			 * pages to the preferred allocation list. If falling
847			 * back for a reclaimable kernel allocation, be more
848			 * agressive about taking ownership of free pages
849			 */
850			if (unlikely(current_order >= (pageblock_order >> 1)) ||
851					start_migratetype == MIGRATE_RECLAIMABLE ||
852					page_group_by_mobility_disabled) {
853				unsigned long pages;
854				pages = move_freepages_block(zone, page,
855								start_migratetype);
856
857				/* Claim the whole block if over half of it is free */
858				if (pages >= (1 << (pageblock_order-1)) ||
859						page_group_by_mobility_disabled)
860					set_pageblock_migratetype(page,
861								start_migratetype);
862
863				migratetype = start_migratetype;
864			}
865
866			/* Remove the page from the freelists */
867			list_del(&page->lru);
868			rmv_page_order(page);
869
870			/* Take ownership for orders >= pageblock_order */
871			if (current_order >= pageblock_order)
872				change_pageblock_range(page, current_order,
873							start_migratetype);
874
875			expand(zone, page, order, current_order, area, migratetype);
876
877			trace_mm_page_alloc_extfrag(page, order, current_order,
878				start_migratetype, migratetype);
879
880			return page;
881		}
882	}
883
884	return NULL;
885}
886
887/*
888 * Do the hard work of removing an element from the buddy allocator.
889 * Call me with the zone->lock already held.
890 */
891static struct page *__rmqueue(struct zone *zone, unsigned int order,
892						int migratetype)
893{
894	struct page *page;
895
896retry_reserve:
897	page = __rmqueue_smallest(zone, order, migratetype);
898
899	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
900		page = __rmqueue_fallback(zone, order, migratetype);
901
902		/*
903		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
904		 * is used because __rmqueue_smallest is an inline function
905		 * and we want just one call site
906		 */
907		if (!page) {
908			migratetype = MIGRATE_RESERVE;
909			goto retry_reserve;
910		}
911	}
912
913	trace_mm_page_alloc_zone_locked(page, order, migratetype);
914	return page;
915}
916
917/*
918 * Obtain a specified number of elements from the buddy allocator, all under
919 * a single hold of the lock, for efficiency.  Add them to the supplied list.
920 * Returns the number of new pages which were placed at *list.
921 */
922static int rmqueue_bulk(struct zone *zone, unsigned int order,
923			unsigned long count, struct list_head *list,
924			int migratetype, int cold)
925{
926	int i;
927
928	spin_lock(&zone->lock);
929	for (i = 0; i < count; ++i) {
930		struct page *page = __rmqueue(zone, order, migratetype);
931		if (unlikely(page == NULL))
932			break;
933
934		/*
935		 * Split buddy pages returned by expand() are received here
936		 * in physical page order. The page is added to the callers and
937		 * list and the list head then moves forward. From the callers
938		 * perspective, the linked list is ordered by page number in
939		 * some conditions. This is useful for IO devices that can
940		 * merge IO requests if the physical pages are ordered
941		 * properly.
942		 */
943		if (likely(cold == 0))
944			list_add(&page->lru, list);
945		else
946			list_add_tail(&page->lru, list);
947		set_page_private(page, migratetype);
948		list = &page->lru;
949	}
950	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
951	spin_unlock(&zone->lock);
952	return i;
953}
954
955#ifdef CONFIG_NUMA
956/*
957 * Called from the vmstat counter updater to drain pagesets of this
958 * currently executing processor on remote nodes after they have
959 * expired.
960 *
961 * Note that this function must be called with the thread pinned to
962 * a single processor.
963 */
964void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
965{
966	unsigned long flags;
967	int to_drain;
968
969	local_irq_save(flags);
970	if (pcp->count >= pcp->batch)
971		to_drain = pcp->batch;
972	else
973		to_drain = pcp->count;
974	free_pcppages_bulk(zone, to_drain, pcp);
975	pcp->count -= to_drain;
976	local_irq_restore(flags);
977}
978#endif
979
980/*
981 * Drain pages of the indicated processor.
982 *
983 * The processor must either be the current processor and the
984 * thread pinned to the current processor or a processor that
985 * is not online.
986 */
987static void drain_pages(unsigned int cpu)
988{
989	unsigned long flags;
990	struct zone *zone;
991
992	for_each_populated_zone(zone) {
993		struct per_cpu_pageset *pset;
994		struct per_cpu_pages *pcp;
995
996		pset = zone_pcp(zone, cpu);
997
998		pcp = &pset->pcp;
999		local_irq_save(flags);
1000		free_pcppages_bulk(zone, pcp->count, pcp);
1001		pcp->count = 0;
1002		local_irq_restore(flags);
1003	}
1004}
1005
1006/*
1007 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1008 */
1009void drain_local_pages(void *arg)
1010{
1011	drain_pages(smp_processor_id());
1012}
1013
1014/*
1015 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
1016 */
1017void drain_all_pages(void)
1018{
1019	on_each_cpu(drain_local_pages, NULL, 1);
1020}
1021
1022#ifdef CONFIG_HIBERNATION
1023
1024void mark_free_pages(struct zone *zone)
1025{
1026	unsigned long pfn, max_zone_pfn;
1027	unsigned long flags;
1028	int order, t;
1029	struct list_head *curr;
1030
1031	if (!zone->spanned_pages)
1032		return;
1033
1034	spin_lock_irqsave(&zone->lock, flags);
1035
1036	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1037	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1038		if (pfn_valid(pfn)) {
1039			struct page *page = pfn_to_page(pfn);
1040
1041			if (!swsusp_page_is_forbidden(page))
1042				swsusp_unset_page_free(page);
1043		}
1044
1045	for_each_migratetype_order(order, t) {
1046		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1047			unsigned long i;
1048
1049			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1050			for (i = 0; i < (1UL << order); i++)
1051				swsusp_set_page_free(pfn_to_page(pfn + i));
1052		}
1053	}
1054	spin_unlock_irqrestore(&zone->lock, flags);
1055}
1056#endif /* CONFIG_PM */
1057
1058/*
1059 * Free a 0-order page
1060 */
1061static void free_hot_cold_page(struct page *page, int cold)
1062{
1063	struct zone *zone = page_zone(page);
1064	struct per_cpu_pages *pcp;
1065	unsigned long flags;
1066	int migratetype;
1067	int wasMlocked = __TestClearPageMlocked(page);
1068
1069	kmemcheck_free_shadow(page, 0);
1070
1071	if (PageAnon(page))
1072		page->mapping = NULL;
1073	if (free_pages_check(page))
1074		return;
1075
1076	if (!PageHighMem(page)) {
1077		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
1078		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
1079	}
1080	arch_free_page(page, 0);
1081	kernel_map_pages(page, 1, 0);
1082
1083	pcp = &zone_pcp(zone, get_cpu())->pcp;
1084	migratetype = get_pageblock_migratetype(page);
1085	set_page_private(page, migratetype);
1086	local_irq_save(flags);
1087	if (unlikely(wasMlocked))
1088		free_page_mlock(page);
1089	__count_vm_event(PGFREE);
1090
1091	/*
1092	 * We only track unmovable, reclaimable and movable on pcp lists.
1093	 * Free ISOLATE pages back to the allocator because they are being
1094	 * offlined but treat RESERVE as movable pages so we can get those
1095	 * areas back if necessary. Otherwise, we may have to free
1096	 * excessively into the page allocator
1097	 */
1098	if (migratetype >= MIGRATE_PCPTYPES) {
1099		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1100			free_one_page(zone, page, 0, migratetype);
1101			goto out;
1102		}
1103		migratetype = MIGRATE_MOVABLE;
1104	}
1105
1106	if (cold)
1107		list_add_tail(&page->lru, &pcp->lists[migratetype]);
1108	else
1109		list_add(&page->lru, &pcp->lists[migratetype]);
1110	pcp->count++;
1111	if (pcp->count >= pcp->high) {
1112		free_pcppages_bulk(zone, pcp->batch, pcp);
1113		pcp->count -= pcp->batch;
1114	}
1115
1116out:
1117	local_irq_restore(flags);
1118	put_cpu();
1119}
1120
1121void free_hot_page(struct page *page)
1122{
1123	trace_mm_page_free_direct(page, 0);
1124	free_hot_cold_page(page, 0);
1125}
1126
1127/*
1128 * split_page takes a non-compound higher-order page, and splits it into
1129 * n (1<<order) sub-pages: page[0..n]
1130 * Each sub-page must be freed individually.
1131 *
1132 * Note: this is probably too low level an operation for use in drivers.
1133 * Please consult with lkml before using this in your driver.
1134 */
1135void split_page(struct page *page, unsigned int order)
1136{
1137	int i;
1138
1139	VM_BUG_ON(PageCompound(page));
1140	VM_BUG_ON(!page_count(page));
1141
1142#ifdef CONFIG_KMEMCHECK
1143	/*
1144	 * Split shadow pages too, because free(page[0]) would
1145	 * otherwise free the whole shadow.
1146	 */
1147	if (kmemcheck_page_is_tracked(page))
1148		split_page(virt_to_page(page[0].shadow), order);
1149#endif
1150
1151	for (i = 1; i < (1 << order); i++)
1152		set_page_refcounted(page + i);
1153}
1154
1155/*
1156 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1157 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1158 * or two.
1159 */
1160static inline
1161struct page *buffered_rmqueue(struct zone *preferred_zone,
1162			struct zone *zone, int order, gfp_t gfp_flags,
1163			int migratetype)
1164{
1165	unsigned long flags;
1166	struct page *page;
1167	int cold = !!(gfp_flags & __GFP_COLD);
1168	int cpu;
1169
1170again:
1171	cpu  = get_cpu();
1172	if (likely(order == 0)) {
1173		struct per_cpu_pages *pcp;
1174		struct list_head *list;
1175
1176		pcp = &zone_pcp(zone, cpu)->pcp;
1177		list = &pcp->lists[migratetype];
1178		local_irq_save(flags);
1179		if (list_empty(list)) {
1180			pcp->count += rmqueue_bulk(zone, 0,
1181					pcp->batch, list,
1182					migratetype, cold);
1183			if (unlikely(list_empty(list)))
1184				goto failed;
1185		}
1186
1187		if (cold)
1188			page = list_entry(list->prev, struct page, lru);
1189		else
1190			page = list_entry(list->next, struct page, lru);
1191
1192		list_del(&page->lru);
1193		pcp->count--;
1194	} else {
1195		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1196			/*
1197			 * __GFP_NOFAIL is not to be used in new code.
1198			 *
1199			 * All __GFP_NOFAIL callers should be fixed so that they
1200			 * properly detect and handle allocation failures.
1201			 *
1202			 * We most definitely don't want callers attempting to
1203			 * allocate greater than order-1 page units with
1204			 * __GFP_NOFAIL.
1205			 */
1206			WARN_ON_ONCE(order > 1);
1207		}
1208		spin_lock_irqsave(&zone->lock, flags);
1209		page = __rmqueue(zone, order, migratetype);
1210		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1211		spin_unlock(&zone->lock);
1212		if (!page)
1213			goto failed;
1214	}
1215
1216	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1217	zone_statistics(preferred_zone, zone);
1218	local_irq_restore(flags);
1219	put_cpu();
1220
1221	VM_BUG_ON(bad_range(zone, page));
1222	if (prep_new_page(page, order, gfp_flags))
1223		goto again;
1224	return page;
1225
1226failed:
1227	local_irq_restore(flags);
1228	put_cpu();
1229	return NULL;
1230}
1231
1232/* The ALLOC_WMARK bits are used as an index to zone->watermark */
1233#define ALLOC_WMARK_MIN		WMARK_MIN
1234#define ALLOC_WMARK_LOW		WMARK_LOW
1235#define ALLOC_WMARK_HIGH	WMARK_HIGH
1236#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1237
1238/* Mask to get the watermark bits */
1239#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1240
1241#define ALLOC_HARDER		0x10 /* try to alloc harder */
1242#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1243#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1244
1245#ifdef CONFIG_FAIL_PAGE_ALLOC
1246
1247static struct fail_page_alloc_attr {
1248	struct fault_attr attr;
1249
1250	u32 ignore_gfp_highmem;
1251	u32 ignore_gfp_wait;
1252	u32 min_order;
1253
1254#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1255
1256	struct dentry *ignore_gfp_highmem_file;
1257	struct dentry *ignore_gfp_wait_file;
1258	struct dentry *min_order_file;
1259
1260#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1261
1262} fail_page_alloc = {
1263	.attr = FAULT_ATTR_INITIALIZER,
1264	.ignore_gfp_wait = 1,
1265	.ignore_gfp_highmem = 1,
1266	.min_order = 1,
1267};
1268
1269static int __init setup_fail_page_alloc(char *str)
1270{
1271	return setup_fault_attr(&fail_page_alloc.attr, str);
1272}
1273__setup("fail_page_alloc=", setup_fail_page_alloc);
1274
1275static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1276{
1277	if (order < fail_page_alloc.min_order)
1278		return 0;
1279	if (gfp_mask & __GFP_NOFAIL)
1280		return 0;
1281	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1282		return 0;
1283	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1284		return 0;
1285
1286	return should_fail(&fail_page_alloc.attr, 1 << order);
1287}
1288
1289#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1290
1291static int __init fail_page_alloc_debugfs(void)
1292{
1293	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1294	struct dentry *dir;
1295	int err;
1296
1297	err = init_fault_attr_dentries(&fail_page_alloc.attr,
1298				       "fail_page_alloc");
1299	if (err)
1300		return err;
1301	dir = fail_page_alloc.attr.dentries.dir;
1302
1303	fail_page_alloc.ignore_gfp_wait_file =
1304		debugfs_create_bool("ignore-gfp-wait", mode, dir,
1305				      &fail_page_alloc.ignore_gfp_wait);
1306
1307	fail_page_alloc.ignore_gfp_highmem_file =
1308		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1309				      &fail_page_alloc.ignore_gfp_highmem);
1310	fail_page_alloc.min_order_file =
1311		debugfs_create_u32("min-order", mode, dir,
1312				   &fail_page_alloc.min_order);
1313
1314	if (!fail_page_alloc.ignore_gfp_wait_file ||
1315            !fail_page_alloc.ignore_gfp_highmem_file ||
1316            !fail_page_alloc.min_order_file) {
1317		err = -ENOMEM;
1318		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1319		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1320		debugfs_remove(fail_page_alloc.min_order_file);
1321		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1322	}
1323
1324	return err;
1325}
1326
1327late_initcall(fail_page_alloc_debugfs);
1328
1329#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1330
1331#else /* CONFIG_FAIL_PAGE_ALLOC */
1332
1333static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1334{
1335	return 0;
1336}
1337
1338#endif /* CONFIG_FAIL_PAGE_ALLOC */
1339
1340/*
1341 * Return 1 if free pages are above 'mark'. This takes into account the order
1342 * of the allocation.
1343 */
1344int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1345		      int classzone_idx, int alloc_flags)
1346{
1347	/* free_pages my go negative - that's OK */
1348	long min = mark;
1349	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1350	int o;
1351
1352	if (alloc_flags & ALLOC_HIGH)
1353		min -= min / 2;
1354	if (alloc_flags & ALLOC_HARDER)
1355		min -= min / 4;
1356
1357	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1358		return 0;
1359	for (o = 0; o < order; o++) {
1360		/* At the next order, this order's pages become unavailable */
1361		free_pages -= z->free_area[o].nr_free << o;
1362
1363		/* Require fewer higher order pages to be free */
1364		min >>= 1;
1365
1366		if (free_pages <= min)
1367			return 0;
1368	}
1369	return 1;
1370}
1371
1372#ifdef CONFIG_NUMA
1373/*
1374 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1375 * skip over zones that are not allowed by the cpuset, or that have
1376 * been recently (in last second) found to be nearly full.  See further
1377 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1378 * that have to skip over a lot of full or unallowed zones.
1379 *
1380 * If the zonelist cache is present in the passed in zonelist, then
1381 * returns a pointer to the allowed node mask (either the current
1382 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
1383 *
1384 * If the zonelist cache is not available for this zonelist, does
1385 * nothing and returns NULL.
1386 *
1387 * If the fullzones BITMAP in the zonelist cache is stale (more than
1388 * a second since last zap'd) then we zap it out (clear its bits.)
1389 *
1390 * We hold off even calling zlc_setup, until after we've checked the
1391 * first zone in the zonelist, on the theory that most allocations will
1392 * be satisfied from that first zone, so best to examine that zone as
1393 * quickly as we can.
1394 */
1395static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1396{
1397	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1398	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1399
1400	zlc = zonelist->zlcache_ptr;
1401	if (!zlc)
1402		return NULL;
1403
1404	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1405		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1406		zlc->last_full_zap = jiffies;
1407	}
1408
1409	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1410					&cpuset_current_mems_allowed :
1411					&node_states[N_HIGH_MEMORY];
1412	return allowednodes;
1413}
1414
1415/*
1416 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1417 * if it is worth looking at further for free memory:
1418 *  1) Check that the zone isn't thought to be full (doesn't have its
1419 *     bit set in the zonelist_cache fullzones BITMAP).
1420 *  2) Check that the zones node (obtained from the zonelist_cache
1421 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1422 * Return true (non-zero) if zone is worth looking at further, or
1423 * else return false (zero) if it is not.
1424 *
1425 * This check -ignores- the distinction between various watermarks,
1426 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1427 * found to be full for any variation of these watermarks, it will
1428 * be considered full for up to one second by all requests, unless
1429 * we are so low on memory on all allowed nodes that we are forced
1430 * into the second scan of the zonelist.
1431 *
1432 * In the second scan we ignore this zonelist cache and exactly
1433 * apply the watermarks to all zones, even it is slower to do so.
1434 * We are low on memory in the second scan, and should leave no stone
1435 * unturned looking for a free page.
1436 */
1437static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1438						nodemask_t *allowednodes)
1439{
1440	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1441	int i;				/* index of *z in zonelist zones */
1442	int n;				/* node that zone *z is on */
1443
1444	zlc = zonelist->zlcache_ptr;
1445	if (!zlc)
1446		return 1;
1447
1448	i = z - zonelist->_zonerefs;
1449	n = zlc->z_to_n[i];
1450
1451	/* This zone is worth trying if it is allowed but not full */
1452	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1453}
1454
1455/*
1456 * Given 'z' scanning a zonelist, set the corresponding bit in
1457 * zlc->fullzones, so that subsequent attempts to allocate a page
1458 * from that zone don't waste time re-examining it.
1459 */
1460static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1461{
1462	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1463	int i;				/* index of *z in zonelist zones */
1464
1465	zlc = zonelist->zlcache_ptr;
1466	if (!zlc)
1467		return;
1468
1469	i = z - zonelist->_zonerefs;
1470
1471	set_bit(i, zlc->fullzones);
1472}
1473
1474#else	/* CONFIG_NUMA */
1475
1476static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1477{
1478	return NULL;
1479}
1480
1481static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1482				nodemask_t *allowednodes)
1483{
1484	return 1;
1485}
1486
1487static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1488{
1489}
1490#endif	/* CONFIG_NUMA */
1491
1492/*
1493 * get_page_from_freelist goes through the zonelist trying to allocate
1494 * a page.
1495 */
1496static struct page *
1497get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1498		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1499		struct zone *preferred_zone, int migratetype)
1500{
1501	struct zoneref *z;
1502	struct page *page = NULL;
1503	int classzone_idx;
1504	struct zone *zone;
1505	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1506	int zlc_active = 0;		/* set if using zonelist_cache */
1507	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1508
1509	classzone_idx = zone_idx(preferred_zone);
1510zonelist_scan:
1511	/*
1512	 * Scan zonelist, looking for a zone with enough free.
1513	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1514	 */
1515	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1516						high_zoneidx, nodemask) {
1517		if (NUMA_BUILD && zlc_active &&
1518			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1519				continue;
1520		if ((alloc_flags & ALLOC_CPUSET) &&
1521			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1522				goto try_next_zone;
1523
1524		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1525		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1526			unsigned long mark;
1527			int ret;
1528
1529			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1530			if (zone_watermark_ok(zone, order, mark,
1531				    classzone_idx, alloc_flags))
1532				goto try_this_zone;
1533
1534			if (zone_reclaim_mode == 0)
1535				goto this_zone_full;
1536
1537			ret = zone_reclaim(zone, gfp_mask, order);
1538			switch (ret) {
1539			case ZONE_RECLAIM_NOSCAN:
1540				/* did not scan */
1541				goto try_next_zone;
1542			case ZONE_RECLAIM_FULL:
1543				/* scanned but unreclaimable */
1544				goto this_zone_full;
1545			default:
1546				/* did we reclaim enough */
1547				if (!zone_watermark_ok(zone, order, mark,
1548						classzone_idx, alloc_flags))
1549					goto this_zone_full;
1550			}
1551		}
1552
1553try_this_zone:
1554		page = buffered_rmqueue(preferred_zone, zone, order,
1555						gfp_mask, migratetype);
1556		if (page)
1557			break;
1558this_zone_full:
1559		if (NUMA_BUILD)
1560			zlc_mark_zone_full(zonelist, z);
1561try_next_zone:
1562		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1563			/*
1564			 * we do zlc_setup after the first zone is tried but only
1565			 * if there are multiple nodes make it worthwhile
1566			 */
1567			allowednodes = zlc_setup(zonelist, alloc_flags);
1568			zlc_active = 1;
1569			did_zlc_setup = 1;
1570		}
1571	}
1572
1573	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1574		/* Disable zlc cache for second zonelist scan */
1575		zlc_active = 0;
1576		goto zonelist_scan;
1577	}
1578	return page;
1579}
1580
1581static inline int
1582should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1583				unsigned long pages_reclaimed)
1584{
1585	/* Do not loop if specifically requested */
1586	if (gfp_mask & __GFP_NORETRY)
1587		return 0;
1588
1589	/*
1590	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1591	 * means __GFP_NOFAIL, but that may not be true in other
1592	 * implementations.
1593	 */
1594	if (order <= PAGE_ALLOC_COSTLY_ORDER)
1595		return 1;
1596
1597	/*
1598	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1599	 * specified, then we retry until we no longer reclaim any pages
1600	 * (above), or we've reclaimed an order of pages at least as
1601	 * large as the allocation's order. In both cases, if the
1602	 * allocation still fails, we stop retrying.
1603	 */
1604	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1605		return 1;
1606
1607	/*
1608	 * Don't let big-order allocations loop unless the caller
1609	 * explicitly requests that.
1610	 */
1611	if (gfp_mask & __GFP_NOFAIL)
1612		return 1;
1613
1614	return 0;
1615}
1616
1617static inline struct page *
1618__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1619	struct zonelist *zonelist, enum zone_type high_zoneidx,
1620	nodemask_t *nodemask, struct zone *preferred_zone,
1621	int migratetype)
1622{
1623	struct page *page;
1624
1625	/* Acquire the OOM killer lock for the zones in zonelist */
1626	if (!try_set_zone_oom(zonelist, gfp_mask)) {
1627		schedule_timeout_uninterruptible(1);
1628		return NULL;
1629	}
1630
1631	/*
1632	 * Go through the zonelist yet one more time, keep very high watermark
1633	 * here, this is only to catch a parallel oom killing, we must fail if
1634	 * we're still under heavy pressure.
1635	 */
1636	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1637		order, zonelist, high_zoneidx,
1638		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1639		preferred_zone, migratetype);
1640	if (page)
1641		goto out;
1642
1643	/* The OOM killer will not help higher order allocs */
1644	if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
1645		goto out;
1646
1647	/* Exhausted what can be done so it's blamo time */
1648	out_of_memory(zonelist, gfp_mask, order);
1649
1650out:
1651	clear_zonelist_oom(zonelist, gfp_mask);
1652	return page;
1653}
1654
1655/* The really slow allocator path where we enter direct reclaim */
1656static inline struct page *
1657__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1658	struct zonelist *zonelist, enum zone_type high_zoneidx,
1659	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1660	int migratetype, unsigned long *did_some_progress)
1661{
1662	struct page *page = NULL;
1663	struct reclaim_state reclaim_state;
1664	struct task_struct *p = current;
1665
1666	cond_resched();
1667
1668	/* We now go into synchronous reclaim */
1669	cpuset_memory_pressure_bump();
1670	p->flags |= PF_MEMALLOC;
1671	lockdep_set_current_reclaim_state(gfp_mask);
1672	reclaim_state.reclaimed_slab = 0;
1673	p->reclaim_state = &reclaim_state;
1674
1675	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1676
1677	p->reclaim_state = NULL;
1678	lockdep_clear_current_reclaim_state();
1679	p->flags &= ~PF_MEMALLOC;
1680
1681	cond_resched();
1682
1683	if (order != 0)
1684		drain_all_pages();
1685
1686	if (likely(*did_some_progress))
1687		page = get_page_from_freelist(gfp_mask, nodemask, order,
1688					zonelist, high_zoneidx,
1689					alloc_flags, preferred_zone,
1690					migratetype);
1691	return page;
1692}
1693
1694/*
1695 * This is called in the allocator slow-path if the allocation request is of
1696 * sufficient urgency to ignore watermarks and take other desperate measures
1697 */
1698static inline struct page *
1699__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1700	struct zonelist *zonelist, enum zone_type high_zoneidx,
1701	nodemask_t *nodemask, struct zone *preferred_zone,
1702	int migratetype)
1703{
1704	struct page *page;
1705
1706	do {
1707		page = get_page_from_freelist(gfp_mask, nodemask, order,
1708			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
1709			preferred_zone, migratetype);
1710
1711		if (!page && gfp_mask & __GFP_NOFAIL)
1712			congestion_wait(BLK_RW_ASYNC, HZ/50);
1713	} while (!page && (gfp_mask & __GFP_NOFAIL));
1714
1715	return page;
1716}
1717
1718static inline
1719void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1720						enum zone_type high_zoneidx)
1721{
1722	struct zoneref *z;
1723	struct zone *zone;
1724
1725	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1726		wakeup_kswapd(zone, order);
1727}
1728
1729static inline int
1730gfp_to_alloc_flags(gfp_t gfp_mask)
1731{
1732	struct task_struct *p = current;
1733	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1734	const gfp_t wait = gfp_mask & __GFP_WAIT;
1735
1736	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1737	BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
1738
1739	/*
1740	 * The caller may dip into page reserves a bit more if the caller
1741	 * cannot run direct reclaim, or if the caller has realtime scheduling
1742	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1743	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1744	 */
1745	alloc_flags |= (gfp_mask & __GFP_HIGH);
1746
1747	if (!wait) {
1748		alloc_flags |= ALLOC_HARDER;
1749		/*
1750		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1751		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1752		 */
1753		alloc_flags &= ~ALLOC_CPUSET;
1754	} else if (unlikely(rt_task(p)))
1755		alloc_flags |= ALLOC_HARDER;
1756
1757	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1758		if (!in_interrupt() &&
1759		    ((p->flags & PF_MEMALLOC) ||
1760		     unlikely(test_thread_flag(TIF_MEMDIE))))
1761			alloc_flags |= ALLOC_NO_WATERMARKS;
1762	}
1763
1764	return alloc_flags;
1765}
1766
1767static inline struct page *
1768__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1769	struct zonelist *zonelist, enum zone_type high_zoneidx,
1770	nodemask_t *nodemask, struct zone *preferred_zone,
1771	int migratetype)
1772{
1773	const gfp_t wait = gfp_mask & __GFP_WAIT;
1774	struct page *page = NULL;
1775	int alloc_flags;
1776	unsigned long pages_reclaimed = 0;
1777	unsigned long did_some_progress;
1778	struct task_struct *p = current;
1779
1780	/*
1781	 * In the slowpath, we sanity check order to avoid ever trying to
1782	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
1783	 * be using allocators in order of preference for an area that is
1784	 * too large.
1785	 */
1786	if (order >= MAX_ORDER) {
1787		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
1788		return NULL;
1789	}
1790
1791	/*
1792	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1793	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1794	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
1795	 * using a larger set of nodes after it has established that the
1796	 * allowed per node queues are empty and that nodes are
1797	 * over allocated.
1798	 */
1799	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1800		goto nopage;
1801
1802	wake_all_kswapd(order, zonelist, high_zoneidx);
1803
1804restart:
1805	/*
1806	 * OK, we're below the kswapd watermark and have kicked background
1807	 * reclaim. Now things get more complex, so set up alloc_flags according
1808	 * to how we want to proceed.
1809	 */
1810	alloc_flags = gfp_to_alloc_flags(gfp_mask);
1811
1812	/* This is the last chance, in general, before the goto nopage. */
1813	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1814			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
1815			preferred_zone, migratetype);
1816	if (page)
1817		goto got_pg;
1818
1819rebalance:
1820	/* Allocate without watermarks if the context allows */
1821	if (alloc_flags & ALLOC_NO_WATERMARKS) {
1822		page = __alloc_pages_high_priority(gfp_mask, order,
1823				zonelist, high_zoneidx, nodemask,
1824				preferred_zone, migratetype);
1825		if (page)
1826			goto got_pg;
1827	}
1828
1829	/* Atomic allocations - we can't balance anything */
1830	if (!wait)
1831		goto nopage;
1832
1833	/* Avoid recursion of direct reclaim */
1834	if (p->flags & PF_MEMALLOC)
1835		goto nopage;
1836
1837	/* Avoid allocations with no watermarks from looping endlessly */
1838	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
1839		goto nopage;
1840
1841	/* Try direct reclaim and then allocating */
1842	page = __alloc_pages_direct_reclaim(gfp_mask, order,
1843					zonelist, high_zoneidx,
1844					nodemask,
1845					alloc_flags, preferred_zone,
1846					migratetype, &did_some_progress);
1847	if (page)
1848		goto got_pg;
1849
1850	/*
1851	 * If we failed to make any progress reclaiming, then we are
1852	 * running out of options and have to consider going OOM
1853	 */
1854	if (!did_some_progress) {
1855		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1856			if (oom_killer_disabled)
1857				goto nopage;
1858			page = __alloc_pages_may_oom(gfp_mask, order,
1859					zonelist, high_zoneidx,
1860					nodemask, preferred_zone,
1861					migratetype);
1862			if (page)
1863				goto got_pg;
1864
1865			/*
1866			 * The OOM killer does not trigger for high-order
1867			 * ~__GFP_NOFAIL allocations so if no progress is being
1868			 * made, there are no other options and retrying is
1869			 * unlikely to help.
1870			 */
1871			if (order > PAGE_ALLOC_COSTLY_ORDER &&
1872						!(gfp_mask & __GFP_NOFAIL))
1873				goto nopage;
1874
1875			goto restart;
1876		}
1877	}
1878
1879	/* Check if we should retry the allocation */
1880	pages_reclaimed += did_some_progress;
1881	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
1882		/* Wait for some write requests to complete then retry */
1883		congestion_wait(BLK_RW_ASYNC, HZ/50);
1884		goto rebalance;
1885	}
1886
1887nopage:
1888	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1889		printk(KERN_WARNING "%s: page allocation failure."
1890			" order:%d, mode:0x%x\n",
1891			p->comm, order, gfp_mask);
1892		dump_stack();
1893		show_mem();
1894	}
1895	return page;
1896got_pg:
1897	if (kmemcheck_enabled)
1898		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
1899	return page;
1900
1901}
1902
1903/*
1904 * This is the 'heart' of the zoned buddy allocator.
1905 */
1906struct page *
1907__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
1908			struct zonelist *zonelist, nodemask_t *nodemask)
1909{
1910	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1911	struct zone *preferred_zone;
1912	struct page *page;
1913	int migratetype = allocflags_to_migratetype(gfp_mask);
1914
1915	gfp_mask &= gfp_allowed_mask;
1916
1917	lockdep_trace_alloc(gfp_mask);
1918
1919	might_sleep_if(gfp_mask & __GFP_WAIT);
1920
1921	if (should_fail_alloc_page(gfp_mask, order))
1922		return NULL;
1923
1924	/*
1925	 * Check the zones suitable for the gfp_mask contain at least one
1926	 * valid zone. It's possible to have an empty zonelist as a result
1927	 * of GFP_THISNODE and a memoryless node
1928	 */
1929	if (unlikely(!zonelist->_zonerefs->zone))
1930		return NULL;
1931
1932	/* The preferred zone is used for statistics later */
1933	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
1934	if (!preferred_zone)
1935		return NULL;
1936
1937	/* First allocation attempt */
1938	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1939			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
1940			preferred_zone, migratetype);
1941	if (unlikely(!page))
1942		page = __alloc_pages_slowpath(gfp_mask, order,
1943				zonelist, high_zoneidx, nodemask,
1944				preferred_zone, migratetype);
1945
1946	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
1947	return page;
1948}
1949EXPORT_SYMBOL(__alloc_pages_nodemask);
1950
1951/*
1952 * Common helper functions.
1953 */
1954unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1955{
1956	struct page *page;
1957
1958	/*
1959	 * __get_free_pages() returns a 32-bit address, which cannot represent
1960	 * a highmem page
1961	 */
1962	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1963
1964	page = alloc_pages(gfp_mask, order);
1965	if (!page)
1966		return 0;
1967	return (unsigned long) page_address(page);
1968}
1969EXPORT_SYMBOL(__get_free_pages);
1970
1971unsigned long get_zeroed_page(gfp_t gfp_mask)
1972{
1973	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1974}
1975EXPORT_SYMBOL(get_zeroed_page);
1976
1977void __pagevec_free(struct pagevec *pvec)
1978{
1979	int i = pagevec_count(pvec);
1980
1981	while (--i >= 0) {
1982		trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
1983		free_hot_cold_page(pvec->pages[i], pvec->cold);
1984	}
1985}
1986
1987void __free_pages(struct page *page, unsigned int order)
1988{
1989	if (put_page_testzero(page)) {
1990		trace_mm_page_free_direct(page, order);
1991		if (order == 0)
1992			free_hot_page(page);
1993		else
1994			__free_pages_ok(page, order);
1995	}
1996}
1997
1998EXPORT_SYMBOL(__free_pages);
1999
2000void free_pages(unsigned long addr, unsigned int order)
2001{
2002	if (addr != 0) {
2003		VM_BUG_ON(!virt_addr_valid((void *)addr));
2004		__free_pages(virt_to_page((void *)addr), order);
2005	}
2006}
2007
2008EXPORT_SYMBOL(free_pages);
2009
2010/**
2011 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
2012 * @size: the number of bytes to allocate
2013 * @gfp_mask: GFP flags for the allocation
2014 *
2015 * This function is similar to alloc_pages(), except that it allocates the
2016 * minimum number of pages to satisfy the request.  alloc_pages() can only
2017 * allocate memory in power-of-two pages.
2018 *
2019 * This function is also limited by MAX_ORDER.
2020 *
2021 * Memory allocated by this function must be released by free_pages_exact().
2022 */
2023void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2024{
2025	unsigned int order = get_order(size);
2026	unsigned long addr;
2027
2028	addr = __get_free_pages(gfp_mask, order);
2029	if (addr) {
2030		unsigned long alloc_end = addr + (PAGE_SIZE << order);
2031		unsigned long used = addr + PAGE_ALIGN(size);
2032
2033		split_page(virt_to_page((void *)addr), order);
2034		while (used < alloc_end) {
2035			free_page(used);
2036			used += PAGE_SIZE;
2037		}
2038	}
2039
2040	return (void *)addr;
2041}
2042EXPORT_SYMBOL(alloc_pages_exact);
2043
2044/**
2045 * free_pages_exact - release memory allocated via alloc_pages_exact()
2046 * @virt: the value returned by alloc_pages_exact.
2047 * @size: size of allocation, same value as passed to alloc_pages_exact().
2048 *
2049 * Release the memory allocated by a previous call to alloc_pages_exact.
2050 */
2051void free_pages_exact(void *virt, size_t size)
2052{
2053	unsigned long addr = (unsigned long)virt;
2054	unsigned long end = addr + PAGE_ALIGN(size);
2055
2056	while (addr < end) {
2057		free_page(addr);
2058		addr += PAGE_SIZE;
2059	}
2060}
2061EXPORT_SYMBOL(free_pages_exact);
2062
2063static unsigned int nr_free_zone_pages(int offset)
2064{
2065	struct zoneref *z;
2066	struct zone *zone;
2067
2068	/* Just pick one node, since fallback list is circular */
2069	unsigned int sum = 0;
2070
2071	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2072
2073	for_each_zone_zonelist(zone, z, zonelist, offset) {
2074		unsigned long size = zone->present_pages;
2075		unsigned long high = high_wmark_pages(zone);
2076		if (size > high)
2077			sum += size - high;
2078	}
2079
2080	return sum;
2081}
2082
2083/*
2084 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2085 */
2086unsigned int nr_free_buffer_pages(void)
2087{
2088	return nr_free_zone_pages(gfp_zone(GFP_USER));
2089}
2090EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2091
2092/*
2093 * Amount of free RAM allocatable within all zones
2094 */
2095unsigned int nr_free_pagecache_pages(void)
2096{
2097	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2098}
2099
2100static inline void show_node(struct zone *zone)
2101{
2102	if (NUMA_BUILD)
2103		printk("Node %d ", zone_to_nid(zone));
2104}
2105
2106void si_meminfo(struct sysinfo *val)
2107{
2108	val->totalram = totalram_pages;
2109	val->sharedram = 0;
2110	val->freeram = global_page_state(NR_FREE_PAGES);
2111	val->bufferram = nr_blockdev_pages();
2112	val->totalhigh = totalhigh_pages;
2113	val->freehigh = nr_free_highpages();
2114	val->mem_unit = PAGE_SIZE;
2115}
2116
2117EXPORT_SYMBOL(si_meminfo);
2118
2119#ifdef CONFIG_NUMA
2120void si_meminfo_node(struct sysinfo *val, int nid)
2121{
2122	pg_data_t *pgdat = NODE_DATA(nid);
2123
2124	val->totalram = pgdat->node_present_pages;
2125	val->freeram = node_page_state(nid, NR_FREE_PAGES);
2126#ifdef CONFIG_HIGHMEM
2127	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2128	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2129			NR_FREE_PAGES);
2130#else
2131	val->totalhigh = 0;
2132	val->freehigh = 0;
2133#endif
2134	val->mem_unit = PAGE_SIZE;
2135}
2136#endif
2137
2138#define K(x) ((x) << (PAGE_SHIFT-10))
2139
2140/*
2141 * Show free area list (used inside shift_scroll-lock stuff)
2142 * We also calculate the percentage fragmentation. We do this by counting the
2143 * memory on each free list with the exception of the first item on the list.
2144 */
2145void show_free_areas(void)
2146{
2147	int cpu;
2148	struct zone *zone;
2149
2150	for_each_populated_zone(zone) {
2151		show_node(zone);
2152		printk("%s per-cpu:\n", zone->name);
2153
2154		for_each_online_cpu(cpu) {
2155			struct per_cpu_pageset *pageset;
2156
2157			pageset = zone_pcp(zone, cpu);
2158
2159			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2160			       cpu, pageset->pcp.high,
2161			       pageset->pcp.batch, pageset->pcp.count);
2162		}
2163	}
2164
2165	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2166		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2167		" unevictable:%lu"
2168		" dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n"
2169		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2170		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2171		global_page_state(NR_ACTIVE_ANON),
2172		global_page_state(NR_INACTIVE_ANON),
2173		global_page_state(NR_ISOLATED_ANON),
2174		global_page_state(NR_ACTIVE_FILE),
2175		global_page_state(NR_INACTIVE_FILE),
2176		global_page_state(NR_ISOLATED_FILE),
2177		global_page_state(NR_UNEVICTABLE),
2178		global_page_state(NR_FILE_DIRTY),
2179		global_page_state(NR_WRITEBACK),
2180		global_page_state(NR_UNSTABLE_NFS),
2181		nr_blockdev_pages(),
2182		global_page_state(NR_FREE_PAGES),
2183		global_page_state(NR_SLAB_RECLAIMABLE),
2184		global_page_state(NR_SLAB_UNRECLAIMABLE),
2185		global_page_state(NR_FILE_MAPPED),
2186		global_page_state(NR_SHMEM),
2187		global_page_state(NR_PAGETABLE),
2188		global_page_state(NR_BOUNCE));
2189
2190	for_each_populated_zone(zone) {
2191		int i;
2192
2193		show_node(zone);
2194		printk("%s"
2195			" free:%lukB"
2196			" min:%lukB"
2197			" low:%lukB"
2198			" high:%lukB"
2199			" active_anon:%lukB"
2200			" inactive_anon:%lukB"
2201			" active_file:%lukB"
2202			" inactive_file:%lukB"
2203			" unevictable:%lukB"
2204			" isolated(anon):%lukB"
2205			" isolated(file):%lukB"
2206			" present:%lukB"
2207			" mlocked:%lukB"
2208			" dirty:%lukB"
2209			" writeback:%lukB"
2210			" mapped:%lukB"
2211			" shmem:%lukB"
2212			" slab_reclaimable:%lukB"
2213			" slab_unreclaimable:%lukB"
2214			" kernel_stack:%lukB"
2215			" pagetables:%lukB"
2216			" unstable:%lukB"
2217			" bounce:%lukB"
2218			" writeback_tmp:%lukB"
2219			" pages_scanned:%lu"
2220			" all_unreclaimable? %s"
2221			"\n",
2222			zone->name,
2223			K(zone_page_state(zone, NR_FREE_PAGES)),
2224			K(min_wmark_pages(zone)),
2225			K(low_wmark_pages(zone)),
2226			K(high_wmark_pages(zone)),
2227			K(zone_page_state(zone, NR_ACTIVE_ANON)),
2228			K(zone_page_state(zone, NR_INACTIVE_ANON)),
2229			K(zone_page_state(zone, NR_ACTIVE_FILE)),
2230			K(zone_page_state(zone, NR_INACTIVE_FILE)),
2231			K(zone_page_state(zone, NR_UNEVICTABLE)),
2232			K(zone_page_state(zone, NR_ISOLATED_ANON)),
2233			K(zone_page_state(zone, NR_ISOLATED_FILE)),
2234			K(zone->present_pages),
2235			K(zone_page_state(zone, NR_MLOCK)),
2236			K(zone_page_state(zone, NR_FILE_DIRTY)),
2237			K(zone_page_state(zone, NR_WRITEBACK)),
2238			K(zone_page_state(zone, NR_FILE_MAPPED)),
2239			K(zone_page_state(zone, NR_SHMEM)),
2240			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2241			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2242			zone_page_state(zone, NR_KERNEL_STACK) *
2243				THREAD_SIZE / 1024,
2244			K(zone_page_state(zone, NR_PAGETABLE)),
2245			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2246			K(zone_page_state(zone, NR_BOUNCE)),
2247			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2248			zone->pages_scanned,
2249			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
2250			);
2251		printk("lowmem_reserve[]:");
2252		for (i = 0; i < MAX_NR_ZONES; i++)
2253			printk(" %lu", zone->lowmem_reserve[i]);
2254		printk("\n");
2255	}
2256
2257	for_each_populated_zone(zone) {
2258 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
2259
2260		show_node(zone);
2261		printk("%s: ", zone->name);
2262
2263		spin_lock_irqsave(&zone->lock, flags);
2264		for (order = 0; order < MAX_ORDER; order++) {
2265			nr[order] = zone->free_area[order].nr_free;
2266			total += nr[order] << order;
2267		}
2268		spin_unlock_irqrestore(&zone->lock, flags);
2269		for (order = 0; order < MAX_ORDER; order++)
2270			printk("%lu*%lukB ", nr[order], K(1UL) << order);
2271		printk("= %lukB\n", K(total));
2272	}
2273
2274	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2275
2276	show_swap_cache_info();
2277}
2278
2279static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2280{
2281	zoneref->zone = zone;
2282	zoneref->zone_idx = zone_idx(zone);
2283}
2284
2285/*
2286 * Builds allocation fallback zone lists.
2287 *
2288 * Add all populated zones of a node to the zonelist.
2289 */
2290static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2291				int nr_zones, enum zone_type zone_type)
2292{
2293	struct zone *zone;
2294
2295	BUG_ON(zone_type >= MAX_NR_ZONES);
2296	zone_type++;
2297
2298	do {
2299		zone_type--;
2300		zone = pgdat->node_zones + zone_type;
2301		if (populated_zone(zone)) {
2302			zoneref_set_zone(zone,
2303				&zonelist->_zonerefs[nr_zones++]);
2304			check_highest_zone(zone_type);
2305		}
2306
2307	} while (zone_type);
2308	return nr_zones;
2309}
2310
2311
2312/*
2313 *  zonelist_order:
2314 *  0 = automatic detection of better ordering.
2315 *  1 = order by ([node] distance, -zonetype)
2316 *  2 = order by (-zonetype, [node] distance)
2317 *
2318 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2319 *  the same zonelist. So only NUMA can configure this param.
2320 */
2321#define ZONELIST_ORDER_DEFAULT  0
2322#define ZONELIST_ORDER_NODE     1
2323#define ZONELIST_ORDER_ZONE     2
2324
2325/* zonelist order in the kernel.
2326 * set_zonelist_order() will set this to NODE or ZONE.
2327 */
2328static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2329static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2330
2331
2332#ifdef CONFIG_NUMA
2333/* The value user specified ....changed by config */
2334static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2335/* string for sysctl */
2336#define NUMA_ZONELIST_ORDER_LEN	16
2337char numa_zonelist_order[16] = "default";
2338
2339/*
2340 * interface for configure zonelist ordering.
2341 * command line option "numa_zonelist_order"
2342 *	= "[dD]efault	- default, automatic configuration.
2343 *	= "[nN]ode 	- order by node locality, then by zone within node
2344 *	= "[zZ]one      - order by zone, then by locality within zone
2345 */
2346
2347static int __parse_numa_zonelist_order(char *s)
2348{
2349	if (*s == 'd' || *s == 'D') {
2350		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2351	} else if (*s == 'n' || *s == 'N') {
2352		user_zonelist_order = ZONELIST_ORDER_NODE;
2353	} else if (*s == 'z' || *s == 'Z') {
2354		user_zonelist_order = ZONELIST_ORDER_ZONE;
2355	} else {
2356		printk(KERN_WARNING
2357			"Ignoring invalid numa_zonelist_order value:  "
2358			"%s\n", s);
2359		return -EINVAL;
2360	}
2361	return 0;
2362}
2363
2364static __init int setup_numa_zonelist_order(char *s)
2365{
2366	if (s)
2367		return __parse_numa_zonelist_order(s);
2368	return 0;
2369}
2370early_param("numa_zonelist_order", setup_numa_zonelist_order);
2371
2372/*
2373 * sysctl handler for numa_zonelist_order
2374 */
2375int numa_zonelist_order_handler(ctl_table *table, int write,
2376		void __user *buffer, size_t *length,
2377		loff_t *ppos)
2378{
2379	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2380	int ret;
2381
2382	if (write)
2383		strncpy(saved_string, (char*)table->data,
2384			NUMA_ZONELIST_ORDER_LEN);
2385	ret = proc_dostring(table, write, buffer, length, ppos);
2386	if (ret)
2387		return ret;
2388	if (write) {
2389		int oldval = user_zonelist_order;
2390		if (__parse_numa_zonelist_order((char*)table->data)) {
2391			/*
2392			 * bogus value.  restore saved string
2393			 */
2394			strncpy((char*)table->data, saved_string,
2395				NUMA_ZONELIST_ORDER_LEN);
2396			user_zonelist_order = oldval;
2397		} else if (oldval != user_zonelist_order)
2398			build_all_zonelists();
2399	}
2400	return 0;
2401}
2402
2403
2404#define MAX_NODE_LOAD (nr_online_nodes)
2405static int node_load[MAX_NUMNODES];
2406
2407/**
2408 * find_next_best_node - find the next node that should appear in a given node's fallback list
2409 * @node: node whose fallback list we're appending
2410 * @used_node_mask: nodemask_t of already used nodes
2411 *
2412 * We use a number of factors to determine which is the next node that should
2413 * appear on a given node's fallback list.  The node should not have appeared
2414 * already in @node's fallback list, and it should be the next closest node
2415 * according to the distance array (which contains arbitrary distance values
2416 * from each node to each node in the system), and should also prefer nodes
2417 * with no CPUs, since presumably they'll have very little allocation pressure
2418 * on them otherwise.
2419 * It returns -1 if no node is found.
2420 */
2421static int find_next_best_node(int node, nodemask_t *used_node_mask)
2422{
2423	int n, val;
2424	int min_val = INT_MAX;
2425	int best_node = -1;
2426	const struct cpumask *tmp = cpumask_of_node(0);
2427
2428	/* Use the local node if we haven't already */
2429	if (!node_isset(node, *used_node_mask)) {
2430		node_set(node, *used_node_mask);
2431		return node;
2432	}
2433
2434	for_each_node_state(n, N_HIGH_MEMORY) {
2435
2436		/* Don't want a node to appear more than once */
2437		if (node_isset(n, *used_node_mask))
2438			continue;
2439
2440		/* Use the distance array to find the distance */
2441		val = node_distance(node, n);
2442
2443		/* Penalize nodes under us ("prefer the next node") */
2444		val += (n < node);
2445
2446		/* Give preference to headless and unused nodes */
2447		tmp = cpumask_of_node(n);
2448		if (!cpumask_empty(tmp))
2449			val += PENALTY_FOR_NODE_WITH_CPUS;
2450
2451		/* Slight preference for less loaded node */
2452		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2453		val += node_load[n];
2454
2455		if (val < min_val) {
2456			min_val = val;
2457			best_node = n;
2458		}
2459	}
2460
2461	if (best_node >= 0)
2462		node_set(best_node, *used_node_mask);
2463
2464	return best_node;
2465}
2466
2467
2468/*
2469 * Build zonelists ordered by node and zones within node.
2470 * This results in maximum locality--normal zone overflows into local
2471 * DMA zone, if any--but risks exhausting DMA zone.
2472 */
2473static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2474{
2475	int j;
2476	struct zonelist *zonelist;
2477
2478	zonelist = &pgdat->node_zonelists[0];
2479	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2480		;
2481	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2482							MAX_NR_ZONES - 1);
2483	zonelist->_zonerefs[j].zone = NULL;
2484	zonelist->_zonerefs[j].zone_idx = 0;
2485}
2486
2487/*
2488 * Build gfp_thisnode zonelists
2489 */
2490static void build_thisnode_zonelists(pg_data_t *pgdat)
2491{
2492	int j;
2493	struct zonelist *zonelist;
2494
2495	zonelist = &pgdat->node_zonelists[1];
2496	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2497	zonelist->_zonerefs[j].zone = NULL;
2498	zonelist->_zonerefs[j].zone_idx = 0;
2499}
2500
2501/*
2502 * Build zonelists ordered by zone and nodes within zones.
2503 * This results in conserving DMA zone[s] until all Normal memory is
2504 * exhausted, but results in overflowing to remote node while memory
2505 * may still exist in local DMA zone.
2506 */
2507static int node_order[MAX_NUMNODES];
2508
2509static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2510{
2511	int pos, j, node;
2512	int zone_type;		/* needs to be signed */
2513	struct zone *z;
2514	struct zonelist *zonelist;
2515
2516	zonelist = &pgdat->node_zonelists[0];
2517	pos = 0;
2518	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2519		for (j = 0; j < nr_nodes; j++) {
2520			node = node_order[j];
2521			z = &NODE_DATA(node)->node_zones[zone_type];
2522			if (populated_zone(z)) {
2523				zoneref_set_zone(z,
2524					&zonelist->_zonerefs[pos++]);
2525				check_highest_zone(zone_type);
2526			}
2527		}
2528	}
2529	zonelist->_zonerefs[pos].zone = NULL;
2530	zonelist->_zonerefs[pos].zone_idx = 0;
2531}
2532
2533static int default_zonelist_order(void)
2534{
2535	int nid, zone_type;
2536	unsigned long low_kmem_size,total_size;
2537	struct zone *z;
2538	int average_size;
2539	/*
2540         * ZONE_DMA and ZONE_DMA32 can be very small area in the sytem.
2541	 * If they are really small and used heavily, the system can fall
2542	 * into OOM very easily.
2543	 * This function detect ZONE_DMA/DMA32 size and confgigures zone order.
2544	 */
2545	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
2546	low_kmem_size = 0;
2547	total_size = 0;
2548	for_each_online_node(nid) {
2549		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2550			z = &NODE_DATA(nid)->node_zones[zone_type];
2551			if (populated_zone(z)) {
2552				if (zone_type < ZONE_NORMAL)
2553					low_kmem_size += z->present_pages;
2554				total_size += z->present_pages;
2555			}
2556		}
2557	}
2558	if (!low_kmem_size ||  /* there are no DMA area. */
2559	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2560		return ZONELIST_ORDER_NODE;
2561	/*
2562	 * look into each node's config.
2563  	 * If there is a node whose DMA/DMA32 memory is very big area on
2564 	 * local memory, NODE_ORDER may be suitable.
2565         */
2566	average_size = total_size /
2567				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2568	for_each_online_node(nid) {
2569		low_kmem_size = 0;
2570		total_size = 0;
2571		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2572			z = &NODE_DATA(nid)->node_zones[zone_type];
2573			if (populated_zone(z)) {
2574				if (zone_type < ZONE_NORMAL)
2575					low_kmem_size += z->present_pages;
2576				total_size += z->present_pages;
2577			}
2578		}
2579		if (low_kmem_size &&
2580		    total_size > average_size && /* ignore small node */
2581		    low_kmem_size > total_size * 70/100)
2582			return ZONELIST_ORDER_NODE;
2583	}
2584	return ZONELIST_ORDER_ZONE;
2585}
2586
2587static void set_zonelist_order(void)
2588{
2589	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2590		current_zonelist_order = default_zonelist_order();
2591	else
2592		current_zonelist_order = user_zonelist_order;
2593}
2594
2595static void build_zonelists(pg_data_t *pgdat)
2596{
2597	int j, node, load;
2598	enum zone_type i;
2599	nodemask_t used_mask;
2600	int local_node, prev_node;
2601	struct zonelist *zonelist;
2602	int order = current_zonelist_order;
2603
2604	/* initialize zonelists */
2605	for (i = 0; i < MAX_ZONELISTS; i++) {
2606		zonelist = pgdat->node_zonelists + i;
2607		zonelist->_zonerefs[0].zone = NULL;
2608		zonelist->_zonerefs[0].zone_idx = 0;
2609	}
2610
2611	/* NUMA-aware ordering of nodes */
2612	local_node = pgdat->node_id;
2613	load = nr_online_nodes;
2614	prev_node = local_node;
2615	nodes_clear(used_mask);
2616
2617	memset(node_order, 0, sizeof(node_order));
2618	j = 0;
2619
2620	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2621		int distance = node_distance(local_node, node);
2622
2623		/*
2624		 * If another node is sufficiently far away then it is better
2625		 * to reclaim pages in a zone before going off node.
2626		 */
2627		if (distance > RECLAIM_DISTANCE)
2628			zone_reclaim_mode = 1;
2629
2630		/*
2631		 * We don't want to pressure a particular node.
2632		 * So adding penalty to the first node in same
2633		 * distance group to make it round-robin.
2634		 */
2635		if (distance != node_distance(local_node, prev_node))
2636			node_load[node] = load;
2637
2638		prev_node = node;
2639		load--;
2640		if (order == ZONELIST_ORDER_NODE)
2641			build_zonelists_in_node_order(pgdat, node);
2642		else
2643			node_order[j++] = node;	/* remember order */
2644	}
2645
2646	if (order == ZONELIST_ORDER_ZONE) {
2647		/* calculate node order -- i.e., DMA last! */
2648		build_zonelists_in_zone_order(pgdat, j);
2649	}
2650
2651	build_thisnode_zonelists(pgdat);
2652}
2653
2654/* Construct the zonelist performance cache - see further mmzone.h */
2655static void build_zonelist_cache(pg_data_t *pgdat)
2656{
2657	struct zonelist *zonelist;
2658	struct zonelist_cache *zlc;
2659	struct zoneref *z;
2660
2661	zonelist = &pgdat->node_zonelists[0];
2662	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2663	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2664	for (z = zonelist->_zonerefs; z->zone; z++)
2665		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2666}
2667
2668
2669#else	/* CONFIG_NUMA */
2670
2671static void set_zonelist_order(void)
2672{
2673	current_zonelist_order = ZONELIST_ORDER_ZONE;
2674}
2675
2676static void build_zonelists(pg_data_t *pgdat)
2677{
2678	int node, local_node;
2679	enum zone_type j;
2680	struct zonelist *zonelist;
2681
2682	local_node = pgdat->node_id;
2683
2684	zonelist = &pgdat->node_zonelists[0];
2685	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2686
2687	/*
2688	 * Now we build the zonelist so that it contains the zones
2689	 * of all the other nodes.
2690	 * We don't want to pressure a particular node, so when
2691	 * building the zones for node N, we make sure that the
2692	 * zones coming right after the local ones are those from
2693	 * node N+1 (modulo N)
2694	 */
2695	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2696		if (!node_online(node))
2697			continue;
2698		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2699							MAX_NR_ZONES - 1);
2700	}
2701	for (node = 0; node < local_node; node++) {
2702		if (!node_online(node))
2703			continue;
2704		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2705							MAX_NR_ZONES - 1);
2706	}
2707
2708	zonelist->_zonerefs[j].zone = NULL;
2709	zonelist->_zonerefs[j].zone_idx = 0;
2710}
2711
2712/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2713static void build_zonelist_cache(pg_data_t *pgdat)
2714{
2715	pgdat->node_zonelists[0].zlcache_ptr = NULL;
2716}
2717
2718#endif	/* CONFIG_NUMA */
2719
2720/* return values int ....just for stop_machine() */
2721static int __build_all_zonelists(void *dummy)
2722{
2723	int nid;
2724
2725#ifdef CONFIG_NUMA
2726	memset(node_load, 0, sizeof(node_load));
2727#endif
2728	for_each_online_node(nid) {
2729		pg_data_t *pgdat = NODE_DATA(nid);
2730
2731		build_zonelists(pgdat);
2732		build_zonelist_cache(pgdat);
2733	}
2734	return 0;
2735}
2736
2737void build_all_zonelists(void)
2738{
2739	set_zonelist_order();
2740
2741	if (system_state == SYSTEM_BOOTING) {
2742		__build_all_zonelists(NULL);
2743		mminit_verify_zonelist();
2744		cpuset_init_current_mems_allowed();
2745	} else {
2746		/* we have to stop all cpus to guarantee there is no user
2747		   of zonelist */
2748		stop_machine(__build_all_zonelists, NULL, NULL);
2749		/* cpuset refresh routine should be here */
2750	}
2751	vm_total_pages = nr_free_pagecache_pages();
2752	/*
2753	 * Disable grouping by mobility if the number of pages in the
2754	 * system is too low to allow the mechanism to work. It would be
2755	 * more accurate, but expensive to check per-zone. This check is
2756	 * made on memory-hotadd so a system can start with mobility
2757	 * disabled and enable it later
2758	 */
2759	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2760		page_group_by_mobility_disabled = 1;
2761	else
2762		page_group_by_mobility_disabled = 0;
2763
2764	printk("Built %i zonelists in %s order, mobility grouping %s.  "
2765		"Total pages: %ld\n",
2766			nr_online_nodes,
2767			zonelist_order_name[current_zonelist_order],
2768			page_group_by_mobility_disabled ? "off" : "on",
2769			vm_total_pages);
2770#ifdef CONFIG_NUMA
2771	printk("Policy zone: %s\n", zone_names[policy_zone]);
2772#endif
2773}
2774
2775/*
2776 * Helper functions to size the waitqueue hash table.
2777 * Essentially these want to choose hash table sizes sufficiently
2778 * large so that collisions trying to wait on pages are rare.
2779 * But in fact, the number of active page waitqueues on typical
2780 * systems is ridiculously low, less than 200. So this is even
2781 * conservative, even though it seems large.
2782 *
2783 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2784 * waitqueues, i.e. the size of the waitq table given the number of pages.
2785 */
2786#define PAGES_PER_WAITQUEUE	256
2787
2788#ifndef CONFIG_MEMORY_HOTPLUG
2789static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2790{
2791	unsigned long size = 1;
2792
2793	pages /= PAGES_PER_WAITQUEUE;
2794
2795	while (size < pages)
2796		size <<= 1;
2797
2798	/*
2799	 * Once we have dozens or even hundreds of threads sleeping
2800	 * on IO we've got bigger problems than wait queue collision.
2801	 * Limit the size of the wait table to a reasonable size.
2802	 */
2803	size = min(size, 4096UL);
2804
2805	return max(size, 4UL);
2806}
2807#else
2808/*
2809 * A zone's size might be changed by hot-add, so it is not possible to determine
2810 * a suitable size for its wait_table.  So we use the maximum size now.
2811 *
2812 * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
2813 *
2814 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2815 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2816 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2817 *
2818 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
2819 * or more by the traditional way. (See above).  It equals:
2820 *
2821 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2822 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2823 *    powerpc (64K page size)             : =  (32G +16M)byte.
2824 */
2825static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2826{
2827	return 4096UL;
2828}
2829#endif
2830
2831/*
2832 * This is an integer logarithm so that shifts can be used later
2833 * to extract the more random high bits from the multiplicative
2834 * hash function before the remainder is taken.
2835 */
2836static inline unsigned long wait_table_bits(unsigned long size)
2837{
2838	return ffz(~size);
2839}
2840
2841#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2842
2843/*
2844 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2845 * of blocks reserved is based on min_wmark_pages(zone). The memory within
2846 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
2847 * higher will lead to a bigger reserve which will get freed as contiguous
2848 * blocks as reclaim kicks in
2849 */
2850static void setup_zone_migrate_reserve(struct zone *zone)
2851{
2852	unsigned long start_pfn, pfn, end_pfn;
2853	struct page *page;
2854	unsigned long block_migratetype;
2855	int reserve;
2856
2857	/* Get the start pfn, end pfn and the number of blocks to reserve */
2858	start_pfn = zone->zone_start_pfn;
2859	end_pfn = start_pfn + zone->spanned_pages;
2860	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
2861							pageblock_order;
2862
2863	/*
2864	 * Reserve blocks are generally in place to help high-order atomic
2865	 * allocations that are short-lived. A min_free_kbytes value that
2866	 * would result in more than 2 reserve blocks for atomic allocations
2867	 * is assumed to be in place to help anti-fragmentation for the
2868	 * future allocation of hugepages at runtime.
2869	 */
2870	reserve = min(2, reserve);
2871
2872	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2873		if (!pfn_valid(pfn))
2874			continue;
2875		page = pfn_to_page(pfn);
2876
2877		/* Watch out for overlapping nodes */
2878		if (page_to_nid(page) != zone_to_nid(zone))
2879			continue;
2880
2881		/* Blocks with reserved pages will never free, skip them. */
2882		if (PageReserved(page))
2883			continue;
2884
2885		block_migratetype = get_pageblock_migratetype(page);
2886
2887		/* If this block is reserved, account for it */
2888		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2889			reserve--;
2890			continue;
2891		}
2892
2893		/* Suitable for reserving if this block is movable */
2894		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2895			set_pageblock_migratetype(page, MIGRATE_RESERVE);
2896			move_freepages_block(zone, page, MIGRATE_RESERVE);
2897			reserve--;
2898			continue;
2899		}
2900
2901		/*
2902		 * If the reserve is met and this is a previous reserved block,
2903		 * take it back
2904		 */
2905		if (block_migratetype == MIGRATE_RESERVE) {
2906			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2907			move_freepages_block(zone, page, MIGRATE_MOVABLE);
2908		}
2909	}
2910}
2911
2912/*
2913 * Initially all pages are reserved - free ones are freed
2914 * up by free_all_bootmem() once the early boot process is
2915 * done. Non-atomic initialization, single-pass.
2916 */
2917void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2918		unsigned long start_pfn, enum memmap_context context)
2919{
2920	struct page *page;
2921	unsigned long end_pfn = start_pfn + size;
2922	unsigned long pfn;
2923	struct zone *z;
2924
2925	if (highest_memmap_pfn < end_pfn - 1)
2926		highest_memmap_pfn = end_pfn - 1;
2927
2928	z = &NODE_DATA(nid)->node_zones[zone];
2929	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2930		/*
2931		 * There can be holes in boot-time mem_map[]s
2932		 * handed to this function.  They do not
2933		 * exist on hotplugged memory.
2934		 */
2935		if (context == MEMMAP_EARLY) {
2936			if (!early_pfn_valid(pfn))
2937				continue;
2938			if (!early_pfn_in_nid(pfn, nid))
2939				continue;
2940		}
2941		page = pfn_to_page(pfn);
2942		set_page_links(page, zone, nid, pfn);
2943		mminit_verify_page_links(page, zone, nid, pfn);
2944		init_page_count(page);
2945		reset_page_mapcount(page);
2946		SetPageReserved(page);
2947		/*
2948		 * Mark the block movable so that blocks are reserved for
2949		 * movable at startup. This will force kernel allocations
2950		 * to reserve their blocks rather than leaking throughout
2951		 * the address space during boot when many long-lived
2952		 * kernel allocations are made. Later some blocks near
2953		 * the start are marked MIGRATE_RESERVE by
2954		 * setup_zone_migrate_reserve()
2955		 *
2956		 * bitmap is created for zone's valid pfn range. but memmap
2957		 * can be created for invalid pages (for alignment)
2958		 * check here not to call set_pageblock_migratetype() against
2959		 * pfn out of zone.
2960		 */
2961		if ((z->zone_start_pfn <= pfn)
2962		    && (pfn < z->zone_start_pfn + z->spanned_pages)
2963		    && !(pfn & (pageblock_nr_pages - 1)))
2964			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2965
2966		INIT_LIST_HEAD(&page->lru);
2967#ifdef WANT_PAGE_VIRTUAL
2968		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
2969		if (!is_highmem_idx(zone))
2970			set_page_address(page, __va(pfn << PAGE_SHIFT));
2971#endif
2972	}
2973}
2974
2975static void __meminit zone_init_free_lists(struct zone *zone)
2976{
2977	int order, t;
2978	for_each_migratetype_order(order, t) {
2979		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2980		zone->free_area[order].nr_free = 0;
2981	}
2982}
2983
2984#ifndef __HAVE_ARCH_MEMMAP_INIT
2985#define memmap_init(size, nid, zone, start_pfn) \
2986	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
2987#endif
2988
2989static int zone_batchsize(struct zone *zone)
2990{
2991#ifdef CONFIG_MMU
2992	int batch;
2993
2994	/*
2995	 * The per-cpu-pages pools are set to around 1000th of the
2996	 * size of the zone.  But no more than 1/2 of a meg.
2997	 *
2998	 * OK, so we don't know how big the cache is.  So guess.
2999	 */
3000	batch = zone->present_pages / 1024;
3001	if (batch * PAGE_SIZE > 512 * 1024)
3002		batch = (512 * 1024) / PAGE_SIZE;
3003	batch /= 4;		/* We effectively *= 4 below */
3004	if (batch < 1)
3005		batch = 1;
3006
3007	/*
3008	 * Clamp the batch to a 2^n - 1 value. Having a power
3009	 * of 2 value was found to be more likely to have
3010	 * suboptimal cache aliasing properties in some cases.
3011	 *
3012	 * For example if 2 tasks are alternately allocating
3013	 * batches of pages, one task can end up with a lot
3014	 * of pages of one half of the possible page colors
3015	 * and the other with pages of the other colors.
3016	 */
3017	batch = rounddown_pow_of_two(batch + batch/2) - 1;
3018
3019	return batch;
3020
3021#else
3022	/* The deferral and batching of frees should be suppressed under NOMMU
3023	 * conditions.
3024	 *
3025	 * The problem is that NOMMU needs to be able to allocate large chunks
3026	 * of contiguous memory as there's no hardware page translation to
3027	 * assemble apparent contiguous memory from discontiguous pages.
3028	 *
3029	 * Queueing large contiguous runs of pages for batching, however,
3030	 * causes the pages to actually be freed in smaller chunks.  As there
3031	 * can be a significant delay between the individual batches being
3032	 * recycled, this leads to the once large chunks of space being
3033	 * fragmented and becoming unavailable for high-order allocations.
3034	 */
3035	return 0;
3036#endif
3037}
3038
3039static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3040{
3041	struct per_cpu_pages *pcp;
3042	int migratetype;
3043
3044	memset(p, 0, sizeof(*p));
3045
3046	pcp = &p->pcp;
3047	pcp->count = 0;
3048	pcp->high = 6 * batch;
3049	pcp->batch = max(1UL, 1 * batch);
3050	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3051		INIT_LIST_HEAD(&pcp->lists[migratetype]);
3052}
3053
3054/*
3055 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
3056 * to the value high for the pageset p.
3057 */
3058
3059static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3060				unsigned long high)
3061{
3062	struct per_cpu_pages *pcp;
3063
3064	pcp = &p->pcp;
3065	pcp->high = high;
3066	pcp->batch = max(1UL, high/4);
3067	if ((high/4) > (PAGE_SHIFT * 8))
3068		pcp->batch = PAGE_SHIFT * 8;
3069}
3070
3071
3072#ifdef CONFIG_NUMA
3073/*
3074 * Boot pageset table. One per cpu which is going to be used for all
3075 * zones and all nodes. The parameters will be set in such a way
3076 * that an item put on a list will immediately be handed over to
3077 * the buddy list. This is safe since pageset manipulation is done
3078 * with interrupts disabled.
3079 *
3080 * Some NUMA counter updates may also be caught by the boot pagesets.
3081 *
3082 * The boot_pagesets must be kept even after bootup is complete for
3083 * unused processors and/or zones. They do play a role for bootstrapping
3084 * hotplugged processors.
3085 *
3086 * zoneinfo_show() and maybe other functions do
3087 * not check if the processor is online before following the pageset pointer.
3088 * Other parts of the kernel may not check if the zone is available.
3089 */
3090static struct per_cpu_pageset boot_pageset[NR_CPUS];
3091
3092/*
3093 * Dynamically allocate memory for the
3094 * per cpu pageset array in struct zone.
3095 */
3096static int __cpuinit process_zones(int cpu)
3097{
3098	struct zone *zone, *dzone;
3099	int node = cpu_to_node(cpu);
3100
3101	node_set_state(node, N_CPU);	/* this node has a cpu */
3102
3103	for_each_populated_zone(zone) {
3104		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
3105					 GFP_KERNEL, node);
3106		if (!zone_pcp(zone, cpu))
3107			goto bad;
3108
3109		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
3110
3111		if (percpu_pagelist_fraction)
3112			setup_pagelist_highmark(zone_pcp(zone, cpu),
3113			 	(zone->present_pages / percpu_pagelist_fraction));
3114	}
3115
3116	return 0;
3117bad:
3118	for_each_zone(dzone) {
3119		if (!populated_zone(dzone))
3120			continue;
3121		if (dzone == zone)
3122			break;
3123		kfree(zone_pcp(dzone, cpu));
3124		zone_pcp(dzone, cpu) = &boot_pageset[cpu];
3125	}
3126	return -ENOMEM;
3127}
3128
3129static inline void free_zone_pagesets(int cpu)
3130{
3131	struct zone *zone;
3132
3133	for_each_zone(zone) {
3134		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
3135
3136		/* Free per_cpu_pageset if it is slab allocated */
3137		if (pset != &boot_pageset[cpu])
3138			kfree(pset);
3139		zone_pcp(zone, cpu) = &boot_pageset[cpu];
3140	}
3141}
3142
3143static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
3144		unsigned long action,
3145		void *hcpu)
3146{
3147	int cpu = (long)hcpu;
3148	int ret = NOTIFY_OK;
3149
3150	switch (action) {
3151	case CPU_UP_PREPARE:
3152	case CPU_UP_PREPARE_FROZEN:
3153		if (process_zones(cpu))
3154			ret = NOTIFY_BAD;
3155		break;
3156	case CPU_UP_CANCELED:
3157	case CPU_UP_CANCELED_FROZEN:
3158	case CPU_DEAD:
3159	case CPU_DEAD_FROZEN:
3160		free_zone_pagesets(cpu);
3161		break;
3162	default:
3163		break;
3164	}
3165	return ret;
3166}
3167
3168static struct notifier_block __cpuinitdata pageset_notifier =
3169	{ &pageset_cpuup_callback, NULL, 0 };
3170
3171void __init setup_per_cpu_pageset(void)
3172{
3173	int err;
3174
3175	/* Initialize per_cpu_pageset for cpu 0.
3176	 * A cpuup callback will do this for every cpu
3177	 * as it comes online
3178	 */
3179	err = process_zones(smp_processor_id());
3180	BUG_ON(err);
3181	register_cpu_notifier(&pageset_notifier);
3182}
3183
3184#endif
3185
3186static noinline __init_refok
3187int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3188{
3189	int i;
3190	struct pglist_data *pgdat = zone->zone_pgdat;
3191	size_t alloc_size;
3192
3193	/*
3194	 * The per-page waitqueue mechanism uses hashed waitqueues
3195	 * per zone.
3196	 */
3197	zone->wait_table_hash_nr_entries =
3198		 wait_table_hash_nr_entries(zone_size_pages);
3199	zone->wait_table_bits =
3200		wait_table_bits(zone->wait_table_hash_nr_entries);
3201	alloc_size = zone->wait_table_hash_nr_entries
3202					* sizeof(wait_queue_head_t);
3203
3204	if (!slab_is_available()) {
3205		zone->wait_table = (wait_queue_head_t *)
3206			alloc_bootmem_node(pgdat, alloc_size);
3207	} else {
3208		/*
3209		 * This case means that a zone whose size was 0 gets new memory
3210		 * via memory hot-add.
3211		 * But it may be the case that a new node was hot-added.  In
3212		 * this case vmalloc() will not be able to use this new node's
3213		 * memory - this wait_table must be initialized to use this new
3214		 * node itself as well.
3215		 * To use this new node's memory, further consideration will be
3216		 * necessary.
3217		 */
3218		zone->wait_table = vmalloc(alloc_size);
3219	}
3220	if (!zone->wait_table)
3221		return -ENOMEM;
3222
3223	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3224		init_waitqueue_head(zone->wait_table + i);
3225
3226	return 0;
3227}
3228
3229static int __zone_pcp_update(void *data)
3230{
3231	struct zone *zone = data;
3232	int cpu;
3233	unsigned long batch = zone_batchsize(zone), flags;
3234
3235	for (cpu = 0; cpu < NR_CPUS; cpu++) {
3236		struct per_cpu_pageset *pset;
3237		struct per_cpu_pages *pcp;
3238
3239		pset = zone_pcp(zone, cpu);
3240		pcp = &pset->pcp;
3241
3242		local_irq_save(flags);
3243		free_pcppages_bulk(zone, pcp->count, pcp);
3244		setup_pageset(pset, batch);
3245		local_irq_restore(flags);
3246	}
3247	return 0;
3248}
3249
3250void zone_pcp_update(struct zone *zone)
3251{
3252	stop_machine(__zone_pcp_update, zone, NULL);
3253}
3254
3255static __meminit void zone_pcp_init(struct zone *zone)
3256{
3257	int cpu;
3258	unsigned long batch = zone_batchsize(zone);
3259
3260	for (cpu = 0; cpu < NR_CPUS; cpu++) {
3261#ifdef CONFIG_NUMA
3262		/* Early boot. Slab allocator not functional yet */
3263		zone_pcp(zone, cpu) = &boot_pageset[cpu];
3264		setup_pageset(&boot_pageset[cpu],0);
3265#else
3266		setup_pageset(zone_pcp(zone,cpu), batch);
3267#endif
3268	}
3269	if (zone->present_pages)
3270		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
3271			zone->name, zone->present_pages, batch);
3272}
3273
3274__meminit int init_currently_empty_zone(struct zone *zone,
3275					unsigned long zone_start_pfn,
3276					unsigned long size,
3277					enum memmap_context context)
3278{
3279	struct pglist_data *pgdat = zone->zone_pgdat;
3280	int ret;
3281	ret = zone_wait_table_init(zone, size);
3282	if (ret)
3283		return ret;
3284	pgdat->nr_zones = zone_idx(zone) + 1;
3285
3286	zone->zone_start_pfn = zone_start_pfn;
3287
3288	mminit_dprintk(MMINIT_TRACE, "memmap_init",
3289			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
3290			pgdat->node_id,
3291			(unsigned long)zone_idx(zone),
3292			zone_start_pfn, (zone_start_pfn + size));
3293
3294	zone_init_free_lists(zone);
3295
3296	return 0;
3297}
3298
3299#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3300/*
3301 * Basic iterator support. Return the first range of PFNs for a node
3302 * Note: nid == MAX_NUMNODES returns first region regardless of node
3303 */
3304static int __meminit first_active_region_index_in_nid(int nid)
3305{
3306	int i;
3307
3308	for (i = 0; i < nr_nodemap_entries; i++)
3309		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3310			return i;
3311
3312	return -1;
3313}
3314
3315/*
3316 * Basic iterator support. Return the next active range of PFNs for a node
3317 * Note: nid == MAX_NUMNODES returns next region regardless of node
3318 */
3319static int __meminit next_active_region_index_in_nid(int index, int nid)
3320{
3321	for (index = index + 1; index < nr_nodemap_entries; index++)
3322		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3323			return index;
3324
3325	return -1;
3326}
3327
3328#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3329/*
3330 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3331 * Architectures may implement their own version but if add_active_range()
3332 * was used and there are no special requirements, this is a convenient
3333 * alternative
3334 */
3335int __meminit __early_pfn_to_nid(unsigned long pfn)
3336{
3337	int i;
3338
3339	for (i = 0; i < nr_nodemap_entries; i++) {
3340		unsigned long start_pfn = early_node_map[i].start_pfn;
3341		unsigned long end_pfn = early_node_map[i].end_pfn;
3342
3343		if (start_pfn <= pfn && pfn < end_pfn)
3344			return early_node_map[i].nid;
3345	}
3346	/* This is a memory hole */
3347	return -1;
3348}
3349#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3350
3351int __meminit early_pfn_to_nid(unsigned long pfn)
3352{
3353	int nid;
3354
3355	nid = __early_pfn_to_nid(pfn);
3356	if (nid >= 0)
3357		return nid;
3358	/* just returns 0 */
3359	return 0;
3360}
3361
3362#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3363bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3364{
3365	int nid;
3366
3367	nid = __early_pfn_to_nid(pfn);
3368	if (nid >= 0 && nid != node)
3369		return false;
3370	return true;
3371}
3372#endif
3373
3374/* Basic iterator support to walk early_node_map[] */
3375#define for_each_active_range_index_in_nid(i, nid) \
3376	for (i = first_active_region_index_in_nid(nid); i != -1; \
3377				i = next_active_region_index_in_nid(i, nid))
3378
3379/**
3380 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3381 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3382 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3383 *
3384 * If an architecture guarantees that all ranges registered with
3385 * add_active_ranges() contain no holes and may be freed, this
3386 * this function may be used instead of calling free_bootmem() manually.
3387 */
3388void __init free_bootmem_with_active_regions(int nid,
3389						unsigned long max_low_pfn)
3390{
3391	int i;
3392
3393	for_each_active_range_index_in_nid(i, nid) {
3394		unsigned long size_pages = 0;
3395		unsigned long end_pfn = early_node_map[i].end_pfn;
3396
3397		if (early_node_map[i].start_pfn >= max_low_pfn)
3398			continue;
3399
3400		if (end_pfn > max_low_pfn)
3401			end_pfn = max_low_pfn;
3402
3403		size_pages = end_pfn - early_node_map[i].start_pfn;
3404		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3405				PFN_PHYS(early_node_map[i].start_pfn),
3406				size_pages << PAGE_SHIFT);
3407	}
3408}
3409
3410void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3411{
3412	int i;
3413	int ret;
3414
3415	for_each_active_range_index_in_nid(i, nid) {
3416		ret = work_fn(early_node_map[i].start_pfn,
3417			      early_node_map[i].end_pfn, data);
3418		if (ret)
3419			break;
3420	}
3421}
3422/**
3423 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3424 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3425 *
3426 * If an architecture guarantees that all ranges registered with
3427 * add_active_ranges() contain no holes and may be freed, this
3428 * function may be used instead of calling memory_present() manually.
3429 */
3430void __init sparse_memory_present_with_active_regions(int nid)
3431{
3432	int i;
3433
3434	for_each_active_range_index_in_nid(i, nid)
3435		memory_present(early_node_map[i].nid,
3436				early_node_map[i].start_pfn,
3437				early_node_map[i].end_pfn);
3438}
3439
3440/**
3441 * get_pfn_range_for_nid - Return the start and end page frames for a node
3442 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3443 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3444 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3445 *
3446 * It returns the start and end page frame of a node based on information
3447 * provided by an arch calling add_active_range(). If called for a node
3448 * with no available memory, a warning is printed and the start and end
3449 * PFNs will be 0.
3450 */
3451void __meminit get_pfn_range_for_nid(unsigned int nid,
3452			unsigned long *start_pfn, unsigned long *end_pfn)
3453{
3454	int i;
3455	*start_pfn = -1UL;
3456	*end_pfn = 0;
3457
3458	for_each_active_range_index_in_nid(i, nid) {
3459		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3460		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3461	}
3462
3463	if (*start_pfn == -1UL)
3464		*start_pfn = 0;
3465}
3466
3467/*
3468 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3469 * assumption is made that zones within a node are ordered in monotonic
3470 * increasing memory addresses so that the "highest" populated zone is used
3471 */
3472static void __init find_usable_zone_for_movable(void)
3473{
3474	int zone_index;
3475	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3476		if (zone_index == ZONE_MOVABLE)
3477			continue;
3478
3479		if (arch_zone_highest_possible_pfn[zone_index] >
3480				arch_zone_lowest_possible_pfn[zone_index])
3481			break;
3482	}
3483
3484	VM_BUG_ON(zone_index == -1);
3485	movable_zone = zone_index;
3486}
3487
3488/*
3489 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3490 * because it is sized independant of architecture. Unlike the other zones,
3491 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3492 * in each node depending on the size of each node and how evenly kernelcore
3493 * is distributed. This helper function adjusts the zone ranges
3494 * provided by the architecture for a given node by using the end of the
3495 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3496 * zones within a node are in order of monotonic increases memory addresses
3497 */
3498static void __meminit adjust_zone_range_for_zone_movable(int nid,
3499					unsigned long zone_type,
3500					unsigned long node_start_pfn,
3501					unsigned long node_end_pfn,
3502					unsigned long *zone_start_pfn,
3503					unsigned long *zone_end_pfn)
3504{
3505	/* Only adjust if ZONE_MOVABLE is on this node */
3506	if (zone_movable_pfn[nid]) {
3507		/* Size ZONE_MOVABLE */
3508		if (zone_type == ZONE_MOVABLE) {
3509			*zone_start_pfn = zone_movable_pfn[nid];
3510			*zone_end_pfn = min(node_end_pfn,
3511				arch_zone_highest_possible_pfn[movable_zone]);
3512
3513		/* Adjust for ZONE_MOVABLE starting within this range */
3514		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3515				*zone_end_pfn > zone_movable_pfn[nid]) {
3516			*zone_end_pfn = zone_movable_pfn[nid];
3517
3518		/* Check if this whole range is within ZONE_MOVABLE */
3519		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3520			*zone_start_pfn = *zone_end_pfn;
3521	}
3522}
3523
3524/*
3525 * Return the number of pages a zone spans in a node, including holes
3526 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3527 */
3528static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3529					unsigned long zone_type,
3530					unsigned long *ignored)
3531{
3532	unsigned long node_start_pfn, node_end_pfn;
3533	unsigned long zone_start_pfn, zone_end_pfn;
3534
3535	/* Get the start and end of the node and zone */
3536	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3537	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3538	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3539	adjust_zone_range_for_zone_movable(nid, zone_type,
3540				node_start_pfn, node_end_pfn,
3541				&zone_start_pfn, &zone_end_pfn);
3542
3543	/* Check that this node has pages within the zone's required range */
3544	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3545		return 0;
3546
3547	/* Move the zone boundaries inside the node if necessary */
3548	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3549	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3550
3551	/* Return the spanned pages */
3552	return zone_end_pfn - zone_start_pfn;
3553}
3554
3555/*
3556 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3557 * then all holes in the requested range will be accounted for.
3558 */
3559static unsigned long __meminit __absent_pages_in_range(int nid,
3560				unsigned long range_start_pfn,
3561				unsigned long range_end_pfn)
3562{
3563	int i = 0;
3564	unsigned long prev_end_pfn = 0, hole_pages = 0;
3565	unsigned long start_pfn;
3566
3567	/* Find the end_pfn of the first active range of pfns in the node */
3568	i = first_active_region_index_in_nid(nid);
3569	if (i == -1)
3570		return 0;
3571
3572	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3573
3574	/* Account for ranges before physical memory on this node */
3575	if (early_node_map[i].start_pfn > range_start_pfn)
3576		hole_pages = prev_end_pfn - range_start_pfn;
3577
3578	/* Find all holes for the zone within the node */
3579	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3580
3581		/* No need to continue if prev_end_pfn is outside the zone */
3582		if (prev_end_pfn >= range_end_pfn)
3583			break;
3584
3585		/* Make sure the end of the zone is not within the hole */
3586		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3587		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3588
3589		/* Update the hole size cound and move on */
3590		if (start_pfn > range_start_pfn) {
3591			BUG_ON(prev_end_pfn > start_pfn);
3592			hole_pages += start_pfn - prev_end_pfn;
3593		}
3594		prev_end_pfn = early_node_map[i].end_pfn;
3595	}
3596
3597	/* Account for ranges past physical memory on this node */
3598	if (range_end_pfn > prev_end_pfn)
3599		hole_pages += range_end_pfn -
3600				max(range_start_pfn, prev_end_pfn);
3601
3602	return hole_pages;
3603}
3604
3605/**
3606 * absent_pages_in_range - Return number of page frames in holes within a range
3607 * @start_pfn: The start PFN to start searching for holes
3608 * @end_pfn: The end PFN to stop searching for holes
3609 *
3610 * It returns the number of pages frames in memory holes within a range.
3611 */
3612unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3613							unsigned long end_pfn)
3614{
3615	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3616}
3617
3618/* Return the number of page frames in holes in a zone on a node */
3619static unsigned long __meminit zone_absent_pages_in_node(int nid,
3620					unsigned long zone_type,
3621					unsigned long *ignored)
3622{
3623	unsigned long node_start_pfn, node_end_pfn;
3624	unsigned long zone_start_pfn, zone_end_pfn;
3625
3626	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3627	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3628							node_start_pfn);
3629	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3630							node_end_pfn);
3631
3632	adjust_zone_range_for_zone_movable(nid, zone_type,
3633			node_start_pfn, node_end_pfn,
3634			&zone_start_pfn, &zone_end_pfn);
3635	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3636}
3637
3638#else
3639static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3640					unsigned long zone_type,
3641					unsigned long *zones_size)
3642{
3643	return zones_size[zone_type];
3644}
3645
3646static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3647						unsigned long zone_type,
3648						unsigned long *zholes_size)
3649{
3650	if (!zholes_size)
3651		return 0;
3652
3653	return zholes_size[zone_type];
3654}
3655
3656#endif
3657
3658static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3659		unsigned long *zones_size, unsigned long *zholes_size)
3660{
3661	unsigned long realtotalpages, totalpages = 0;
3662	enum zone_type i;
3663
3664	for (i = 0; i < MAX_NR_ZONES; i++)
3665		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3666								zones_size);
3667	pgdat->node_spanned_pages = totalpages;
3668
3669	realtotalpages = totalpages;
3670	for (i = 0; i < MAX_NR_ZONES; i++)
3671		realtotalpages -=
3672			zone_absent_pages_in_node(pgdat->node_id, i,
3673								zholes_size);
3674	pgdat->node_present_pages = realtotalpages;
3675	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3676							realtotalpages);
3677}
3678
3679#ifndef CONFIG_SPARSEMEM
3680/*
3681 * Calculate the size of the zone->blockflags rounded to an unsigned long
3682 * Start by making sure zonesize is a multiple of pageblock_order by rounding
3683 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
3684 * round what is now in bits to nearest long in bits, then return it in
3685 * bytes.
3686 */
3687static unsigned long __init usemap_size(unsigned long zonesize)
3688{
3689	unsigned long usemapsize;
3690
3691	usemapsize = roundup(zonesize, pageblock_nr_pages);
3692	usemapsize = usemapsize >> pageblock_order;
3693	usemapsize *= NR_PAGEBLOCK_BITS;
3694	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3695
3696	return usemapsize / 8;
3697}
3698
3699static void __init setup_usemap(struct pglist_data *pgdat,
3700				struct zone *zone, unsigned long zonesize)
3701{
3702	unsigned long usemapsize = usemap_size(zonesize);
3703	zone->pageblock_flags = NULL;
3704	if (usemapsize)
3705		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3706}
3707#else
3708static void inline setup_usemap(struct pglist_data *pgdat,
3709				struct zone *zone, unsigned long zonesize) {}
3710#endif /* CONFIG_SPARSEMEM */
3711
3712#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3713
3714/* Return a sensible default order for the pageblock size. */
3715static inline int pageblock_default_order(void)
3716{
3717	if (HPAGE_SHIFT > PAGE_SHIFT)
3718		return HUGETLB_PAGE_ORDER;
3719
3720	return MAX_ORDER-1;
3721}
3722
3723/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3724static inline void __init set_pageblock_order(unsigned int order)
3725{
3726	/* Check that pageblock_nr_pages has not already been setup */
3727	if (pageblock_order)
3728		return;
3729
3730	/*
3731	 * Assume the largest contiguous order of interest is a huge page.
3732	 * This value may be variable depending on boot parameters on IA64
3733	 */
3734	pageblock_order = order;
3735}
3736#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3737
3738/*
3739 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3740 * and pageblock_default_order() are unused as pageblock_order is set
3741 * at compile-time. See include/linux/pageblock-flags.h for the values of
3742 * pageblock_order based on the kernel config
3743 */
3744static inline int pageblock_default_order(unsigned int order)
3745{
3746	return MAX_ORDER-1;
3747}
3748#define set_pageblock_order(x)	do {} while (0)
3749
3750#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3751
3752/*
3753 * Set up the zone data structures:
3754 *   - mark all pages reserved
3755 *   - mark all memory queues empty
3756 *   - clear the memory bitmaps
3757 */
3758static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3759		unsigned long *zones_size, unsigned long *zholes_size)
3760{
3761	enum zone_type j;
3762	int nid = pgdat->node_id;
3763	unsigned long zone_start_pfn = pgdat->node_start_pfn;
3764	int ret;
3765
3766	pgdat_resize_init(pgdat);
3767	pgdat->nr_zones = 0;
3768	init_waitqueue_head(&pgdat->kswapd_wait);
3769	pgdat->kswapd_max_order = 0;
3770	pgdat_page_cgroup_init(pgdat);
3771
3772	for (j = 0; j < MAX_NR_ZONES; j++) {
3773		struct zone *zone = pgdat->node_zones + j;
3774		unsigned long size, realsize, memmap_pages;
3775		enum lru_list l;
3776
3777		size = zone_spanned_pages_in_node(nid, j, zones_size);
3778		realsize = size - zone_absent_pages_in_node(nid, j,
3779								zholes_size);
3780
3781		/*
3782		 * Adjust realsize so that it accounts for how much memory
3783		 * is used by this zone for memmap. This affects the watermark
3784		 * and per-cpu initialisations
3785		 */
3786		memmap_pages =
3787			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3788		if (realsize >= memmap_pages) {
3789			realsize -= memmap_pages;
3790			if (memmap_pages)
3791				printk(KERN_DEBUG
3792				       "  %s zone: %lu pages used for memmap\n",
3793				       zone_names[j], memmap_pages);
3794		} else
3795			printk(KERN_WARNING
3796				"  %s zone: %lu pages exceeds realsize %lu\n",
3797				zone_names[j], memmap_pages, realsize);
3798
3799		/* Account for reserved pages */
3800		if (j == 0 && realsize > dma_reserve) {
3801			realsize -= dma_reserve;
3802			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
3803					zone_names[0], dma_reserve);
3804		}
3805
3806		if (!is_highmem_idx(j))
3807			nr_kernel_pages += realsize;
3808		nr_all_pages += realsize;
3809
3810		zone->spanned_pages = size;
3811		zone->present_pages = realsize;
3812#ifdef CONFIG_NUMA
3813		zone->node = nid;
3814		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3815						/ 100;
3816		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3817#endif
3818		zone->name = zone_names[j];
3819		spin_lock_init(&zone->lock);
3820		spin_lock_init(&zone->lru_lock);
3821		zone_seqlock_init(zone);
3822		zone->zone_pgdat = pgdat;
3823
3824		zone->prev_priority = DEF_PRIORITY;
3825
3826		zone_pcp_init(zone);
3827		for_each_lru(l) {
3828			INIT_LIST_HEAD(&zone->lru[l].list);
3829			zone->reclaim_stat.nr_saved_scan[l] = 0;
3830		}
3831		zone->reclaim_stat.recent_rotated[0] = 0;
3832		zone->reclaim_stat.recent_rotated[1] = 0;
3833		zone->reclaim_stat.recent_scanned[0] = 0;
3834		zone->reclaim_stat.recent_scanned[1] = 0;
3835		zap_zone_vm_stats(zone);
3836		zone->flags = 0;
3837		if (!size)
3838			continue;
3839
3840		set_pageblock_order(pageblock_default_order());
3841		setup_usemap(pgdat, zone, size);
3842		ret = init_currently_empty_zone(zone, zone_start_pfn,
3843						size, MEMMAP_EARLY);
3844		BUG_ON(ret);
3845		memmap_init(size, nid, j, zone_start_pfn);
3846		zone_start_pfn += size;
3847	}
3848}
3849
3850static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3851{
3852	/* Skip empty nodes */
3853	if (!pgdat->node_spanned_pages)
3854		return;
3855
3856#ifdef CONFIG_FLAT_NODE_MEM_MAP
3857	/* ia64 gets its own node_mem_map, before this, without bootmem */
3858	if (!pgdat->node_mem_map) {
3859		unsigned long size, start, end;
3860		struct page *map;
3861
3862		/*
3863		 * The zone's endpoints aren't required to be MAX_ORDER
3864		 * aligned but the node_mem_map endpoints must be in order
3865		 * for the buddy allocator to function correctly.
3866		 */
3867		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3868		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3869		end = ALIGN(end, MAX_ORDER_NR_PAGES);
3870		size =  (end - start) * sizeof(struct page);
3871		map = alloc_remap(pgdat->node_id, size);
3872		if (!map)
3873			map = alloc_bootmem_node(pgdat, size);
3874		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3875	}
3876#ifndef CONFIG_NEED_MULTIPLE_NODES
3877	/*
3878	 * With no DISCONTIG, the global mem_map is just set as node 0's
3879	 */
3880	if (pgdat == NODE_DATA(0)) {
3881		mem_map = NODE_DATA(0)->node_mem_map;
3882#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3883		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3884			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3885#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3886	}
3887#endif
3888#endif /* CONFIG_FLAT_NODE_MEM_MAP */
3889}
3890
3891void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3892		unsigned long node_start_pfn, unsigned long *zholes_size)
3893{
3894	pg_data_t *pgdat = NODE_DATA(nid);
3895
3896	pgdat->node_id = nid;
3897	pgdat->node_start_pfn = node_start_pfn;
3898	calculate_node_totalpages(pgdat, zones_size, zholes_size);
3899
3900	alloc_node_mem_map(pgdat);
3901#ifdef CONFIG_FLAT_NODE_MEM_MAP
3902	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3903		nid, (unsigned long)pgdat,
3904		(unsigned long)pgdat->node_mem_map);
3905#endif
3906
3907	free_area_init_core(pgdat, zones_size, zholes_size);
3908}
3909
3910#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3911
3912#if MAX_NUMNODES > 1
3913/*
3914 * Figure out the number of possible node ids.
3915 */
3916static void __init setup_nr_node_ids(void)
3917{
3918	unsigned int node;
3919	unsigned int highest = 0;
3920
3921	for_each_node_mask(node, node_possible_map)
3922		highest = node;
3923	nr_node_ids = highest + 1;
3924}
3925#else
3926static inline void setup_nr_node_ids(void)
3927{
3928}
3929#endif
3930
3931/**
3932 * add_active_range - Register a range of PFNs backed by physical memory
3933 * @nid: The node ID the range resides on
3934 * @start_pfn: The start PFN of the available physical memory
3935 * @end_pfn: The end PFN of the available physical memory
3936 *
3937 * These ranges are stored in an early_node_map[] and later used by
3938 * free_area_init_nodes() to calculate zone sizes and holes. If the
3939 * range spans a memory hole, it is up to the architecture to ensure
3940 * the memory is not freed by the bootmem allocator. If possible
3941 * the range being registered will be merged with existing ranges.
3942 */
3943void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3944						unsigned long end_pfn)
3945{
3946	int i;
3947
3948	mminit_dprintk(MMINIT_TRACE, "memory_register",
3949			"Entering add_active_range(%d, %#lx, %#lx) "
3950			"%d entries of %d used\n",
3951			nid, start_pfn, end_pfn,
3952			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3953
3954	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3955
3956	/* Merge with existing active regions if possible */
3957	for (i = 0; i < nr_nodemap_entries; i++) {
3958		if (early_node_map[i].nid != nid)
3959			continue;
3960
3961		/* Skip if an existing region covers this new one */
3962		if (start_pfn >= early_node_map[i].start_pfn &&
3963				end_pfn <= early_node_map[i].end_pfn)
3964			return;
3965
3966		/* Merge forward if suitable */
3967		if (start_pfn <= early_node_map[i].end_pfn &&
3968				end_pfn > early_node_map[i].end_pfn) {
3969			early_node_map[i].end_pfn = end_pfn;
3970			return;
3971		}
3972
3973		/* Merge backward if suitable */
3974		if (start_pfn < early_node_map[i].end_pfn &&
3975				end_pfn >= early_node_map[i].start_pfn) {
3976			early_node_map[i].start_pfn = start_pfn;
3977			return;
3978		}
3979	}
3980
3981	/* Check that early_node_map is large enough */
3982	if (i >= MAX_ACTIVE_REGIONS) {
3983		printk(KERN_CRIT "More than %d memory regions, truncating\n",
3984							MAX_ACTIVE_REGIONS);
3985		return;
3986	}
3987
3988	early_node_map[i].nid = nid;
3989	early_node_map[i].start_pfn = start_pfn;
3990	early_node_map[i].end_pfn = end_pfn;
3991	nr_nodemap_entries = i + 1;
3992}
3993
3994/**
3995 * remove_active_range - Shrink an existing registered range of PFNs
3996 * @nid: The node id the range is on that should be shrunk
3997 * @start_pfn: The new PFN of the range
3998 * @end_pfn: The new PFN of the range
3999 *
4000 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
4001 * The map is kept near the end physical page range that has already been
4002 * registered. This function allows an arch to shrink an existing registered
4003 * range.
4004 */
4005void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4006				unsigned long end_pfn)
4007{
4008	int i, j;
4009	int removed = 0;
4010
4011	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4012			  nid, start_pfn, end_pfn);
4013
4014	/* Find the old active region end and shrink */
4015	for_each_active_range_index_in_nid(i, nid) {
4016		if (early_node_map[i].start_pfn >= start_pfn &&
4017		    early_node_map[i].end_pfn <= end_pfn) {
4018			/* clear it */
4019			early_node_map[i].start_pfn = 0;
4020			early_node_map[i].end_pfn = 0;
4021			removed = 1;
4022			continue;
4023		}
4024		if (early_node_map[i].start_pfn < start_pfn &&
4025		    early_node_map[i].end_pfn > start_pfn) {
4026			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4027			early_node_map[i].end_pfn = start_pfn;
4028			if (temp_end_pfn > end_pfn)
4029				add_active_range(nid, end_pfn, temp_end_pfn);
4030			continue;
4031		}
4032		if (early_node_map[i].start_pfn >= start_pfn &&
4033		    early_node_map[i].end_pfn > end_pfn &&
4034		    early_node_map[i].start_pfn < end_pfn) {
4035			early_node_map[i].start_pfn = end_pfn;
4036			continue;
4037		}
4038	}
4039
4040	if (!removed)
4041		return;
4042
4043	/* remove the blank ones */
4044	for (i = nr_nodemap_entries - 1; i > 0; i--) {
4045		if (early_node_map[i].nid != nid)
4046			continue;
4047		if (early_node_map[i].end_pfn)
4048			continue;
4049		/* we found it, get rid of it */
4050		for (j = i; j < nr_nodemap_entries - 1; j++)
4051			memcpy(&early_node_map[j], &early_node_map[j+1],
4052				sizeof(early_node_map[j]));
4053		j = nr_nodemap_entries - 1;
4054		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4055		nr_nodemap_entries--;
4056	}
4057}
4058
4059/**
4060 * remove_all_active_ranges - Remove all currently registered regions
4061 *
4062 * During discovery, it may be found that a table like SRAT is invalid
4063 * and an alternative discovery method must be used. This function removes
4064 * all currently registered regions.
4065 */
4066void __init remove_all_active_ranges(void)
4067{
4068	memset(early_node_map, 0, sizeof(early_node_map));
4069	nr_nodemap_entries = 0;
4070}
4071
4072/* Compare two active node_active_regions */
4073static int __init cmp_node_active_region(const void *a, const void *b)
4074{
4075	struct node_active_region *arange = (struct node_active_region *)a;
4076	struct node_active_region *brange = (struct node_active_region *)b;
4077
4078	/* Done this way to avoid overflows */
4079	if (arange->start_pfn > brange->start_pfn)
4080		return 1;
4081	if (arange->start_pfn < brange->start_pfn)
4082		return -1;
4083
4084	return 0;
4085}
4086
4087/* sort the node_map by start_pfn */
4088static void __init sort_node_map(void)
4089{
4090	sort(early_node_map, (size_t)nr_nodemap_entries,
4091			sizeof(struct node_active_region),
4092			cmp_node_active_region, NULL);
4093}
4094
4095/* Find the lowest pfn for a node */
4096static unsigned long __init find_min_pfn_for_node(int nid)
4097{
4098	int i;
4099	unsigned long min_pfn = ULONG_MAX;
4100
4101	/* Assuming a sorted map, the first range found has the starting pfn */
4102	for_each_active_range_index_in_nid(i, nid)
4103		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4104
4105	if (min_pfn == ULONG_MAX) {
4106		printk(KERN_WARNING
4107			"Could not find start_pfn for node %d\n", nid);
4108		return 0;
4109	}
4110
4111	return min_pfn;
4112}
4113
4114/**
4115 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4116 *
4117 * It returns the minimum PFN based on information provided via
4118 * add_active_range().
4119 */
4120unsigned long __init find_min_pfn_with_active_regions(void)
4121{
4122	return find_min_pfn_for_node(MAX_NUMNODES);
4123}
4124
4125/*
4126 * early_calculate_totalpages()
4127 * Sum pages in active regions for movable zone.
4128 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4129 */
4130static unsigned long __init early_calculate_totalpages(void)
4131{
4132	int i;
4133	unsigned long totalpages = 0;
4134
4135	for (i = 0; i < nr_nodemap_entries; i++) {
4136		unsigned long pages = early_node_map[i].end_pfn -
4137						early_node_map[i].start_pfn;
4138		totalpages += pages;
4139		if (pages)
4140			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4141	}
4142  	return totalpages;
4143}
4144
4145/*
4146 * Find the PFN the Movable zone begins in each node. Kernel memory
4147 * is spread evenly between nodes as long as the nodes have enough
4148 * memory. When they don't, some nodes will have more kernelcore than
4149 * others
4150 */
4151static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4152{
4153	int i, nid;
4154	unsigned long usable_startpfn;
4155	unsigned long kernelcore_node, kernelcore_remaining;
4156	/* save the state before borrow the nodemask */
4157	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4158	unsigned long totalpages = early_calculate_totalpages();
4159	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4160
4161	/*
4162	 * If movablecore was specified, calculate what size of
4163	 * kernelcore that corresponds so that memory usable for
4164	 * any allocation type is evenly spread. If both kernelcore
4165	 * and movablecore are specified, then the value of kernelcore
4166	 * will be used for required_kernelcore if it's greater than
4167	 * what movablecore would have allowed.
4168	 */
4169	if (required_movablecore) {
4170		unsigned long corepages;
4171
4172		/*
4173		 * Round-up so that ZONE_MOVABLE is at least as large as what
4174		 * was requested by the user
4175		 */
4176		required_movablecore =
4177			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4178		corepages = totalpages - required_movablecore;
4179
4180		required_kernelcore = max(required_kernelcore, corepages);
4181	}
4182
4183	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
4184	if (!required_kernelcore)
4185		goto out;
4186
4187	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4188	find_usable_zone_for_movable();
4189	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4190
4191restart:
4192	/* Spread kernelcore memory as evenly as possible throughout nodes */
4193	kernelcore_node = required_kernelcore / usable_nodes;
4194	for_each_node_state(nid, N_HIGH_MEMORY) {
4195		/*
4196		 * Recalculate kernelcore_node if the division per node
4197		 * now exceeds what is necessary to satisfy the requested
4198		 * amount of memory for the kernel
4199		 */
4200		if (required_kernelcore < kernelcore_node)
4201			kernelcore_node = required_kernelcore / usable_nodes;
4202
4203		/*
4204		 * As the map is walked, we track how much memory is usable
4205		 * by the kernel using kernelcore_remaining. When it is
4206		 * 0, the rest of the node is usable by ZONE_MOVABLE
4207		 */
4208		kernelcore_remaining = kernelcore_node;
4209
4210		/* Go through each range of PFNs within this node */
4211		for_each_active_range_index_in_nid(i, nid) {
4212			unsigned long start_pfn, end_pfn;
4213			unsigned long size_pages;
4214
4215			start_pfn = max(early_node_map[i].start_pfn,
4216						zone_movable_pfn[nid]);
4217			end_pfn = early_node_map[i].end_pfn;
4218			if (start_pfn >= end_pfn)
4219				continue;
4220
4221			/* Account for what is only usable for kernelcore */
4222			if (start_pfn < usable_startpfn) {
4223				unsigned long kernel_pages;
4224				kernel_pages = min(end_pfn, usable_startpfn)
4225								- start_pfn;
4226
4227				kernelcore_remaining -= min(kernel_pages,
4228							kernelcore_remaining);
4229				required_kernelcore -= min(kernel_pages,
4230							required_kernelcore);
4231
4232				/* Continue if range is now fully accounted */
4233				if (end_pfn <= usable_startpfn) {
4234
4235					/*
4236					 * Push zone_movable_pfn to the end so
4237					 * that if we have to rebalance
4238					 * kernelcore across nodes, we will
4239					 * not double account here
4240					 */
4241					zone_movable_pfn[nid] = end_pfn;
4242					continue;
4243				}
4244				start_pfn = usable_startpfn;
4245			}
4246
4247			/*
4248			 * The usable PFN range for ZONE_MOVABLE is from
4249			 * start_pfn->end_pfn. Calculate size_pages as the
4250			 * number of pages used as kernelcore
4251			 */
4252			size_pages = end_pfn - start_pfn;
4253			if (size_pages > kernelcore_remaining)
4254				size_pages = kernelcore_remaining;
4255			zone_movable_pfn[nid] = start_pfn + size_pages;
4256
4257			/*
4258			 * Some kernelcore has been met, update counts and
4259			 * break if the kernelcore for this node has been
4260			 * satisified
4261			 */
4262			required_kernelcore -= min(required_kernelcore,
4263								size_pages);
4264			kernelcore_remaining -= size_pages;
4265			if (!kernelcore_remaining)
4266				break;
4267		}
4268	}
4269
4270	/*
4271	 * If there is still required_kernelcore, we do another pass with one
4272	 * less node in the count. This will push zone_movable_pfn[nid] further
4273	 * along on the nodes that still have memory until kernelcore is
4274	 * satisified
4275	 */
4276	usable_nodes--;
4277	if (usable_nodes && required_kernelcore > usable_nodes)
4278		goto restart;
4279
4280	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4281	for (nid = 0; nid < MAX_NUMNODES; nid++)
4282		zone_movable_pfn[nid] =
4283			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4284
4285out:
4286	/* restore the node_state */
4287	node_states[N_HIGH_MEMORY] = saved_node_state;
4288}
4289
4290/* Any regular memory on that node ? */
4291static void check_for_regular_memory(pg_data_t *pgdat)
4292{
4293#ifdef CONFIG_HIGHMEM
4294	enum zone_type zone_type;
4295
4296	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4297		struct zone *zone = &pgdat->node_zones[zone_type];
4298		if (zone->present_pages)
4299			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4300	}
4301#endif
4302}
4303
4304/**
4305 * free_area_init_nodes - Initialise all pg_data_t and zone data
4306 * @max_zone_pfn: an array of max PFNs for each zone
4307 *
4308 * This will call free_area_init_node() for each active node in the system.
4309 * Using the page ranges provided by add_active_range(), the size of each
4310 * zone in each node and their holes is calculated. If the maximum PFN
4311 * between two adjacent zones match, it is assumed that the zone is empty.
4312 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4313 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4314 * starts where the previous one ended. For example, ZONE_DMA32 starts
4315 * at arch_max_dma_pfn.
4316 */
4317void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4318{
4319	unsigned long nid;
4320	int i;
4321
4322	/* Sort early_node_map as initialisation assumes it is sorted */
4323	sort_node_map();
4324
4325	/* Record where the zone boundaries are */
4326	memset(arch_zone_lowest_possible_pfn, 0,
4327				sizeof(arch_zone_lowest_possible_pfn));
4328	memset(arch_zone_highest_possible_pfn, 0,
4329				sizeof(arch_zone_highest_possible_pfn));
4330	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4331	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4332	for (i = 1; i < MAX_NR_ZONES; i++) {
4333		if (i == ZONE_MOVABLE)
4334			continue;
4335		arch_zone_lowest_possible_pfn[i] =
4336			arch_zone_highest_possible_pfn[i-1];
4337		arch_zone_highest_possible_pfn[i] =
4338			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4339	}
4340	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4341	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4342
4343	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4344	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4345	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4346
4347	/* Print out the zone ranges */
4348	printk("Zone PFN ranges:\n");
4349	for (i = 0; i < MAX_NR_ZONES; i++) {
4350		if (i == ZONE_MOVABLE)
4351			continue;
4352		printk("  %-8s %0#10lx -> %0#10lx\n",
4353				zone_names[i],
4354				arch_zone_lowest_possible_pfn[i],
4355				arch_zone_highest_possible_pfn[i]);
4356	}
4357
4358	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4359	printk("Movable zone start PFN for each node\n");
4360	for (i = 0; i < MAX_NUMNODES; i++) {
4361		if (zone_movable_pfn[i])
4362			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4363	}
4364
4365	/* Print out the early_node_map[] */
4366	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4367	for (i = 0; i < nr_nodemap_entries; i++)
4368		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4369						early_node_map[i].start_pfn,
4370						early_node_map[i].end_pfn);
4371
4372	/* Initialise every node */
4373	mminit_verify_pageflags_layout();
4374	setup_nr_node_ids();
4375	for_each_online_node(nid) {
4376		pg_data_t *pgdat = NODE_DATA(nid);
4377		free_area_init_node(nid, NULL,
4378				find_min_pfn_for_node(nid), NULL);
4379
4380		/* Any memory on that node */
4381		if (pgdat->node_present_pages)
4382			node_set_state(nid, N_HIGH_MEMORY);
4383		check_for_regular_memory(pgdat);
4384	}
4385}
4386
4387static int __init cmdline_parse_core(char *p, unsigned long *core)
4388{
4389	unsigned long long coremem;
4390	if (!p)
4391		return -EINVAL;
4392
4393	coremem = memparse(p, &p);
4394	*core = coremem >> PAGE_SHIFT;
4395
4396	/* Paranoid check that UL is enough for the coremem value */
4397	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4398
4399	return 0;
4400}
4401
4402/*
4403 * kernelcore=size sets the amount of memory for use for allocations that
4404 * cannot be reclaimed or migrated.
4405 */
4406static int __init cmdline_parse_kernelcore(char *p)
4407{
4408	return cmdline_parse_core(p, &required_kernelcore);
4409}
4410
4411/*
4412 * movablecore=size sets the amount of memory for use for allocations that
4413 * can be reclaimed or migrated.
4414 */
4415static int __init cmdline_parse_movablecore(char *p)
4416{
4417	return cmdline_parse_core(p, &required_movablecore);
4418}
4419
4420early_param("kernelcore", cmdline_parse_kernelcore);
4421early_param("movablecore", cmdline_parse_movablecore);
4422
4423#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4424
4425/**
4426 * set_dma_reserve - set the specified number of pages reserved in the first zone
4427 * @new_dma_reserve: The number of pages to mark reserved
4428 *
4429 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4430 * In the DMA zone, a significant percentage may be consumed by kernel image
4431 * and other unfreeable allocations which can skew the watermarks badly. This
4432 * function may optionally be used to account for unfreeable pages in the
4433 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4434 * smaller per-cpu batchsize.
4435 */
4436void __init set_dma_reserve(unsigned long new_dma_reserve)
4437{
4438	dma_reserve = new_dma_reserve;
4439}
4440
4441#ifndef CONFIG_NEED_MULTIPLE_NODES
4442struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4443EXPORT_SYMBOL(contig_page_data);
4444#endif
4445
4446void __init free_area_init(unsigned long *zones_size)
4447{
4448	free_area_init_node(0, zones_size,
4449			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4450}
4451
4452static int page_alloc_cpu_notify(struct notifier_block *self,
4453				 unsigned long action, void *hcpu)
4454{
4455	int cpu = (unsigned long)hcpu;
4456
4457	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4458		drain_pages(cpu);
4459
4460		/*
4461		 * Spill the event counters of the dead processor
4462		 * into the current processors event counters.
4463		 * This artificially elevates the count of the current
4464		 * processor.
4465		 */
4466		vm_events_fold_cpu(cpu);
4467
4468		/*
4469		 * Zero the differential counters of the dead processor
4470		 * so that the vm statistics are consistent.
4471		 *
4472		 * This is only okay since the processor is dead and cannot
4473		 * race with what we are doing.
4474		 */
4475		refresh_cpu_vm_stats(cpu);
4476	}
4477	return NOTIFY_OK;
4478}
4479
4480void __init page_alloc_init(void)
4481{
4482	hotcpu_notifier(page_alloc_cpu_notify, 0);
4483}
4484
4485/*
4486 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
4487 *	or min_free_kbytes changes.
4488 */
4489static void calculate_totalreserve_pages(void)
4490{
4491	struct pglist_data *pgdat;
4492	unsigned long reserve_pages = 0;
4493	enum zone_type i, j;
4494
4495	for_each_online_pgdat(pgdat) {
4496		for (i = 0; i < MAX_NR_ZONES; i++) {
4497			struct zone *zone = pgdat->node_zones + i;
4498			unsigned long max = 0;
4499
4500			/* Find valid and maximum lowmem_reserve in the zone */
4501			for (j = i; j < MAX_NR_ZONES; j++) {
4502				if (zone->lowmem_reserve[j] > max)
4503					max = zone->lowmem_reserve[j];
4504			}
4505
4506			/* we treat the high watermark as reserved pages. */
4507			max += high_wmark_pages(zone);
4508
4509			if (max > zone->present_pages)
4510				max = zone->present_pages;
4511			reserve_pages += max;
4512		}
4513	}
4514	totalreserve_pages = reserve_pages;
4515}
4516
4517/*
4518 * setup_per_zone_lowmem_reserve - called whenever
4519 *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
4520 *	has a correct pages reserved value, so an adequate number of
4521 *	pages are left in the zone after a successful __alloc_pages().
4522 */
4523static void setup_per_zone_lowmem_reserve(void)
4524{
4525	struct pglist_data *pgdat;
4526	enum zone_type j, idx;
4527
4528	for_each_online_pgdat(pgdat) {
4529		for (j = 0; j < MAX_NR_ZONES; j++) {
4530			struct zone *zone = pgdat->node_zones + j;
4531			unsigned long present_pages = zone->present_pages;
4532
4533			zone->lowmem_reserve[j] = 0;
4534
4535			idx = j;
4536			while (idx) {
4537				struct zone *lower_zone;
4538
4539				idx--;
4540
4541				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4542					sysctl_lowmem_reserve_ratio[idx] = 1;
4543
4544				lower_zone = pgdat->node_zones + idx;
4545				lower_zone->lowmem_reserve[j] = present_pages /
4546					sysctl_lowmem_reserve_ratio[idx];
4547				present_pages += lower_zone->present_pages;
4548			}
4549		}
4550	}
4551
4552	/* update totalreserve_pages */
4553	calculate_totalreserve_pages();
4554}
4555
4556/**
4557 * setup_per_zone_wmarks - called when min_free_kbytes changes
4558 * or when memory is hot-{added|removed}
4559 *
4560 * Ensures that the watermark[min,low,high] values for each zone are set
4561 * correctly with respect to min_free_kbytes.
4562 */
4563void setup_per_zone_wmarks(void)
4564{
4565	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4566	unsigned long lowmem_pages = 0;
4567	struct zone *zone;
4568	unsigned long flags;
4569
4570	/* Calculate total number of !ZONE_HIGHMEM pages */
4571	for_each_zone(zone) {
4572		if (!is_highmem(zone))
4573			lowmem_pages += zone->present_pages;
4574	}
4575
4576	for_each_zone(zone) {
4577		u64 tmp;
4578
4579		spin_lock_irqsave(&zone->lock, flags);
4580		tmp = (u64)pages_min * zone->present_pages;
4581		do_div(tmp, lowmem_pages);
4582		if (is_highmem(zone)) {
4583			/*
4584			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4585			 * need highmem pages, so cap pages_min to a small
4586			 * value here.
4587			 *
4588			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
4589			 * deltas controls asynch page reclaim, and so should
4590			 * not be capped for highmem.
4591			 */
4592			int min_pages;
4593
4594			min_pages = zone->present_pages / 1024;
4595			if (min_pages < SWAP_CLUSTER_MAX)
4596				min_pages = SWAP_CLUSTER_MAX;
4597			if (min_pages > 128)
4598				min_pages = 128;
4599			zone->watermark[WMARK_MIN] = min_pages;
4600		} else {
4601			/*
4602			 * If it's a lowmem zone, reserve a number of pages
4603			 * proportionate to the zone's size.
4604			 */
4605			zone->watermark[WMARK_MIN] = tmp;
4606		}
4607
4608		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
4609		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4610		setup_zone_migrate_reserve(zone);
4611		spin_unlock_irqrestore(&zone->lock, flags);
4612	}
4613
4614	/* update totalreserve_pages */
4615	calculate_totalreserve_pages();
4616}
4617
4618/*
4619 * The inactive anon list should be small enough that the VM never has to
4620 * do too much work, but large enough that each inactive page has a chance
4621 * to be referenced again before it is swapped out.
4622 *
4623 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4624 * INACTIVE_ANON pages on this zone's LRU, maintained by the
4625 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4626 * the anonymous pages are kept on the inactive list.
4627 *
4628 * total     target    max
4629 * memory    ratio     inactive anon
4630 * -------------------------------------
4631 *   10MB       1         5MB
4632 *  100MB       1        50MB
4633 *    1GB       3       250MB
4634 *   10GB      10       0.9GB
4635 *  100GB      31         3GB
4636 *    1TB     101        10GB
4637 *   10TB     320        32GB
4638 */
4639void calculate_zone_inactive_ratio(struct zone *zone)
4640{
4641	unsigned int gb, ratio;
4642
4643	/* Zone size in gigabytes */
4644	gb = zone->present_pages >> (30 - PAGE_SHIFT);
4645	if (gb)
4646		ratio = int_sqrt(10 * gb);
4647	else
4648		ratio = 1;
4649
4650	zone->inactive_ratio = ratio;
4651}
4652
4653static void __init setup_per_zone_inactive_ratio(void)
4654{
4655	struct zone *zone;
4656
4657	for_each_zone(zone)
4658		calculate_zone_inactive_ratio(zone);
4659}
4660
4661/*
4662 * Initialise min_free_kbytes.
4663 *
4664 * For small machines we want it small (128k min).  For large machines
4665 * we want it large (64MB max).  But it is not linear, because network
4666 * bandwidth does not increase linearly with machine size.  We use
4667 *
4668 * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4669 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
4670 *
4671 * which yields
4672 *
4673 * 16MB:	512k
4674 * 32MB:	724k
4675 * 64MB:	1024k
4676 * 128MB:	1448k
4677 * 256MB:	2048k
4678 * 512MB:	2896k
4679 * 1024MB:	4096k
4680 * 2048MB:	5792k
4681 * 4096MB:	8192k
4682 * 8192MB:	11584k
4683 * 16384MB:	16384k
4684 */
4685static int __init init_per_zone_wmark_min(void)
4686{
4687	unsigned long lowmem_kbytes;
4688
4689	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4690
4691	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4692	if (min_free_kbytes < 128)
4693		min_free_kbytes = 128;
4694	if (min_free_kbytes > 65536)
4695		min_free_kbytes = 65536;
4696	setup_per_zone_wmarks();
4697	setup_per_zone_lowmem_reserve();
4698	setup_per_zone_inactive_ratio();
4699	return 0;
4700}
4701module_init(init_per_zone_wmark_min)
4702
4703/*
4704 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4705 *	that we can call two helper functions whenever min_free_kbytes
4706 *	changes.
4707 */
4708int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4709	void __user *buffer, size_t *length, loff_t *ppos)
4710{
4711	proc_dointvec(table, write, buffer, length, ppos);
4712	if (write)
4713		setup_per_zone_wmarks();
4714	return 0;
4715}
4716
4717#ifdef CONFIG_NUMA
4718int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4719	void __user *buffer, size_t *length, loff_t *ppos)
4720{
4721	struct zone *zone;
4722	int rc;
4723
4724	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4725	if (rc)
4726		return rc;
4727
4728	for_each_zone(zone)
4729		zone->min_unmapped_pages = (zone->present_pages *
4730				sysctl_min_unmapped_ratio) / 100;
4731	return 0;
4732}
4733
4734int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4735	void __user *buffer, size_t *length, loff_t *ppos)
4736{
4737	struct zone *zone;
4738	int rc;
4739
4740	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4741	if (rc)
4742		return rc;
4743
4744	for_each_zone(zone)
4745		zone->min_slab_pages = (zone->present_pages *
4746				sysctl_min_slab_ratio) / 100;
4747	return 0;
4748}
4749#endif
4750
4751/*
4752 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4753 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4754 *	whenever sysctl_lowmem_reserve_ratio changes.
4755 *
4756 * The reserve ratio obviously has absolutely no relation with the
4757 * minimum watermarks. The lowmem reserve ratio can only make sense
4758 * if in function of the boot time zone sizes.
4759 */
4760int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4761	void __user *buffer, size_t *length, loff_t *ppos)
4762{
4763	proc_dointvec_minmax(table, write, buffer, length, ppos);
4764	setup_per_zone_lowmem_reserve();
4765	return 0;
4766}
4767
4768/*
4769 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4770 * cpu.  It is the fraction of total pages in each zone that a hot per cpu pagelist
4771 * can have before it gets flushed back to buddy allocator.
4772 */
4773
4774int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4775	void __user *buffer, size_t *length, loff_t *ppos)
4776{
4777	struct zone *zone;
4778	unsigned int cpu;
4779	int ret;
4780
4781	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
4782	if (!write || (ret == -EINVAL))
4783		return ret;
4784	for_each_populated_zone(zone) {
4785		for_each_online_cpu(cpu) {
4786			unsigned long  high;
4787			high = zone->present_pages / percpu_pagelist_fraction;
4788			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4789		}
4790	}
4791	return 0;
4792}
4793
4794int hashdist = HASHDIST_DEFAULT;
4795
4796#ifdef CONFIG_NUMA
4797static int __init set_hashdist(char *str)
4798{
4799	if (!str)
4800		return 0;
4801	hashdist = simple_strtoul(str, &str, 0);
4802	return 1;
4803}
4804__setup("hashdist=", set_hashdist);
4805#endif
4806
4807/*
4808 * allocate a large system hash table from bootmem
4809 * - it is assumed that the hash table must contain an exact power-of-2
4810 *   quantity of entries
4811 * - limit is the number of hash buckets, not the total allocation size
4812 */
4813void *__init alloc_large_system_hash(const char *tablename,
4814				     unsigned long bucketsize,
4815				     unsigned long numentries,
4816				     int scale,
4817				     int flags,
4818				     unsigned int *_hash_shift,
4819				     unsigned int *_hash_mask,
4820				     unsigned long limit)
4821{
4822	unsigned long long max = limit;
4823	unsigned long log2qty, size;
4824	void *table = NULL;
4825
4826	/* allow the kernel cmdline to have a say */
4827	if (!numentries) {
4828		/* round applicable memory size up to nearest megabyte */
4829		numentries = nr_kernel_pages;
4830		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4831		numentries >>= 20 - PAGE_SHIFT;
4832		numentries <<= 20 - PAGE_SHIFT;
4833
4834		/* limit to 1 bucket per 2^scale bytes of low memory */
4835		if (scale > PAGE_SHIFT)
4836			numentries >>= (scale - PAGE_SHIFT);
4837		else
4838			numentries <<= (PAGE_SHIFT - scale);
4839
4840		/* Make sure we've got at least a 0-order allocation.. */
4841		if (unlikely(flags & HASH_SMALL)) {
4842			/* Makes no sense without HASH_EARLY */
4843			WARN_ON(!(flags & HASH_EARLY));
4844			if (!(numentries >> *_hash_shift)) {
4845				numentries = 1UL << *_hash_shift;
4846				BUG_ON(!numentries);
4847			}
4848		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4849			numentries = PAGE_SIZE / bucketsize;
4850	}
4851	numentries = roundup_pow_of_two(numentries);
4852
4853	/* limit allocation size to 1/16 total memory by default */
4854	if (max == 0) {
4855		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4856		do_div(max, bucketsize);
4857	}
4858
4859	if (numentries > max)
4860		numentries = max;
4861
4862	log2qty = ilog2(numentries);
4863
4864	do {
4865		size = bucketsize << log2qty;
4866		if (flags & HASH_EARLY)
4867			table = alloc_bootmem_nopanic(size);
4868		else if (hashdist)
4869			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4870		else {
4871			/*
4872			 * If bucketsize is not a power-of-two, we may free
4873			 * some pages at the end of hash table which
4874			 * alloc_pages_exact() automatically does
4875			 */
4876			if (get_order(size) < MAX_ORDER) {
4877				table = alloc_pages_exact(size, GFP_ATOMIC);
4878				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
4879			}
4880		}
4881	} while (!table && size > PAGE_SIZE && --log2qty);
4882
4883	if (!table)
4884		panic("Failed to allocate %s hash table\n", tablename);
4885
4886	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4887	       tablename,
4888	       (1U << log2qty),
4889	       ilog2(size) - PAGE_SHIFT,
4890	       size);
4891
4892	if (_hash_shift)
4893		*_hash_shift = log2qty;
4894	if (_hash_mask)
4895		*_hash_mask = (1 << log2qty) - 1;
4896
4897	return table;
4898}
4899
4900/* Return a pointer to the bitmap storing bits affecting a block of pages */
4901static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4902							unsigned long pfn)
4903{
4904#ifdef CONFIG_SPARSEMEM
4905	return __pfn_to_section(pfn)->pageblock_flags;
4906#else
4907	return zone->pageblock_flags;
4908#endif /* CONFIG_SPARSEMEM */
4909}
4910
4911static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4912{
4913#ifdef CONFIG_SPARSEMEM
4914	pfn &= (PAGES_PER_SECTION-1);
4915	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4916#else
4917	pfn = pfn - zone->zone_start_pfn;
4918	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4919#endif /* CONFIG_SPARSEMEM */
4920}
4921
4922/**
4923 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4924 * @page: The page within the block of interest
4925 * @start_bitidx: The first bit of interest to retrieve
4926 * @end_bitidx: The last bit of interest
4927 * returns pageblock_bits flags
4928 */
4929unsigned long get_pageblock_flags_group(struct page *page,
4930					int start_bitidx, int end_bitidx)
4931{
4932	struct zone *zone;
4933	unsigned long *bitmap;
4934	unsigned long pfn, bitidx;
4935	unsigned long flags = 0;
4936	unsigned long value = 1;
4937
4938	zone = page_zone(page);
4939	pfn = page_to_pfn(page);
4940	bitmap = get_pageblock_bitmap(zone, pfn);
4941	bitidx = pfn_to_bitidx(zone, pfn);
4942
4943	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4944		if (test_bit(bitidx + start_bitidx, bitmap))
4945			flags |= value;
4946
4947	return flags;
4948}
4949
4950/**
4951 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4952 * @page: The page within the block of interest
4953 * @start_bitidx: The first bit of interest
4954 * @end_bitidx: The last bit of interest
4955 * @flags: The flags to set
4956 */
4957void set_pageblock_flags_group(struct page *page, unsigned long flags,
4958					int start_bitidx, int end_bitidx)
4959{
4960	struct zone *zone;
4961	unsigned long *bitmap;
4962	unsigned long pfn, bitidx;
4963	unsigned long value = 1;
4964
4965	zone = page_zone(page);
4966	pfn = page_to_pfn(page);
4967	bitmap = get_pageblock_bitmap(zone, pfn);
4968	bitidx = pfn_to_bitidx(zone, pfn);
4969	VM_BUG_ON(pfn < zone->zone_start_pfn);
4970	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4971
4972	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4973		if (flags & value)
4974			__set_bit(bitidx + start_bitidx, bitmap);
4975		else
4976			__clear_bit(bitidx + start_bitidx, bitmap);
4977}
4978
4979/*
4980 * This is designed as sub function...plz see page_isolation.c also.
4981 * set/clear page block's type to be ISOLATE.
4982 * page allocater never alloc memory from ISOLATE block.
4983 */
4984
4985int set_migratetype_isolate(struct page *page)
4986{
4987	struct zone *zone;
4988	unsigned long flags;
4989	int ret = -EBUSY;
4990	int zone_idx;
4991
4992	zone = page_zone(page);
4993	zone_idx = zone_idx(zone);
4994	spin_lock_irqsave(&zone->lock, flags);
4995	/*
4996	 * In future, more migrate types will be able to be isolation target.
4997	 */
4998	if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE &&
4999	    zone_idx != ZONE_MOVABLE)
5000		goto out;
5001	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5002	move_freepages_block(zone, page, MIGRATE_ISOLATE);
5003	ret = 0;
5004out:
5005	spin_unlock_irqrestore(&zone->lock, flags);
5006	if (!ret)
5007		drain_all_pages();
5008	return ret;
5009}
5010
5011void unset_migratetype_isolate(struct page *page)
5012{
5013	struct zone *zone;
5014	unsigned long flags;
5015	zone = page_zone(page);
5016	spin_lock_irqsave(&zone->lock, flags);
5017	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5018		goto out;
5019	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5020	move_freepages_block(zone, page, MIGRATE_MOVABLE);
5021out:
5022	spin_unlock_irqrestore(&zone->lock, flags);
5023}
5024
5025#ifdef CONFIG_MEMORY_HOTREMOVE
5026/*
5027 * All pages in the range must be isolated before calling this.
5028 */
5029void
5030__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5031{
5032	struct page *page;
5033	struct zone *zone;
5034	int order, i;
5035	unsigned long pfn;
5036	unsigned long flags;
5037	/* find the first valid pfn */
5038	for (pfn = start_pfn; pfn < end_pfn; pfn++)
5039		if (pfn_valid(pfn))
5040			break;
5041	if (pfn == end_pfn)
5042		return;
5043	zone = page_zone(pfn_to_page(pfn));
5044	spin_lock_irqsave(&zone->lock, flags);
5045	pfn = start_pfn;
5046	while (pfn < end_pfn) {
5047		if (!pfn_valid(pfn)) {
5048			pfn++;
5049			continue;
5050		}
5051		page = pfn_to_page(pfn);
5052		BUG_ON(page_count(page));
5053		BUG_ON(!PageBuddy(page));
5054		order = page_order(page);
5055#ifdef CONFIG_DEBUG_VM
5056		printk(KERN_INFO "remove from free list %lx %d %lx\n",
5057		       pfn, 1 << order, end_pfn);
5058#endif
5059		list_del(&page->lru);
5060		rmv_page_order(page);
5061		zone->free_area[order].nr_free--;
5062		__mod_zone_page_state(zone, NR_FREE_PAGES,
5063				      - (1UL << order));
5064		for (i = 0; i < (1 << order); i++)
5065			SetPageReserved((page+i));
5066		pfn += (1 << order);
5067	}
5068	spin_unlock_irqrestore(&zone->lock, flags);
5069}
5070#endif
5071