page_alloc.c revision 5c87eada68fe5d29a5f67528f81b6e45124f579b
1/*
2 *  linux/mm/page_alloc.c
3 *
4 *  Manages the free list; the system allocates free pages here.
5 *  Note that kmalloc() lives in slab.c
6 *
7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8 *  Swap reorganised 29.12.95, Stephen Tweedie
9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
22#include <linux/jiffies.h>
23#include <linux/bootmem.h>
24#include <linux/compiler.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/suspend.h>
28#include <linux/pagevec.h>
29#include <linux/blkdev.h>
30#include <linux/slab.h>
31#include <linux/oom.h>
32#include <linux/notifier.h>
33#include <linux/topology.h>
34#include <linux/sysctl.h>
35#include <linux/cpu.h>
36#include <linux/cpuset.h>
37#include <linux/memory_hotplug.h>
38#include <linux/nodemask.h>
39#include <linux/vmalloc.h>
40#include <linux/mempolicy.h>
41#include <linux/stop_machine.h>
42#include <linux/sort.h>
43#include <linux/pfn.h>
44#include <linux/backing-dev.h>
45#include <linux/fault-inject.h>
46#include <linux/page-isolation.h>
47#include <linux/page_cgroup.h>
48#include <linux/debugobjects.h>
49#include <linux/kmemleak.h>
50
51#include <asm/tlbflush.h>
52#include <asm/div64.h>
53#include "internal.h"
54
55/*
56 * Array of node states.
57 */
58nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
59	[N_POSSIBLE] = NODE_MASK_ALL,
60	[N_ONLINE] = { { [0] = 1UL } },
61#ifndef CONFIG_NUMA
62	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
63#ifdef CONFIG_HIGHMEM
64	[N_HIGH_MEMORY] = { { [0] = 1UL } },
65#endif
66	[N_CPU] = { { [0] = 1UL } },
67#endif	/* NUMA */
68};
69EXPORT_SYMBOL(node_states);
70
71unsigned long totalram_pages __read_mostly;
72unsigned long totalreserve_pages __read_mostly;
73unsigned long highest_memmap_pfn __read_mostly;
74int percpu_pagelist_fraction;
75
76#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
77int pageblock_order __read_mostly;
78#endif
79
80static void __free_pages_ok(struct page *page, unsigned int order);
81
82/*
83 * results with 256, 32 in the lowmem_reserve sysctl:
84 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
85 *	1G machine -> (16M dma, 784M normal, 224M high)
86 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
87 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
88 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
89 *
90 * TBD: should special case ZONE_DMA32 machines here - in those we normally
91 * don't need any ZONE_NORMAL reservation
92 */
93int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
94#ifdef CONFIG_ZONE_DMA
95	 256,
96#endif
97#ifdef CONFIG_ZONE_DMA32
98	 256,
99#endif
100#ifdef CONFIG_HIGHMEM
101	 32,
102#endif
103	 32,
104};
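/*
 * Worked example of the ratios above (illustrative numbers only): on the
 * 1G machine described in the comment, a 784M ZONE_NORMAL with a ratio of
 * 256 keeps roughly 784M/256 ~= 3M of ZONE_DMA off-limits to GFP_KERNEL
 * allocations, while the HIGHMEM ratio of 32 keeps about 224M/32 = 7M of
 * ZONE_NORMAL off-limits to GFP_HIGHUSER allocations. The per-zone
 * lowmem_reserve values are recomputed from these ratios at boot and
 * whenever the sysctl changes.
 */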
105
106EXPORT_SYMBOL(totalram_pages);
107
108static char * const zone_names[MAX_NR_ZONES] = {
109#ifdef CONFIG_ZONE_DMA
110	 "DMA",
111#endif
112#ifdef CONFIG_ZONE_DMA32
113	 "DMA32",
114#endif
115	 "Normal",
116#ifdef CONFIG_HIGHMEM
117	 "HighMem",
118#endif
119	 "Movable",
120};
121
122int min_free_kbytes = 1024;
123
124unsigned long __meminitdata nr_kernel_pages;
125unsigned long __meminitdata nr_all_pages;
126static unsigned long __meminitdata dma_reserve;
127
128#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
129  /*
130   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
131   * ranges of memory (RAM) that may be registered with add_active_range().
132   * Ranges passed to add_active_range() will be merged if possible
133   * so the number of times add_active_range() can be called is
134   * related to the number of nodes and the number of holes
135   */
136  #ifdef CONFIG_MAX_ACTIVE_REGIONS
137    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
138    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
139  #else
140    #if MAX_NUMNODES >= 32
141      /* If there can be many nodes, allow up to 50 holes per node */
142      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
143    #else
144      /* By default, allow up to 256 distinct regions */
145      #define MAX_ACTIVE_REGIONS 256
146    #endif
147  #endif
148
149  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
150  static int __meminitdata nr_nodemap_entries;
151  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
152  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
153  static unsigned long __initdata required_kernelcore;
154  static unsigned long __initdata required_movablecore;
155  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
156
157  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
158  int movable_zone;
159  EXPORT_SYMBOL(movable_zone);
160#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
161
162#if MAX_NUMNODES > 1
163int nr_node_ids __read_mostly = MAX_NUMNODES;
164int nr_online_nodes __read_mostly = 1;
165EXPORT_SYMBOL(nr_node_ids);
166EXPORT_SYMBOL(nr_online_nodes);
167#endif
168
169int page_group_by_mobility_disabled __read_mostly;
170
171static void set_pageblock_migratetype(struct page *page, int migratetype)
172{
173
174	if (unlikely(page_group_by_mobility_disabled))
175		migratetype = MIGRATE_UNMOVABLE;
176
177	set_pageblock_flags_group(page, (unsigned long)migratetype,
178					PB_migrate, PB_migrate_end);
179}
180
181#ifdef CONFIG_DEBUG_VM
182static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
183{
184	int ret = 0;
185	unsigned seq;
186	unsigned long pfn = page_to_pfn(page);
187
188	do {
189		seq = zone_span_seqbegin(zone);
190		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
191			ret = 1;
192		else if (pfn < zone->zone_start_pfn)
193			ret = 1;
194	} while (zone_span_seqretry(zone, seq));
195
196	return ret;
197}
198
199static int page_is_consistent(struct zone *zone, struct page *page)
200{
201	if (!pfn_valid_within(page_to_pfn(page)))
202		return 0;
203	if (zone != page_zone(page))
204		return 0;
205
206	return 1;
207}
208/*
209 * Temporary debugging check for pages not lying within a given zone.
210 */
211static int bad_range(struct zone *zone, struct page *page)
212{
213	if (page_outside_zone_boundaries(zone, page))
214		return 1;
215	if (!page_is_consistent(zone, page))
216		return 1;
217
218	return 0;
219}
220#else
221static inline int bad_range(struct zone *zone, struct page *page)
222{
223	return 0;
224}
225#endif
226
227static void bad_page(struct page *page)
228{
229	static unsigned long resume;
230	static unsigned long nr_shown;
231	static unsigned long nr_unshown;
232
233	/*
234	 * Allow a burst of 60 reports, then keep quiet for that minute;
235	 * or allow a steady drip of one report per second.
236	 */
237	if (nr_shown == 60) {
238		if (time_before(jiffies, resume)) {
239			nr_unshown++;
240			goto out;
241		}
242		if (nr_unshown) {
243			printk(KERN_ALERT
244			      "BUG: Bad page state: %lu messages suppressed\n",
245				nr_unshown);
246			nr_unshown = 0;
247		}
248		nr_shown = 0;
249	}
250	if (nr_shown++ == 0)
251		resume = jiffies + 60 * HZ;
252
253	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
254		current->comm, page_to_pfn(page));
255	printk(KERN_ALERT
256		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
257		page, (void *)page->flags, page_count(page),
258		page_mapcount(page), page->mapping, page->index);
259
260	dump_stack();
261out:
262	/* Leave bad fields for debug, except PageBuddy could make trouble */
263	__ClearPageBuddy(page);
264	add_taint(TAINT_BAD_PAGE);
265}
266
267/*
268 * Higher-order pages are called "compound pages".  They are structured thusly:
269 *
270 * The first PAGE_SIZE page is called the "head page".
271 *
272 * The remaining PAGE_SIZE pages are called "tail pages".
273 *
274 * All pages have PG_compound set.  All tail pages have their ->first_page
275 * pointing at the head page.
276 *
277 * The first tail page's ->lru.next holds the address of the compound page's
278 * put_page() function.  Its ->lru.prev holds the order of allocation.
279 * This usage means that zero-order pages may not be compound.
280 */
281
282static void free_compound_page(struct page *page)
283{
284	__free_pages_ok(page, compound_order(page));
285}
286
287void prep_compound_page(struct page *page, unsigned long order)
288{
289	int i;
290	int nr_pages = 1 << order;
291
292	set_compound_page_dtor(page, free_compound_page);
293	set_compound_order(page, order);
294	__SetPageHead(page);
295	for (i = 1; i < nr_pages; i++) {
296		struct page *p = page + i;
297
298		__SetPageTail(p);
299		p->first_page = page;
300	}
301}
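/*
 * Illustrative layout (no additional behaviour): after
 * prep_compound_page(page, 2), page[0] is the head page (PG_head set), the
 * destructor and the order (2) are stashed in the first tail page's lru
 * fields, and page[1]..page[3] are tail pages whose ->first_page all point
 * back at page[0].
 */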
302
303static int destroy_compound_page(struct page *page, unsigned long order)
304{
305	int i;
306	int nr_pages = 1 << order;
307	int bad = 0;
308
309	if (unlikely(compound_order(page) != order) ||
310	    unlikely(!PageHead(page))) {
311		bad_page(page);
312		bad++;
313	}
314
315	__ClearPageHead(page);
316
317	for (i = 1; i < nr_pages; i++) {
318		struct page *p = page + i;
319
320		if (unlikely(!PageTail(p) || (p->first_page != page))) {
321			bad_page(page);
322			bad++;
323		}
324		__ClearPageTail(p);
325	}
326
327	return bad;
328}
329
330static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
331{
332	int i;
333
334	/*
335	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
336	 * and __GFP_HIGHMEM from hard or soft interrupt context.
337	 */
338	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
339	for (i = 0; i < (1 << order); i++)
340		clear_highpage(page + i);
341}
342
343static inline void set_page_order(struct page *page, int order)
344{
345	set_page_private(page, order);
346	__SetPageBuddy(page);
347}
348
349static inline void rmv_page_order(struct page *page)
350{
351	__ClearPageBuddy(page);
352	set_page_private(page, 0);
353}
354
355/*
356 * Locate the struct page for both the matching buddy in our
357 * pair (buddy1) and the combined order O+1 page they form (page).
358 *
359 * 1) Any buddy B1 will have an order O twin B2 which satisfies
360 * the following equation:
361 *     B2 = B1 ^ (1 << O)
362 * For example, if the starting buddy (B1) is #8, its order-1
363 * buddy (B2) is #10:
364 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
365 *
366 * 2) Any buddy B will have an order O+1 parent P which
367 * satisfies the following equation:
368 *     P = B & ~(1 << O)
369 *
370 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
371 */
372static inline struct page *
373__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
374{
375	unsigned long buddy_idx = page_idx ^ (1 << order);
376
377	return page + (buddy_idx - page_idx);
378}
379
380static inline unsigned long
381__find_combined_index(unsigned long page_idx, unsigned int order)
382{
383	return (page_idx & ~(1 << order));
384}
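/*
 * Worked example of the two helpers above (illustrative): for page_idx 8
 * at order 1, __page_find_buddy() computes buddy_idx = 8 ^ (1 << 1) = 10,
 * and __find_combined_index() gives the merged order-2 index
 * 8 & ~(1 << 1) = 8, i.e. the combined block starts at index 8.
 */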
385
386/*
387 * This function checks whether a page is free && is the buddy.
388 * We can coalesce a page and its buddy if
389 * (a) the buddy is not in a hole &&
390 * (b) the buddy is in the buddy system &&
391 * (c) a page and its buddy have the same order &&
392 * (d) a page and its buddy are in the same zone.
393 *
394 * For recording whether a page is in the buddy system, we use PG_buddy.
395 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
396 *
397 * For recording page's order, we use page_private(page).
398 */
399static inline int page_is_buddy(struct page *page, struct page *buddy,
400								int order)
401{
402	if (!pfn_valid_within(page_to_pfn(buddy)))
403		return 0;
404
405	if (page_zone_id(page) != page_zone_id(buddy))
406		return 0;
407
408	if (PageBuddy(buddy) && page_order(buddy) == order) {
409		VM_BUG_ON(page_count(buddy) != 0);
410		return 1;
411	}
412	return 0;
413}
414
415/*
416 * Freeing function for a buddy system allocator.
417 *
418 * The concept of a buddy system is to maintain a direct-mapped table
419 * (containing bit values) for memory blocks of various "orders".
420 * The bottom level table contains the map for the smallest allocatable
421 * units of memory (here, pages), and each level above it describes
422 * pairs of units from the levels below, hence, "buddies".
423 * At a high level, all that happens here is marking the table entry
424 * at the bottom level available, and propagating the changes upward
425 * as necessary, plus some accounting needed to play nicely with other
426 * parts of the VM system.
427 * At each level, we keep a list of pages, which are heads of contiguous
428 * free runs of (1 << order) pages and marked with PG_buddy. A page's
429 * order is recorded in the page_private(page) field.
430 * So when we are allocating or freeing one, we can derive the state of the
431 * other.  That is, if we allocate a small block, and both were
432 * free, the remainder of the region must be split into blocks.
433 * If a block is freed, and its buddy is also free, then this
434 * triggers coalescing into a block of larger size.
435 *
436 * -- wli
437 */
438
439static inline void __free_one_page(struct page *page,
440		struct zone *zone, unsigned int order,
441		int migratetype)
442{
443	unsigned long page_idx;
444
445	if (unlikely(PageCompound(page)))
446		if (unlikely(destroy_compound_page(page, order)))
447			return;
448
449	VM_BUG_ON(migratetype == -1);
450
451	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
452
453	VM_BUG_ON(page_idx & ((1 << order) - 1));
454	VM_BUG_ON(bad_range(zone, page));
455
456	while (order < MAX_ORDER-1) {
457		unsigned long combined_idx;
458		struct page *buddy;
459
460		buddy = __page_find_buddy(page, page_idx, order);
461		if (!page_is_buddy(page, buddy, order))
462			break;
463
464		/* Our buddy is free, merge with it and move up one order. */
465		list_del(&buddy->lru);
466		zone->free_area[order].nr_free--;
467		rmv_page_order(buddy);
468		combined_idx = __find_combined_index(page_idx, order);
469		page = page + (combined_idx - page_idx);
470		page_idx = combined_idx;
471		order++;
472	}
473	set_page_order(page, order);
474	list_add(&page->lru,
475		&zone->free_area[order].free_list[migratetype]);
476	zone->free_area[order].nr_free++;
477}
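/*
 * Example of the merge loop above (illustrative indices): freeing an
 * order-0 page at idx 10 when idx 11 is a free order-0 buddy merges them
 * into an order-1 block at idx 10; if idx 8 heads a free order-1 block,
 * the two merge again into an order-2 block at idx 8, and the loop stops
 * when the next buddy is not free or MAX_ORDER-1 is reached.
 */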
478
479#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
480/*
481 * free_page_mlock() -- clean up attempts to free an mlocked() page.
482 * Page should not be on lru, so no need to fix that up.
483 * free_pages_check() will verify...
484 */
485static inline void free_page_mlock(struct page *page)
486{
487	__ClearPageMlocked(page);
488	__dec_zone_page_state(page, NR_MLOCK);
489	__count_vm_event(UNEVICTABLE_MLOCKFREED);
490}
491#else
492static void free_page_mlock(struct page *page) { }
493#endif
494
495static inline int free_pages_check(struct page *page)
496{
497	if (unlikely(page_mapcount(page) |
498		(page->mapping != NULL)  |
499		(atomic_read(&page->_count) != 0) |
500		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
501		bad_page(page);
502		return 1;
503	}
504	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
505		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
506	return 0;
507}
508
509/*
510 * Frees a list of pages.
511 * Assumes all pages on list are in same zone, and of same order.
512 * count is the number of pages to free.
513 *
514 * If the zone was previously in an "all pages pinned" state then look to
515 * see if this freeing clears that state.
516 *
517 * And clear the zone's pages_scanned counter, to hold off the "all pages are
518 * pinned" detection logic.
519 */
520static void free_pages_bulk(struct zone *zone, int count,
521					struct list_head *list, int order)
522{
523	spin_lock(&zone->lock);
524	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
525	zone->pages_scanned = 0;
526
527	__mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
528	while (count--) {
529		struct page *page;
530
531		VM_BUG_ON(list_empty(list));
532		page = list_entry(list->prev, struct page, lru);
533		/* have to delete it as __free_one_page manipulates the list */
534		list_del(&page->lru);
535		__free_one_page(page, zone, order, page_private(page));
536	}
537	spin_unlock(&zone->lock);
538}
539
540static void free_one_page(struct zone *zone, struct page *page, int order,
541				int migratetype)
542{
543	spin_lock(&zone->lock);
544	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
545	zone->pages_scanned = 0;
546
547	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
548	__free_one_page(page, zone, order, migratetype);
549	spin_unlock(&zone->lock);
550}
551
552static void __free_pages_ok(struct page *page, unsigned int order)
553{
554	unsigned long flags;
555	int i;
556	int bad = 0;
557	int clearMlocked = PageMlocked(page);
558
559	for (i = 0 ; i < (1 << order) ; ++i)
560		bad += free_pages_check(page + i);
561	if (bad)
562		return;
563
564	if (!PageHighMem(page)) {
565		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
566		debug_check_no_obj_freed(page_address(page),
567					   PAGE_SIZE << order);
568	}
569	arch_free_page(page, order);
570	kernel_map_pages(page, 1 << order, 0);
571
572	local_irq_save(flags);
573	if (unlikely(clearMlocked))
574		free_page_mlock(page);
575	__count_vm_events(PGFREE, 1 << order);
576	free_one_page(page_zone(page), page, order,
577					get_pageblock_migratetype(page));
578	local_irq_restore(flags);
579}
580
581/*
582 * permit the bootmem allocator to evade page validation on high-order frees
583 */
584void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
585{
586	if (order == 0) {
587		__ClearPageReserved(page);
588		set_page_count(page, 0);
589		set_page_refcounted(page);
590		__free_page(page);
591	} else {
592		int loop;
593
594		prefetchw(page);
595		for (loop = 0; loop < BITS_PER_LONG; loop++) {
596			struct page *p = &page[loop];
597
598			if (loop + 1 < BITS_PER_LONG)
599				prefetchw(p + 1);
600			__ClearPageReserved(p);
601			set_page_count(p, 0);
602		}
603
604		set_page_refcounted(page);
605		__free_pages(page, order);
606	}
607}
608
609
610/*
611 * The order of subdivision here is critical for the IO subsystem.
612 * Please do not alter this order without good reasons and regression
613 * testing. Specifically, as large blocks of memory are subdivided,
614 * the order in which smaller blocks are delivered depends on the order
615 * they're subdivided in this function. This is the primary factor
616 * influencing the order in which pages are delivered to the IO
617 * subsystem according to empirical testing, and this is also justified
618 * by considering the behavior of a buddy system containing a single
619 * large block of memory acted on by a series of small allocations.
620 * This behavior is a critical factor in sglist merging's success.
621 *
622 * -- wli
623 */
624static inline void expand(struct zone *zone, struct page *page,
625	int low, int high, struct free_area *area,
626	int migratetype)
627{
628	unsigned long size = 1 << high;
629
630	while (high > low) {
631		area--;
632		high--;
633		size >>= 1;
634		VM_BUG_ON(bad_range(zone, &page[size]));
635		list_add(&page[size].lru, &area->free_list[migratetype]);
636		area->nr_free++;
637		set_page_order(&page[size], high);
638	}
639}
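/*
 * Illustrative walk-through of expand(): satisfying an order-0 request
 * (low = 0) from an order-3 block (high = 3) puts the upper halves back
 * on the free lists as one order-2, one order-1 and one order-0 block
 * (at offsets 4, 2 and 1 from the start of the block), and the caller
 * keeps page[0] as the allocated order-0 page.
 */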
640
641/*
642 * This page is about to be returned from the page allocator
643 */
644static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
645{
646	if (unlikely(page_mapcount(page) |
647		(page->mapping != NULL)  |
648		(atomic_read(&page->_count) != 0)  |
649		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
650		bad_page(page);
651		return 1;
652	}
653
654	set_page_private(page, 0);
655	set_page_refcounted(page);
656
657	arch_alloc_page(page, order);
658	kernel_map_pages(page, 1 << order, 1);
659
660	if (gfp_flags & __GFP_ZERO)
661		prep_zero_page(page, order, gfp_flags);
662
663	if (order && (gfp_flags & __GFP_COMP))
664		prep_compound_page(page, order);
665
666	return 0;
667}
668
669/*
670 * Go through the free lists for the given migratetype and remove
671 * the smallest available page from the freelists
672 */
673static inline
674struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
675						int migratetype)
676{
677	unsigned int current_order;
678	struct free_area * area;
679	struct page *page;
680
681	/* Find a page of the appropriate size in the preferred list */
682	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
683		area = &(zone->free_area[current_order]);
684		if (list_empty(&area->free_list[migratetype]))
685			continue;
686
687		page = list_entry(area->free_list[migratetype].next,
688							struct page, lru);
689		list_del(&page->lru);
690		rmv_page_order(page);
691		area->nr_free--;
692		expand(zone, page, order, current_order, area, migratetype);
693		return page;
694	}
695
696	return NULL;
697}
698
699
700/*
701 * This array describes the order in which free lists are fallen back on
702 * when the free lists for the desired migratetype are depleted
703 */
704static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
705	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
706	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
707	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
708	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
709};
710
711/*
712 * Move the free pages in a range to the free lists of the requested type.
713 * Note that start_page and end_page are not necessarily aligned on a pageblock
714 * boundary. If alignment is required, use move_freepages_block()
715 */
716static int move_freepages(struct zone *zone,
717			  struct page *start_page, struct page *end_page,
718			  int migratetype)
719{
720	struct page *page;
721	unsigned long order;
722	int pages_moved = 0;
723
724#ifndef CONFIG_HOLES_IN_ZONE
725	/*
726	 * page_zone is not safe to call in this context when
727	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
728	 * anyway as we check zone boundaries in move_freepages_block().
729	 * Remove at a later date when no bug reports exist related to
730	 * grouping pages by mobility
731	 */
732	BUG_ON(page_zone(start_page) != page_zone(end_page));
733#endif
734
735	for (page = start_page; page <= end_page;) {
736		/* Make sure we are not inadvertently changing nodes */
737		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
738
739		if (!pfn_valid_within(page_to_pfn(page))) {
740			page++;
741			continue;
742		}
743
744		if (!PageBuddy(page)) {
745			page++;
746			continue;
747		}
748
749		order = page_order(page);
750		list_del(&page->lru);
751		list_add(&page->lru,
752			&zone->free_area[order].free_list[migratetype]);
753		page += 1 << order;
754		pages_moved += 1 << order;
755	}
756
757	return pages_moved;
758}
759
760static int move_freepages_block(struct zone *zone, struct page *page,
761				int migratetype)
762{
763	unsigned long start_pfn, end_pfn;
764	struct page *start_page, *end_page;
765
766	start_pfn = page_to_pfn(page);
767	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
768	start_page = pfn_to_page(start_pfn);
769	end_page = start_page + pageblock_nr_pages - 1;
770	end_pfn = start_pfn + pageblock_nr_pages - 1;
771
772	/* Do not cross zone boundaries */
773	if (start_pfn < zone->zone_start_pfn)
774		start_page = page;
775	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
776		return 0;
777
778	return move_freepages(zone, start_page, end_page, migratetype);
779}
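/*
 * Alignment example (illustrative, assuming pageblock_nr_pages == 512):
 * for a page at pfn 1234, start_pfn is rounded down to 1024
 * (1234 & ~511) and the block spans pfns 1024..1535; if that range
 * crosses a zone boundary the move is clamped or skipped as above.
 */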
780
781/* Remove an element from the buddy allocator from the fallback list */
782static inline struct page *
783__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
784{
785	struct free_area * area;
786	int current_order;
787	struct page *page;
788	int migratetype, i;
789
790	/* Find the largest possible block of pages in the other list */
791	for (current_order = MAX_ORDER-1; current_order >= order;
792						--current_order) {
793		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
794			migratetype = fallbacks[start_migratetype][i];
795
796			/* MIGRATE_RESERVE handled later if necessary */
797			if (migratetype == MIGRATE_RESERVE)
798				continue;
799
800			area = &(zone->free_area[current_order]);
801			if (list_empty(&area->free_list[migratetype]))
802				continue;
803
804			page = list_entry(area->free_list[migratetype].next,
805					struct page, lru);
806			area->nr_free--;
807
808			/*
809			 * If breaking a large block of pages, move all free
810			 * pages to the preferred allocation list. If falling
811			 * back for a reclaimable kernel allocation, be more
812			 * aggressive about taking ownership of free pages
813			 */
814			if (unlikely(current_order >= (pageblock_order >> 1)) ||
815					start_migratetype == MIGRATE_RECLAIMABLE) {
816				unsigned long pages;
817				pages = move_freepages_block(zone, page,
818								start_migratetype);
819
820				/* Claim the whole block if over half of it is free */
821				if (pages >= (1 << (pageblock_order-1)))
822					set_pageblock_migratetype(page,
823								start_migratetype);
824
825				migratetype = start_migratetype;
826			}
827
828			/* Remove the page from the freelists */
829			list_del(&page->lru);
830			rmv_page_order(page);
831
832			if (current_order == pageblock_order)
833				set_pageblock_migratetype(page,
834							start_migratetype);
835
836			expand(zone, page, order, current_order, area, migratetype);
837			return page;
838		}
839	}
840
841	return NULL;
842}
843
844/*
845 * Do the hard work of removing an element from the buddy allocator.
846 * Call me with the zone->lock already held.
847 */
848static struct page *__rmqueue(struct zone *zone, unsigned int order,
849						int migratetype)
850{
851	struct page *page;
852
853retry_reserve:
854	page = __rmqueue_smallest(zone, order, migratetype);
855
856	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
857		page = __rmqueue_fallback(zone, order, migratetype);
858
859		/*
860		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
861		 * is used because __rmqueue_smallest is an inline function
862		 * and we want just one call site
863		 */
864		if (!page) {
865			migratetype = MIGRATE_RESERVE;
866			goto retry_reserve;
867		}
868	}
869
870	return page;
871}
872
873/*
874 * Obtain a specified number of elements from the buddy allocator, all under
875 * a single hold of the lock, for efficiency.  Add them to the supplied list.
876 * Returns the number of new pages which were placed at *list.
877 */
878static int rmqueue_bulk(struct zone *zone, unsigned int order,
879			unsigned long count, struct list_head *list,
880			int migratetype)
881{
882	int i;
883
884	spin_lock(&zone->lock);
885	for (i = 0; i < count; ++i) {
886		struct page *page = __rmqueue(zone, order, migratetype);
887		if (unlikely(page == NULL))
888			break;
889
890		/*
891		 * Split buddy pages returned by expand() are received here
892		 * in physical page order. The page is added to the caller's
893		 * list and the list head then moves forward. From the caller's
894		 * perspective, the linked list is ordered by page number in
895		 * some conditions. This is useful for IO devices that can
896		 * merge IO requests if the physical pages are ordered
897		 * properly.
898		 */
899		list_add(&page->lru, list);
900		set_page_private(page, migratetype);
901		list = &page->lru;
902	}
903	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
904	spin_unlock(&zone->lock);
905	return i;
906}
907
908#ifdef CONFIG_NUMA
909/*
910 * Called from the vmstat counter updater to drain pagesets of this
911 * currently executing processor on remote nodes after they have
912 * expired.
913 *
914 * Note that this function must be called with the thread pinned to
915 * a single processor.
916 */
917void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
918{
919	unsigned long flags;
920	int to_drain;
921
922	local_irq_save(flags);
923	if (pcp->count >= pcp->batch)
924		to_drain = pcp->batch;
925	else
926		to_drain = pcp->count;
927	free_pages_bulk(zone, to_drain, &pcp->list, 0);
928	pcp->count -= to_drain;
929	local_irq_restore(flags);
930}
931#endif
932
933/*
934 * Drain pages of the indicated processor.
935 *
936 * The processor must either be the current processor and the
937 * thread pinned to the current processor or a processor that
938 * is not online.
939 */
940static void drain_pages(unsigned int cpu)
941{
942	unsigned long flags;
943	struct zone *zone;
944
945	for_each_populated_zone(zone) {
946		struct per_cpu_pageset *pset;
947		struct per_cpu_pages *pcp;
948
949		pset = zone_pcp(zone, cpu);
950
951		pcp = &pset->pcp;
952		local_irq_save(flags);
953		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
954		pcp->count = 0;
955		local_irq_restore(flags);
956	}
957}
958
959/*
960 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
961 */
962void drain_local_pages(void *arg)
963{
964	drain_pages(smp_processor_id());
965}
966
967/*
968 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
969 */
970void drain_all_pages(void)
971{
972	on_each_cpu(drain_local_pages, NULL, 1);
973}
974
975#ifdef CONFIG_HIBERNATION
976
977void mark_free_pages(struct zone *zone)
978{
979	unsigned long pfn, max_zone_pfn;
980	unsigned long flags;
981	int order, t;
982	struct list_head *curr;
983
984	if (!zone->spanned_pages)
985		return;
986
987	spin_lock_irqsave(&zone->lock, flags);
988
989	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
990	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
991		if (pfn_valid(pfn)) {
992			struct page *page = pfn_to_page(pfn);
993
994			if (!swsusp_page_is_forbidden(page))
995				swsusp_unset_page_free(page);
996		}
997
998	for_each_migratetype_order(order, t) {
999		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1000			unsigned long i;
1001
1002			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1003			for (i = 0; i < (1UL << order); i++)
1004				swsusp_set_page_free(pfn_to_page(pfn + i));
1005		}
1006	}
1007	spin_unlock_irqrestore(&zone->lock, flags);
1008}
1009#endif /* CONFIG_HIBERNATION */
1010
1011/*
1012 * Free a 0-order page
1013 */
1014static void free_hot_cold_page(struct page *page, int cold)
1015{
1016	struct zone *zone = page_zone(page);
1017	struct per_cpu_pages *pcp;
1018	unsigned long flags;
1019	int clearMlocked = PageMlocked(page);
1020
1021	if (PageAnon(page))
1022		page->mapping = NULL;
1023	if (free_pages_check(page))
1024		return;
1025
1026	if (!PageHighMem(page)) {
1027		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
1028		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
1029	}
1030	arch_free_page(page, 0);
1031	kernel_map_pages(page, 1, 0);
1032
1033	pcp = &zone_pcp(zone, get_cpu())->pcp;
1034	set_page_private(page, get_pageblock_migratetype(page));
1035	local_irq_save(flags);
1036	if (unlikely(clearMlocked))
1037		free_page_mlock(page);
1038	__count_vm_event(PGFREE);
1039
1040	if (cold)
1041		list_add_tail(&page->lru, &pcp->list);
1042	else
1043		list_add(&page->lru, &pcp->list);
1044	pcp->count++;
1045	if (pcp->count >= pcp->high) {
1046		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
1047		pcp->count -= pcp->batch;
1048	}
1049	local_irq_restore(flags);
1050	put_cpu();
1051}
1052
1053void free_hot_page(struct page *page)
1054{
1055	free_hot_cold_page(page, 0);
1056}
1057
1058void free_cold_page(struct page *page)
1059{
1060	free_hot_cold_page(page, 1);
1061}
1062
1063/*
1064 * split_page takes a non-compound higher-order page, and splits it into
1065 * n (1<<order) sub-pages: page[0..n]
1066 * Each sub-page must be freed individually.
1067 *
1068 * Note: this is probably too low level an operation for use in drivers.
1069 * Please consult with lkml before using this in your driver.
1070 */
1071void split_page(struct page *page, unsigned int order)
1072{
1073	int i;
1074
1075	VM_BUG_ON(PageCompound(page));
1076	VM_BUG_ON(!page_count(page));
1077	for (i = 1; i < (1 << order); i++)
1078		set_page_refcounted(page + i);
1079}
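/*
 * Usage sketch (illustrative): a caller that allocated an order-2 page
 * but wants to release the pieces one at a time can do:
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	split_page(page, 2);
 *	...
 *	__free_page(page + i);	for each of the four order-0 sub-pages
 */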
1080
1081/*
1082 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1083 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1084 * or two.
1085 */
1086static inline
1087struct page *buffered_rmqueue(struct zone *preferred_zone,
1088			struct zone *zone, int order, gfp_t gfp_flags,
1089			int migratetype)
1090{
1091	unsigned long flags;
1092	struct page *page;
1093	int cold = !!(gfp_flags & __GFP_COLD);
1094	int cpu;
1095
1096again:
1097	cpu  = get_cpu();
1098	if (likely(order == 0)) {
1099		struct per_cpu_pages *pcp;
1100
1101		pcp = &zone_pcp(zone, cpu)->pcp;
1102		local_irq_save(flags);
1103		if (!pcp->count) {
1104			pcp->count = rmqueue_bulk(zone, 0,
1105					pcp->batch, &pcp->list, migratetype);
1106			if (unlikely(!pcp->count))
1107				goto failed;
1108		}
1109
1110		/* Find a page of the appropriate migrate type */
1111		if (cold) {
1112			list_for_each_entry_reverse(page, &pcp->list, lru)
1113				if (page_private(page) == migratetype)
1114					break;
1115		} else {
1116			list_for_each_entry(page, &pcp->list, lru)
1117				if (page_private(page) == migratetype)
1118					break;
1119		}
1120
1121		/* Allocate more to the pcp list if necessary */
1122		if (unlikely(&page->lru == &pcp->list)) {
1123			pcp->count += rmqueue_bulk(zone, 0,
1124					pcp->batch, &pcp->list, migratetype);
1125			page = list_entry(pcp->list.next, struct page, lru);
1126		}
1127
1128		list_del(&page->lru);
1129		pcp->count--;
1130	} else {
1131		spin_lock_irqsave(&zone->lock, flags);
1132		page = __rmqueue(zone, order, migratetype);
1133		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1134		spin_unlock(&zone->lock);
1135		if (!page)
1136			goto failed;
1137	}
1138
1139	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1140	zone_statistics(preferred_zone, zone);
1141	local_irq_restore(flags);
1142	put_cpu();
1143
1144	VM_BUG_ON(bad_range(zone, page));
1145	if (prep_new_page(page, order, gfp_flags))
1146		goto again;
1147	return page;
1148
1149failed:
1150	local_irq_restore(flags);
1151	put_cpu();
1152	return NULL;
1153}
1154
1155/* The ALLOC_WMARK bits are used as an index to zone->watermark */
1156#define ALLOC_WMARK_MIN		WMARK_MIN
1157#define ALLOC_WMARK_LOW		WMARK_LOW
1158#define ALLOC_WMARK_HIGH	WMARK_HIGH
1159#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1160
1161/* Mask to get the watermark bits */
1162#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1163
1164#define ALLOC_HARDER		0x10 /* try to alloc harder */
1165#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1166#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1167
1168#ifdef CONFIG_FAIL_PAGE_ALLOC
1169
1170static struct fail_page_alloc_attr {
1171	struct fault_attr attr;
1172
1173	u32 ignore_gfp_highmem;
1174	u32 ignore_gfp_wait;
1175	u32 min_order;
1176
1177#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1178
1179	struct dentry *ignore_gfp_highmem_file;
1180	struct dentry *ignore_gfp_wait_file;
1181	struct dentry *min_order_file;
1182
1183#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1184
1185} fail_page_alloc = {
1186	.attr = FAULT_ATTR_INITIALIZER,
1187	.ignore_gfp_wait = 1,
1188	.ignore_gfp_highmem = 1,
1189	.min_order = 1,
1190};
1191
1192static int __init setup_fail_page_alloc(char *str)
1193{
1194	return setup_fault_attr(&fail_page_alloc.attr, str);
1195}
1196__setup("fail_page_alloc=", setup_fail_page_alloc);
1197
1198static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1199{
1200	if (order < fail_page_alloc.min_order)
1201		return 0;
1202	if (gfp_mask & __GFP_NOFAIL)
1203		return 0;
1204	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1205		return 0;
1206	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1207		return 0;
1208
1209	return should_fail(&fail_page_alloc.attr, 1 << order);
1210}
1211
1212#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1213
1214static int __init fail_page_alloc_debugfs(void)
1215{
1216	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1217	struct dentry *dir;
1218	int err;
1219
1220	err = init_fault_attr_dentries(&fail_page_alloc.attr,
1221				       "fail_page_alloc");
1222	if (err)
1223		return err;
1224	dir = fail_page_alloc.attr.dentries.dir;
1225
1226	fail_page_alloc.ignore_gfp_wait_file =
1227		debugfs_create_bool("ignore-gfp-wait", mode, dir,
1228				      &fail_page_alloc.ignore_gfp_wait);
1229
1230	fail_page_alloc.ignore_gfp_highmem_file =
1231		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1232				      &fail_page_alloc.ignore_gfp_highmem);
1233	fail_page_alloc.min_order_file =
1234		debugfs_create_u32("min-order", mode, dir,
1235				   &fail_page_alloc.min_order);
1236
1237	if (!fail_page_alloc.ignore_gfp_wait_file ||
1238            !fail_page_alloc.ignore_gfp_highmem_file ||
1239            !fail_page_alloc.min_order_file) {
1240		err = -ENOMEM;
1241		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1242		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1243		debugfs_remove(fail_page_alloc.min_order_file);
1244		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1245	}
1246
1247	return err;
1248}
1249
1250late_initcall(fail_page_alloc_debugfs);
1251
1252#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1253
1254#else /* CONFIG_FAIL_PAGE_ALLOC */
1255
1256static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1257{
1258	return 0;
1259}
1260
1261#endif /* CONFIG_FAIL_PAGE_ALLOC */
1262
1263/*
1264 * Return 1 if free pages are above 'mark'. This takes into account the order
1265 * of the allocation.
1266 */
1267int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1268		      int classzone_idx, int alloc_flags)
1269{
1270	/* free_pages may go negative - that's OK */
1271	long min = mark;
1272	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1273	int o;
1274
1275	if (alloc_flags & ALLOC_HIGH)
1276		min -= min / 2;
1277	if (alloc_flags & ALLOC_HARDER)
1278		min -= min / 4;
1279
1280	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1281		return 0;
1282	for (o = 0; o < order; o++) {
1283		/* At the next order, this order's pages become unavailable */
1284		free_pages -= z->free_area[o].nr_free << o;
1285
1286		/* Require fewer higher order pages to be free */
1287		min >>= 1;
1288
1289		if (free_pages <= min)
1290			return 0;
1291	}
1292	return 1;
1293}
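/*
 * Worked example of the check above (illustrative numbers): for an
 * order-2 request against a mark of 128 with neither ALLOC_HIGH nor
 * ALLOC_HARDER, the zone's free pages (minus the 2^order pages being
 * asked for) must first exceed 128 plus the lowmem_reserve for the class
 * zone; then, with order-0 free pages discounted, more than 64 must
 * remain, and with order-1 free pages also discounted, more than 32
 * must remain.
 */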
1294
1295#ifdef CONFIG_NUMA
1296/*
1297 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1298 * skip over zones that are not allowed by the cpuset, or that have
1299 * been recently (in last second) found to be nearly full.  See further
1300 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1301 * that have to skip over a lot of full or unallowed zones.
1302 *
1303 * If the zonelist cache is present in the passed in zonelist, then
1304 * returns a pointer to the allowed node mask (either the current
1305 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
1306 *
1307 * If the zonelist cache is not available for this zonelist, does
1308 * nothing and returns NULL.
1309 *
1310 * If the fullzones BITMAP in the zonelist cache is stale (more than
1311 * a second since last zap'd) then we zap it out (clear its bits.)
1312 *
1313 * We hold off even calling zlc_setup, until after we've checked the
1314 * first zone in the zonelist, on the theory that most allocations will
1315 * be satisfied from that first zone, so best to examine that zone as
1316 * quickly as we can.
1317 */
1318static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1319{
1320	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1321	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1322
1323	zlc = zonelist->zlcache_ptr;
1324	if (!zlc)
1325		return NULL;
1326
1327	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1328		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1329		zlc->last_full_zap = jiffies;
1330	}
1331
1332	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1333					&cpuset_current_mems_allowed :
1334					&node_states[N_HIGH_MEMORY];
1335	return allowednodes;
1336}
1337
1338/*
1339 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1340 * if it is worth looking at further for free memory:
1341 *  1) Check that the zone isn't thought to be full (doesn't have its
1342 *     bit set in the zonelist_cache fullzones BITMAP).
1343 *  2) Check that the zones node (obtained from the zonelist_cache
1344 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1345 * Return true (non-zero) if zone is worth looking at further, or
1346 * else return false (zero) if it is not.
1347 *
1348 * This check -ignores- the distinction between various watermarks,
1349 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1350 * found to be full for any variation of these watermarks, it will
1351 * be considered full for up to one second by all requests, unless
1352 * we are so low on memory on all allowed nodes that we are forced
1353 * into the second scan of the zonelist.
1354 *
1355 * In the second scan we ignore this zonelist cache and exactly
1356 * apply the watermarks to all zones, even if it is slower to do so.
1357 * We are low on memory in the second scan, and should leave no stone
1358 * unturned looking for a free page.
1359 */
1360static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1361						nodemask_t *allowednodes)
1362{
1363	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1364	int i;				/* index of *z in zonelist zones */
1365	int n;				/* node that zone *z is on */
1366
1367	zlc = zonelist->zlcache_ptr;
1368	if (!zlc)
1369		return 1;
1370
1371	i = z - zonelist->_zonerefs;
1372	n = zlc->z_to_n[i];
1373
1374	/* This zone is worth trying if it is allowed but not full */
1375	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1376}
1377
1378/*
1379 * Given 'z' scanning a zonelist, set the corresponding bit in
1380 * zlc->fullzones, so that subsequent attempts to allocate a page
1381 * from that zone don't waste time re-examining it.
1382 */
1383static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1384{
1385	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1386	int i;				/* index of *z in zonelist zones */
1387
1388	zlc = zonelist->zlcache_ptr;
1389	if (!zlc)
1390		return;
1391
1392	i = z - zonelist->_zonerefs;
1393
1394	set_bit(i, zlc->fullzones);
1395}
1396
1397#else	/* CONFIG_NUMA */
1398
1399static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1400{
1401	return NULL;
1402}
1403
1404static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1405				nodemask_t *allowednodes)
1406{
1407	return 1;
1408}
1409
1410static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1411{
1412}
1413#endif	/* CONFIG_NUMA */
1414
1415/*
1416 * get_page_from_freelist goes through the zonelist trying to allocate
1417 * a page.
1418 */
1419static struct page *
1420get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1421		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1422		struct zone *preferred_zone, int migratetype)
1423{
1424	struct zoneref *z;
1425	struct page *page = NULL;
1426	int classzone_idx;
1427	struct zone *zone;
1428	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1429	int zlc_active = 0;		/* set if using zonelist_cache */
1430	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1431
1432	classzone_idx = zone_idx(preferred_zone);
1433zonelist_scan:
1434	/*
1435	 * Scan zonelist, looking for a zone with enough free.
1436	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1437	 */
1438	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1439						high_zoneidx, nodemask) {
1440		if (NUMA_BUILD && zlc_active &&
1441			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1442				continue;
1443		if ((alloc_flags & ALLOC_CPUSET) &&
1444			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1445				goto try_next_zone;
1446
1447		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1448		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1449			unsigned long mark;
1450			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1451			if (!zone_watermark_ok(zone, order, mark,
1452				    classzone_idx, alloc_flags)) {
1453				if (!zone_reclaim_mode ||
1454				    !zone_reclaim(zone, gfp_mask, order))
1455					goto this_zone_full;
1456			}
1457		}
1458
1459		page = buffered_rmqueue(preferred_zone, zone, order,
1460						gfp_mask, migratetype);
1461		if (page)
1462			break;
1463this_zone_full:
1464		if (NUMA_BUILD)
1465			zlc_mark_zone_full(zonelist, z);
1466try_next_zone:
1467		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1468			/*
1469			 * We do zlc_setup after the first zone is tried, but only
1470			 * if there are multiple nodes to make it worthwhile
1471			 */
1472			allowednodes = zlc_setup(zonelist, alloc_flags);
1473			zlc_active = 1;
1474			did_zlc_setup = 1;
1475		}
1476	}
1477
1478	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1479		/* Disable zlc cache for second zonelist scan */
1480		zlc_active = 0;
1481		goto zonelist_scan;
1482	}
1483	return page;
1484}
1485
1486static inline int
1487should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1488				unsigned long pages_reclaimed)
1489{
1490	/* Do not loop if specifically requested */
1491	if (gfp_mask & __GFP_NORETRY)
1492		return 0;
1493
1494	/*
1495	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1496	 * means __GFP_NOFAIL, but that may not be true in other
1497	 * implementations.
1498	 */
1499	if (order <= PAGE_ALLOC_COSTLY_ORDER)
1500		return 1;
1501
1502	/*
1503	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1504	 * specified, then we retry until we no longer reclaim any pages
1505	 * (above), or we've reclaimed an order of pages at least as
1506	 * large as the allocation's order. In both cases, if the
1507	 * allocation still fails, we stop retrying.
1508	 */
1509	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1510		return 1;
1511
1512	/*
1513	 * Don't let big-order allocations loop unless the caller
1514	 * explicitly requests that.
1515	 */
1516	if (gfp_mask & __GFP_NOFAIL)
1517		return 1;
1518
1519	return 0;
1520}
1521
1522static inline struct page *
1523__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1524	struct zonelist *zonelist, enum zone_type high_zoneidx,
1525	nodemask_t *nodemask, struct zone *preferred_zone,
1526	int migratetype)
1527{
1528	struct page *page;
1529
1530	/* Acquire the OOM killer lock for the zones in zonelist */
1531	if (!try_set_zone_oom(zonelist, gfp_mask)) {
1532		schedule_timeout_uninterruptible(1);
1533		return NULL;
1534	}
1535
1536	/*
1537	 * Go through the zonelist yet one more time, keeping a very high
1538	 * watermark here; this is only to catch a parallel OOM killing, and
1539	 * we must fail if we're still under heavy pressure.
1540	 */
1541	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1542		order, zonelist, high_zoneidx,
1543		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1544		preferred_zone, migratetype);
1545	if (page)
1546		goto out;
1547
1548	/* The OOM killer will not help higher order allocs */
1549	if (order > PAGE_ALLOC_COSTLY_ORDER)
1550		goto out;
1551
1552	/* Exhausted what can be done so it's blamo time */
1553	out_of_memory(zonelist, gfp_mask, order);
1554
1555out:
1556	clear_zonelist_oom(zonelist, gfp_mask);
1557	return page;
1558}
1559
1560/* The really slow allocator path where we enter direct reclaim */
1561static inline struct page *
1562__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1563	struct zonelist *zonelist, enum zone_type high_zoneidx,
1564	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1565	int migratetype, unsigned long *did_some_progress)
1566{
1567	struct page *page = NULL;
1568	struct reclaim_state reclaim_state;
1569	struct task_struct *p = current;
1570
1571	cond_resched();
1572
1573	/* We now go into synchronous reclaim */
1574	cpuset_memory_pressure_bump();
1575
1576	/*
1577	 * The task's cpuset might have expanded its set of allowable nodes
1578	 */
1579	p->flags |= PF_MEMALLOC;
1580	lockdep_set_current_reclaim_state(gfp_mask);
1581	reclaim_state.reclaimed_slab = 0;
1582	p->reclaim_state = &reclaim_state;
1583
1584	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1585
1586	p->reclaim_state = NULL;
1587	lockdep_clear_current_reclaim_state();
1588	p->flags &= ~PF_MEMALLOC;
1589
1590	cond_resched();
1591
1592	if (order != 0)
1593		drain_all_pages();
1594
1595	if (likely(*did_some_progress))
1596		page = get_page_from_freelist(gfp_mask, nodemask, order,
1597					zonelist, high_zoneidx,
1598					alloc_flags, preferred_zone,
1599					migratetype);
1600	return page;
1601}
1602
1603/*
1604 * This is called in the allocator slow-path if the allocation request is of
1605 * sufficient urgency to ignore watermarks and take other desperate measures
1606 */
1607static inline struct page *
1608__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1609	struct zonelist *zonelist, enum zone_type high_zoneidx,
1610	nodemask_t *nodemask, struct zone *preferred_zone,
1611	int migratetype)
1612{
1613	struct page *page;
1614
1615	do {
1616		page = get_page_from_freelist(gfp_mask, nodemask, order,
1617			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
1618			preferred_zone, migratetype);
1619
1620		if (!page && gfp_mask & __GFP_NOFAIL)
1621			congestion_wait(WRITE, HZ/50);
1622	} while (!page && (gfp_mask & __GFP_NOFAIL));
1623
1624	return page;
1625}
1626
1627static inline
1628void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1629						enum zone_type high_zoneidx)
1630{
1631	struct zoneref *z;
1632	struct zone *zone;
1633
1634	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1635		wakeup_kswapd(zone, order);
1636}
1637
1638static inline int
1639gfp_to_alloc_flags(gfp_t gfp_mask)
1640{
1641	struct task_struct *p = current;
1642	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1643	const gfp_t wait = gfp_mask & __GFP_WAIT;
1644
1645	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1646	BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
1647
1648	/*
1649	 * The caller may dip into page reserves a bit more if the caller
1650	 * cannot run direct reclaim, or if the caller has realtime scheduling
1651	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1652	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1653	 */
1654	alloc_flags |= (gfp_mask & __GFP_HIGH);
1655
1656	if (!wait) {
1657		alloc_flags |= ALLOC_HARDER;
1658		/*
1659		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1660		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1661		 */
1662		alloc_flags &= ~ALLOC_CPUSET;
1663	} else if (unlikely(rt_task(p)))
1664		alloc_flags |= ALLOC_HARDER;
1665
1666	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1667		if (!in_interrupt() &&
1668		    ((p->flags & PF_MEMALLOC) ||
1669		     unlikely(test_thread_flag(TIF_MEMDIE))))
1670			alloc_flags |= ALLOC_NO_WATERMARKS;
1671	}
1672
1673	return alloc_flags;
1674}
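/*
 * Example of the mapping above (illustrative): a GFP_ATOMIC caller has
 * __GFP_HIGH set and __GFP_WAIT clear, so it ends up with ALLOC_WMARK_MIN
 * | ALLOC_HIGH | ALLOC_HARDER and the cpuset check dropped, letting it
 * dip further into the reserves than a GFP_KERNEL caller, which keeps
 * only ALLOC_WMARK_MIN | ALLOC_CPUSET.
 */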
1675
1676static inline struct page *
1677__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1678	struct zonelist *zonelist, enum zone_type high_zoneidx,
1679	nodemask_t *nodemask, struct zone *preferred_zone,
1680	int migratetype)
1681{
1682	const gfp_t wait = gfp_mask & __GFP_WAIT;
1683	struct page *page = NULL;
1684	int alloc_flags;
1685	unsigned long pages_reclaimed = 0;
1686	unsigned long did_some_progress;
1687	struct task_struct *p = current;
1688
1689	/*
1690	 * In the slowpath, we sanity check order to avoid ever trying to
1691	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
1692	 * be using allocators in order of preference for an area that is
1693	 * too large.
1694	 */
1695	if (WARN_ON_ONCE(order >= MAX_ORDER))
1696		return NULL;
1697
1698	/*
1699	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1700	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1701 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
1702	 * using a larger set of nodes after it has established that the
1703	 * allowed per node queues are empty and that nodes are
1704	 * over allocated.
1705	 */
1706	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1707		goto nopage;
1708
1709	wake_all_kswapd(order, zonelist, high_zoneidx);
1710
1711	/*
1712	 * OK, we're below the kswapd watermark and have kicked background
1713	 * reclaim. Now things get more complex, so set up alloc_flags according
1714	 * to how we want to proceed.
1715	 */
1716	alloc_flags = gfp_to_alloc_flags(gfp_mask);
1717
1718restart:
1719	/* This is the last chance, in general, before the goto nopage. */
1720	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1721			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
1722			preferred_zone, migratetype);
1723	if (page)
1724		goto got_pg;
1725
1726rebalance:
1727	/* Allocate without watermarks if the context allows */
1728	if (alloc_flags & ALLOC_NO_WATERMARKS) {
1729		page = __alloc_pages_high_priority(gfp_mask, order,
1730				zonelist, high_zoneidx, nodemask,
1731				preferred_zone, migratetype);
1732		if (page)
1733			goto got_pg;
1734	}
1735
1736	/* Atomic allocations - we can't balance anything */
1737	if (!wait)
1738		goto nopage;
1739
1740	/* Avoid recursion of direct reclaim */
1741	if (p->flags & PF_MEMALLOC)
1742		goto nopage;
1743
1744	/* Try direct reclaim and then allocating */
1745	page = __alloc_pages_direct_reclaim(gfp_mask, order,
1746					zonelist, high_zoneidx,
1747					nodemask,
1748					alloc_flags, preferred_zone,
1749					migratetype, &did_some_progress);
1750	if (page)
1751		goto got_pg;
1752
1753	/*
1754	 * If we failed to make any progress reclaiming, then we are
1755	 * running out of options and have to consider going OOM
1756	 */
1757	if (!did_some_progress) {
1758		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1759			page = __alloc_pages_may_oom(gfp_mask, order,
1760					zonelist, high_zoneidx,
1761					nodemask, preferred_zone,
1762					migratetype);
1763			if (page)
1764				goto got_pg;
1765
1766			/*
1767			 * The OOM killer does not trigger for high-order allocations
1768			 * but if no progress is being made, there are no other
1769			 * options and retrying is unlikely to help
1770			 */
1771			if (order > PAGE_ALLOC_COSTLY_ORDER)
1772				goto nopage;
1773
1774			goto restart;
1775		}
1776	}
1777
1778	/* Check if we should retry the allocation */
1779	pages_reclaimed += did_some_progress;
1780	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
1781		/* Wait for some write requests to complete then retry */
1782		congestion_wait(WRITE, HZ/50);
1783		goto rebalance;
1784	}
1785
1786nopage:
1787	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1788		printk(KERN_WARNING "%s: page allocation failure."
1789			" order:%d, mode:0x%x\n",
1790			p->comm, order, gfp_mask);
1791		dump_stack();
1792		show_mem();
1793	}
1794got_pg:
1795	return page;
1796
1797}
1798
1799/*
1800 * This is the 'heart' of the zoned buddy allocator.
1801 */
1802struct page *
1803__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
1804			struct zonelist *zonelist, nodemask_t *nodemask)
1805{
1806	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1807	struct zone *preferred_zone;
1808	struct page *page;
1809	int migratetype = allocflags_to_migratetype(gfp_mask);
1810
1811	lockdep_trace_alloc(gfp_mask);
1812
1813	might_sleep_if(gfp_mask & __GFP_WAIT);
1814
1815	if (should_fail_alloc_page(gfp_mask, order))
1816		return NULL;
1817
1818	/*
1819	 * Check the zones suitable for the gfp_mask contain at least one
1820	 * valid zone. It's possible to have an empty zonelist as a result
1821	 * of GFP_THISNODE and a memoryless node
1822	 */
1823	if (unlikely(!zonelist->_zonerefs->zone))
1824		return NULL;
1825
1826	/* The preferred zone is used for statistics later */
1827	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
1828	if (!preferred_zone)
1829		return NULL;
1830
1831	/* First allocation attempt */
1832	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1833			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
1834			preferred_zone, migratetype);
1835	if (unlikely(!page))
1836		page = __alloc_pages_slowpath(gfp_mask, order,
1837				zonelist, high_zoneidx, nodemask,
1838				preferred_zone, migratetype);
1839
1840	return page;
1841}
1842EXPORT_SYMBOL(__alloc_pages_nodemask);
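/*
 * For illustration only: callers normally reach this entry point through the
 * wrappers in gfp.h rather than calling it directly.  A minimal sketch of a
 * typical caller (hypothetical driver code) is:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);	(four contiguous pages)
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 *
 * alloc_pages() typically supplies the current node's zonelist and a NULL
 * nodemask, so most allocations hit the ALLOC_WMARK_LOW fast path above and
 * only fall into __alloc_pages_slowpath() under memory pressure.
 */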
1843
1844/*
1845 * Common helper functions.
1846 */
1847unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1848{
1849	struct page * page;
1850	page = alloc_pages(gfp_mask, order);
1851	if (!page)
1852		return 0;
1853	return (unsigned long) page_address(page);
1854}
1855
1856EXPORT_SYMBOL(__get_free_pages);
1857
1858unsigned long get_zeroed_page(gfp_t gfp_mask)
1859{
1860	struct page * page;
1861
1862	/*
1863	 * get_zeroed_page() returns a kernel virtual address, which cannot
1864	 * represent a highmem page that may have no permanent kernel mapping
1865	 */
1866	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1867
1868	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1869	if (page)
1870		return (unsigned long) page_address(page);
1871	return 0;
1872}
1873
1874EXPORT_SYMBOL(get_zeroed_page);
1875
1876void __pagevec_free(struct pagevec *pvec)
1877{
1878	int i = pagevec_count(pvec);
1879
1880	while (--i >= 0)
1881		free_hot_cold_page(pvec->pages[i], pvec->cold);
1882}
1883
1884void __free_pages(struct page *page, unsigned int order)
1885{
1886	if (put_page_testzero(page)) {
1887		if (order == 0)
1888			free_hot_page(page);
1889		else
1890			__free_pages_ok(page, order);
1891	}
1892}
1893
1894EXPORT_SYMBOL(__free_pages);
1895
1896void free_pages(unsigned long addr, unsigned int order)
1897{
1898	if (addr != 0) {
1899		VM_BUG_ON(!virt_addr_valid((void *)addr));
1900		__free_pages(virt_to_page((void *)addr), order);
1901	}
1902}
1903
1904EXPORT_SYMBOL(free_pages);
1905
1906/**
1907 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
1908 * @size: the number of bytes to allocate
1909 * @gfp_mask: GFP flags for the allocation
1910 *
1911 * This function is similar to alloc_pages(), except that it allocates the
1912 * minimum number of pages to satisfy the request.  alloc_pages() can only
1913 * allocate memory in power-of-two pages.
1914 *
1915 * This function is also limited by MAX_ORDER.
1916 *
1917 * Memory allocated by this function must be released by free_pages_exact().
1918 */
1919void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
1920{
1921	unsigned int order = get_order(size);
1922	unsigned long addr;
1923
1924	addr = __get_free_pages(gfp_mask, order);
1925	if (addr) {
1926		unsigned long alloc_end = addr + (PAGE_SIZE << order);
1927		unsigned long used = addr + PAGE_ALIGN(size);
1928
1929		split_page(virt_to_page(addr), order);
1930		while (used < alloc_end) {
1931			free_page(used);
1932			used += PAGE_SIZE;
1933		}
1934	}
1935
1936	return (void *)addr;
1937}
1938EXPORT_SYMBOL(alloc_pages_exact);
1939
1940/**
1941 * free_pages_exact - release memory allocated via alloc_pages_exact()
1942 * @virt: the value returned by alloc_pages_exact.
1943 * @size: size of allocation, same value as passed to alloc_pages_exact().
1944 *
1945 * Release the memory allocated by a previous call to alloc_pages_exact.
1946 */
1947void free_pages_exact(void *virt, size_t size)
1948{
1949	unsigned long addr = (unsigned long)virt;
1950	unsigned long end = addr + PAGE_ALIGN(size);
1951
1952	while (addr < end) {
1953		free_page(addr);
1954		addr += PAGE_SIZE;
1955	}
1956}
1957EXPORT_SYMBOL(free_pages_exact);
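/*
 * Illustrative use of the exact-size interface above (buffer size is
 * hypothetical, assuming 4KB pages): a 20KB request becomes an order-3
 * (32KB) allocation internally, is split, and the trailing 12KB is handed
 * straight back to the allocator, so only 20KB stays pinned:
 *
 *	void *buf = alloc_pages_exact(20 * 1024, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 20 * 1024);
 */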
1958
1959static unsigned int nr_free_zone_pages(int offset)
1960{
1961	struct zoneref *z;
1962	struct zone *zone;
1963
1964	/* Just pick one node, since fallback list is circular */
1965	unsigned int sum = 0;
1966
1967	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1968
1969	for_each_zone_zonelist(zone, z, zonelist, offset) {
1970		unsigned long size = zone->present_pages;
1971		unsigned long high = high_wmark_pages(zone);
1972		if (size > high)
1973			sum += size - high;
1974	}
1975
1976	return sum;
1977}
1978
1979/*
1980 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1981 */
1982unsigned int nr_free_buffer_pages(void)
1983{
1984	return nr_free_zone_pages(gfp_zone(GFP_USER));
1985}
1986EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1987
1988/*
1989 * Amount of free RAM allocatable within all zones
1990 */
1991unsigned int nr_free_pagecache_pages(void)
1992{
1993	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1994}
1995
1996static inline void show_node(struct zone *zone)
1997{
1998	if (NUMA_BUILD)
1999		printk("Node %d ", zone_to_nid(zone));
2000}
2001
2002void si_meminfo(struct sysinfo *val)
2003{
2004	val->totalram = totalram_pages;
2005	val->sharedram = 0;
2006	val->freeram = global_page_state(NR_FREE_PAGES);
2007	val->bufferram = nr_blockdev_pages();
2008	val->totalhigh = totalhigh_pages;
2009	val->freehigh = nr_free_highpages();
2010	val->mem_unit = PAGE_SIZE;
2011}
2012
2013EXPORT_SYMBOL(si_meminfo);
2014
2015#ifdef CONFIG_NUMA
2016void si_meminfo_node(struct sysinfo *val, int nid)
2017{
2018	pg_data_t *pgdat = NODE_DATA(nid);
2019
2020	val->totalram = pgdat->node_present_pages;
2021	val->freeram = node_page_state(nid, NR_FREE_PAGES);
2022#ifdef CONFIG_HIGHMEM
2023	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2024	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2025			NR_FREE_PAGES);
2026#else
2027	val->totalhigh = 0;
2028	val->freehigh = 0;
2029#endif
2030	val->mem_unit = PAGE_SIZE;
2031}
2032#endif
2033
2034#define K(x) ((x) << (PAGE_SHIFT-10))
2035
2036/*
2037 * Show the free area list (used inside shift+scroll-lock stuff).
2038 * We also calculate the percentage fragmentation. We do this by counting the
2039 * memory on each free list with the exception of the first item on the list.
2040 */
2041void show_free_areas(void)
2042{
2043	int cpu;
2044	struct zone *zone;
2045
2046	for_each_populated_zone(zone) {
2047		show_node(zone);
2048		printk("%s per-cpu:\n", zone->name);
2049
2050		for_each_online_cpu(cpu) {
2051			struct per_cpu_pageset *pageset;
2052
2053			pageset = zone_pcp(zone, cpu);
2054
2055			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2056			       cpu, pageset->pcp.high,
2057			       pageset->pcp.batch, pageset->pcp.count);
2058		}
2059	}
2060
2061	printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
2062		" inactive_file:%lu"
2063/* TODO: check/adjust line lengths */
2064#ifdef CONFIG_UNEVICTABLE_LRU
2065		" unevictable:%lu"
2066#endif
2067		" dirty:%lu writeback:%lu unstable:%lu\n"
2068		" free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
2069		global_page_state(NR_ACTIVE_ANON),
2070		global_page_state(NR_ACTIVE_FILE),
2071		global_page_state(NR_INACTIVE_ANON),
2072		global_page_state(NR_INACTIVE_FILE),
2073#ifdef CONFIG_UNEVICTABLE_LRU
2074		global_page_state(NR_UNEVICTABLE),
2075#endif
2076		global_page_state(NR_FILE_DIRTY),
2077		global_page_state(NR_WRITEBACK),
2078		global_page_state(NR_UNSTABLE_NFS),
2079		global_page_state(NR_FREE_PAGES),
2080		global_page_state(NR_SLAB_RECLAIMABLE) +
2081			global_page_state(NR_SLAB_UNRECLAIMABLE),
2082		global_page_state(NR_FILE_MAPPED),
2083		global_page_state(NR_PAGETABLE),
2084		global_page_state(NR_BOUNCE));
2085
2086	for_each_populated_zone(zone) {
2087		int i;
2088
2089		show_node(zone);
2090		printk("%s"
2091			" free:%lukB"
2092			" min:%lukB"
2093			" low:%lukB"
2094			" high:%lukB"
2095			" active_anon:%lukB"
2096			" inactive_anon:%lukB"
2097			" active_file:%lukB"
2098			" inactive_file:%lukB"
2099#ifdef CONFIG_UNEVICTABLE_LRU
2100			" unevictable:%lukB"
2101#endif
2102			" present:%lukB"
2103			" pages_scanned:%lu"
2104			" all_unreclaimable? %s"
2105			"\n",
2106			zone->name,
2107			K(zone_page_state(zone, NR_FREE_PAGES)),
2108			K(min_wmark_pages(zone)),
2109			K(low_wmark_pages(zone)),
2110			K(high_wmark_pages(zone)),
2111			K(zone_page_state(zone, NR_ACTIVE_ANON)),
2112			K(zone_page_state(zone, NR_INACTIVE_ANON)),
2113			K(zone_page_state(zone, NR_ACTIVE_FILE)),
2114			K(zone_page_state(zone, NR_INACTIVE_FILE)),
2115#ifdef CONFIG_UNEVICTABLE_LRU
2116			K(zone_page_state(zone, NR_UNEVICTABLE)),
2117#endif
2118			K(zone->present_pages),
2119			zone->pages_scanned,
2120			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
2121			);
2122		printk("lowmem_reserve[]:");
2123		for (i = 0; i < MAX_NR_ZONES; i++)
2124			printk(" %lu", zone->lowmem_reserve[i]);
2125		printk("\n");
2126	}
2127
2128	for_each_populated_zone(zone) {
2129 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
2130
2131		show_node(zone);
2132		printk("%s: ", zone->name);
2133
2134		spin_lock_irqsave(&zone->lock, flags);
2135		for (order = 0; order < MAX_ORDER; order++) {
2136			nr[order] = zone->free_area[order].nr_free;
2137			total += nr[order] << order;
2138		}
2139		spin_unlock_irqrestore(&zone->lock, flags);
2140		for (order = 0; order < MAX_ORDER; order++)
2141			printk("%lu*%lukB ", nr[order], K(1UL) << order);
2142		printk("= %lukB\n", K(total));
2143	}
2144
2145	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2146
2147	show_swap_cache_info();
2148}
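/*
 * For illustration, the per-order loop above prints one line per zone of the
 * form (counts hypothetical, all elided orders zero):
 *
 *	Normal: 3*4kB 2*8kB 0*16kB ... 1*4096kB = 4124kB
 *
 * i.e. the number of free blocks at each order multiplied by the block size,
 * followed by the zone's total free memory.
 */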
2149
2150static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2151{
2152	zoneref->zone = zone;
2153	zoneref->zone_idx = zone_idx(zone);
2154}
2155
2156/*
2157 * Builds allocation fallback zone lists.
2158 *
2159 * Add all populated zones of a node to the zonelist.
2160 */
2161static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2162				int nr_zones, enum zone_type zone_type)
2163{
2164	struct zone *zone;
2165
2166	BUG_ON(zone_type >= MAX_NR_ZONES);
2167	zone_type++;
2168
2169	do {
2170		zone_type--;
2171		zone = pgdat->node_zones + zone_type;
2172		if (populated_zone(zone)) {
2173			zoneref_set_zone(zone,
2174				&zonelist->_zonerefs[nr_zones++]);
2175			check_highest_zone(zone_type);
2176		}
2177
2178	} while (zone_type);
2179	return nr_zones;
2180}
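/*
 * Worked example (layout hypothetical): for a node whose populated zones are
 * DMA, DMA32 and Normal, a call with zone_type == MAX_NR_ZONES - 1 appends
 * the zonerefs in falling order (Normal, then DMA32, then DMA), so lookups
 * prefer the highest suitable zone and only fall back to the scarcer low
 * zones.
 */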
2181
2182
2183/*
2184 *  zonelist_order:
2185 *  0 = automatic detection of better ordering.
2186 *  1 = order by ([node] distance, -zonetype)
2187 *  2 = order by (-zonetype, [node] distance)
2188 *
2189 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2190 *  the same zonelist. So only NUMA can configure this param.
2191 */
2192#define ZONELIST_ORDER_DEFAULT  0
2193#define ZONELIST_ORDER_NODE     1
2194#define ZONELIST_ORDER_ZONE     2
2195
2196/* zonelist order in the kernel.
2197 * set_zonelist_order() will set this to NODE or ZONE.
2198 */
2199static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2200static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2201
2202
2203#ifdef CONFIG_NUMA
2204/* The value the user specified, possibly changed via boot parameter or sysctl */
2205static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2206/* string for sysctl */
2207#define NUMA_ZONELIST_ORDER_LEN	16
2208char numa_zonelist_order[16] = "default";
2209
2210/*
2211 * Interface for configuring zonelist ordering.
2212 * Command line option "numa_zonelist_order"
2213 *	= "[dD]efault"	- default, automatic configuration.
2214 *	= "[nN]ode"	- order by node locality, then by zone within node
2215 *	= "[zZ]one"	- order by zone, then by locality within zone
2216 */
2217
2218static int __parse_numa_zonelist_order(char *s)
2219{
2220	if (*s == 'd' || *s == 'D') {
2221		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2222	} else if (*s == 'n' || *s == 'N') {
2223		user_zonelist_order = ZONELIST_ORDER_NODE;
2224	} else if (*s == 'z' || *s == 'Z') {
2225		user_zonelist_order = ZONELIST_ORDER_ZONE;
2226	} else {
2227		printk(KERN_WARNING
2228			"Ignoring invalid numa_zonelist_order value:  "
2229			"%s\n", s);
2230		return -EINVAL;
2231	}
2232	return 0;
2233}
2234
2235static __init int setup_numa_zonelist_order(char *s)
2236{
2237	if (s)
2238		return __parse_numa_zonelist_order(s);
2239	return 0;
2240}
2241early_param("numa_zonelist_order", setup_numa_zonelist_order);
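/*
 * For illustration, the ordering can be selected at boot with the kernel
 * parameter parsed above, e.g. "numa_zonelist_order=zone", or changed at
 * run time through the sysctl handled below, for example:
 *
 *	echo zone > /proc/sys/vm/numa_zonelist_order
 *
 * ("node" and "default" are accepted the same way.)
 */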
2242
2243/*
2244 * sysctl handler for numa_zonelist_order
2245 */
2246int numa_zonelist_order_handler(ctl_table *table, int write,
2247		struct file *file, void __user *buffer, size_t *length,
2248		loff_t *ppos)
2249{
2250	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2251	int ret;
2252
2253	if (write)
2254		strncpy(saved_string, (char*)table->data,
2255			NUMA_ZONELIST_ORDER_LEN);
2256	ret = proc_dostring(table, write, file, buffer, length, ppos);
2257	if (ret)
2258		return ret;
2259	if (write) {
2260		int oldval = user_zonelist_order;
2261		if (__parse_numa_zonelist_order((char*)table->data)) {
2262			/*
2263			 * bogus value.  restore saved string
2264			 */
2265			strncpy((char*)table->data, saved_string,
2266				NUMA_ZONELIST_ORDER_LEN);
2267			user_zonelist_order = oldval;
2268		} else if (oldval != user_zonelist_order)
2269			build_all_zonelists();
2270	}
2271	return 0;
2272}
2273
2274
2275#define MAX_NODE_LOAD (nr_online_nodes)
2276static int node_load[MAX_NUMNODES];
2277
2278/**
2279 * find_next_best_node - find the next node that should appear in a given node's fallback list
2280 * @node: node whose fallback list we're appending
2281 * @used_node_mask: nodemask_t of already used nodes
2282 *
2283 * We use a number of factors to determine which is the next node that should
2284 * appear on a given node's fallback list.  The node should not have appeared
2285 * already in @node's fallback list, and it should be the next closest node
2286 * according to the distance array (which contains arbitrary distance values
2287 * from each node to each node in the system), and should also prefer nodes
2288 * with no CPUs, since presumably they'll have very little allocation pressure
2289 * on them otherwise.
2290 * It returns -1 if no node is found.
2291 */
2292static int find_next_best_node(int node, nodemask_t *used_node_mask)
2293{
2294	int n, val;
2295	int min_val = INT_MAX;
2296	int best_node = -1;
2297	const struct cpumask *tmp = cpumask_of_node(0);
2298
2299	/* Use the local node if we haven't already */
2300	if (!node_isset(node, *used_node_mask)) {
2301		node_set(node, *used_node_mask);
2302		return node;
2303	}
2304
2305	for_each_node_state(n, N_HIGH_MEMORY) {
2306
2307		/* Don't want a node to appear more than once */
2308		if (node_isset(n, *used_node_mask))
2309			continue;
2310
2311		/* Use the distance array to find the distance */
2312		val = node_distance(node, n);
2313
2314		/* Penalize nodes under us ("prefer the next node") */
2315		val += (n < node);
2316
2317		/* Give preference to headless and unused nodes */
2318		tmp = cpumask_of_node(n);
2319		if (!cpumask_empty(tmp))
2320			val += PENALTY_FOR_NODE_WITH_CPUS;
2321
2322		/* Slight preference for less loaded node */
2323		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2324		val += node_load[n];
2325
2326		if (val < min_val) {
2327			min_val = val;
2328			best_node = n;
2329		}
2330	}
2331
2332	if (best_node >= 0)
2333		node_set(best_node, *used_node_mask);
2334
2335	return best_node;
2336}
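/*
 * Worked example of the scoring above (node ids and distances hypothetical):
 * when node 0 builds its fallback list and nodes 1 and 2 both sit at
 * distance 20, but node 2 is headless while node 1 has CPUs, node 1 scores
 * (20 + PENALTY_FOR_NODE_WITH_CPUS) * MAX_NODE_LOAD * MAX_NUMNODES +
 * node_load[1] while node 2 scores 20 * MAX_NODE_LOAD * MAX_NUMNODES +
 * node_load[2], so the headless node 2 gets the lower score and is picked
 * first.
 */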
2337
2338
2339/*
2340 * Build zonelists ordered by node and zones within node.
2341 * This results in maximum locality--normal zone overflows into local
2342 * DMA zone, if any--but risks exhausting DMA zone.
2343 */
2344static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2345{
2346	int j;
2347	struct zonelist *zonelist;
2348
2349	zonelist = &pgdat->node_zonelists[0];
2350	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2351		;
2352	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2353							MAX_NR_ZONES - 1);
2354	zonelist->_zonerefs[j].zone = NULL;
2355	zonelist->_zonerefs[j].zone_idx = 0;
2356}
2357
2358/*
2359 * Build gfp_thisnode zonelists
2360 */
2361static void build_thisnode_zonelists(pg_data_t *pgdat)
2362{
2363	int j;
2364	struct zonelist *zonelist;
2365
2366	zonelist = &pgdat->node_zonelists[1];
2367	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2368	zonelist->_zonerefs[j].zone = NULL;
2369	zonelist->_zonerefs[j].zone_idx = 0;
2370}
2371
2372/*
2373 * Build zonelists ordered by zone and nodes within zones.
2374 * This results in conserving DMA zone[s] until all Normal memory is
2375 * exhausted, but results in overflowing to remote node while memory
2376 * may still exist in local DMA zone.
2377 */
2378static int node_order[MAX_NUMNODES];
2379
2380static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2381{
2382	int pos, j, node;
2383	int zone_type;		/* needs to be signed */
2384	struct zone *z;
2385	struct zonelist *zonelist;
2386
2387	zonelist = &pgdat->node_zonelists[0];
2388	pos = 0;
2389	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2390		for (j = 0; j < nr_nodes; j++) {
2391			node = node_order[j];
2392			z = &NODE_DATA(node)->node_zones[zone_type];
2393			if (populated_zone(z)) {
2394				zoneref_set_zone(z,
2395					&zonelist->_zonerefs[pos++]);
2396				check_highest_zone(zone_type);
2397			}
2398		}
2399	}
2400	zonelist->_zonerefs[pos].zone = NULL;
2401	zonelist->_zonerefs[pos].zone_idx = 0;
2402}
2403
2404static int default_zonelist_order(void)
2405{
2406	int nid, zone_type;
2407	unsigned long low_kmem_size,total_size;
2408	struct zone *z;
2409	int average_size;
2410	/*
2411	 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
2412	 * If they are really small and used heavily, the system can fall
2413	 * into OOM very easily.
2414	 * This function detects the ZONE_DMA/DMA32 size and configures zone order.
2415	 */
2416	/* Is there a ZONE_NORMAL? (e.g. ppc has only a DMA zone..) */
2417	low_kmem_size = 0;
2418	total_size = 0;
2419	for_each_online_node(nid) {
2420		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2421			z = &NODE_DATA(nid)->node_zones[zone_type];
2422			if (populated_zone(z)) {
2423				if (zone_type < ZONE_NORMAL)
2424					low_kmem_size += z->present_pages;
2425				total_size += z->present_pages;
2426			}
2427		}
2428	}
2429	if (!low_kmem_size ||  /* there is no DMA area. */
2430	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2431		return ZONELIST_ORDER_NODE;
2432	/*
2433	 * Look into each node's config.
2434	 * If there is a node where DMA/DMA32 memory makes up a very large
2435	 * share of local memory, NODE_ORDER may be suitable.
2436	 */
2437	average_size = total_size /
2438				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2439	for_each_online_node(nid) {
2440		low_kmem_size = 0;
2441		total_size = 0;
2442		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2443			z = &NODE_DATA(nid)->node_zones[zone_type];
2444			if (populated_zone(z)) {
2445				if (zone_type < ZONE_NORMAL)
2446					low_kmem_size += z->present_pages;
2447				total_size += z->present_pages;
2448			}
2449		}
2450		if (low_kmem_size &&
2451		    total_size > average_size && /* ignore small node */
2452		    low_kmem_size > total_size * 70/100)
2453			return ZONELIST_ORDER_NODE;
2454	}
2455	return ZONELIST_ORDER_ZONE;
2456}
2457
2458static void set_zonelist_order(void)
2459{
2460	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2461		current_zonelist_order = default_zonelist_order();
2462	else
2463		current_zonelist_order = user_zonelist_order;
2464}
2465
2466static void build_zonelists(pg_data_t *pgdat)
2467{
2468	int j, node, load;
2469	enum zone_type i;
2470	nodemask_t used_mask;
2471	int local_node, prev_node;
2472	struct zonelist *zonelist;
2473	int order = current_zonelist_order;
2474
2475	/* initialize zonelists */
2476	for (i = 0; i < MAX_ZONELISTS; i++) {
2477		zonelist = pgdat->node_zonelists + i;
2478		zonelist->_zonerefs[0].zone = NULL;
2479		zonelist->_zonerefs[0].zone_idx = 0;
2480	}
2481
2482	/* NUMA-aware ordering of nodes */
2483	local_node = pgdat->node_id;
2484	load = nr_online_nodes;
2485	prev_node = local_node;
2486	nodes_clear(used_mask);
2487
2488	memset(node_load, 0, sizeof(node_load));
2489	memset(node_order, 0, sizeof(node_order));
2490	j = 0;
2491
2492	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2493		int distance = node_distance(local_node, node);
2494
2495		/*
2496		 * If another node is sufficiently far away then it is better
2497		 * to reclaim pages in a zone before going off node.
2498		 */
2499		if (distance > RECLAIM_DISTANCE)
2500			zone_reclaim_mode = 1;
2501
2502		/*
2503		 * We don't want to put pressure on a particular node, so add
2504		 * a penalty to the first node in each distance group to make
2505		 * node selection round-robin.
2506		 */
2507		if (distance != node_distance(local_node, prev_node))
2508			node_load[node] = load;
2509
2510		prev_node = node;
2511		load--;
2512		if (order == ZONELIST_ORDER_NODE)
2513			build_zonelists_in_node_order(pgdat, node);
2514		else
2515			node_order[j++] = node;	/* remember order */
2516	}
2517
2518	if (order == ZONELIST_ORDER_ZONE) {
2519		/* calculate node order -- i.e., DMA last! */
2520		build_zonelists_in_zone_order(pgdat, j);
2521	}
2522
2523	build_thisnode_zonelists(pgdat);
2524}
2525
2526/* Construct the zonelist performance cache - see mmzone.h for details */
2527static void build_zonelist_cache(pg_data_t *pgdat)
2528{
2529	struct zonelist *zonelist;
2530	struct zonelist_cache *zlc;
2531	struct zoneref *z;
2532
2533	zonelist = &pgdat->node_zonelists[0];
2534	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2535	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2536	for (z = zonelist->_zonerefs; z->zone; z++)
2537		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2538}
2539
2540
2541#else	/* CONFIG_NUMA */
2542
2543static void set_zonelist_order(void)
2544{
2545	current_zonelist_order = ZONELIST_ORDER_ZONE;
2546}
2547
2548static void build_zonelists(pg_data_t *pgdat)
2549{
2550	int node, local_node;
2551	enum zone_type j;
2552	struct zonelist *zonelist;
2553
2554	local_node = pgdat->node_id;
2555
2556	zonelist = &pgdat->node_zonelists[0];
2557	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2558
2559	/*
2560	 * Now we build the zonelist so that it contains the zones
2561	 * of all the other nodes.
2562	 * We don't want to pressure a particular node, so when
2563	 * building the zones for node N, we make sure that the
2564	 * zones coming right after the local ones are those from
2565	 * node N+1 (modulo N)
2566	 */
2567	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2568		if (!node_online(node))
2569			continue;
2570		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2571							MAX_NR_ZONES - 1);
2572	}
2573	for (node = 0; node < local_node; node++) {
2574		if (!node_online(node))
2575			continue;
2576		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2577							MAX_NR_ZONES - 1);
2578	}
2579
2580	zonelist->_zonerefs[j].zone = NULL;
2581	zonelist->_zonerefs[j].zone_idx = 0;
2582}
2583
2584/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2585static void build_zonelist_cache(pg_data_t *pgdat)
2586{
2587	pgdat->node_zonelists[0].zlcache_ptr = NULL;
2588}
2589
2590#endif	/* CONFIG_NUMA */
2591
2592/* The return type is int only to satisfy the stop_machine() callback signature */
2593static int __build_all_zonelists(void *dummy)
2594{
2595	int nid;
2596
2597	for_each_online_node(nid) {
2598		pg_data_t *pgdat = NODE_DATA(nid);
2599
2600		build_zonelists(pgdat);
2601		build_zonelist_cache(pgdat);
2602	}
2603	return 0;
2604}
2605
2606void build_all_zonelists(void)
2607{
2608	set_zonelist_order();
2609
2610	if (system_state == SYSTEM_BOOTING) {
2611		__build_all_zonelists(NULL);
2612		mminit_verify_zonelist();
2613		cpuset_init_current_mems_allowed();
2614	} else {
2615		/* We have to stop all CPUs to guarantee there is no user
2616		   of the zonelists */
2617		stop_machine(__build_all_zonelists, NULL, NULL);
2618		/* cpuset refresh routine should be here */
2619	}
2620	vm_total_pages = nr_free_pagecache_pages();
2621	/*
2622	 * Disable grouping by mobility if the number of pages in the
2623	 * system is too low to allow the mechanism to work. It would be
2624	 * more accurate, but expensive to check per-zone. This check is
2625	 * made on memory-hotadd so a system can start with mobility
2626	 * disabled and enable it later
2627	 */
2628	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2629		page_group_by_mobility_disabled = 1;
2630	else
2631		page_group_by_mobility_disabled = 0;
2632
2633	printk("Built %i zonelists in %s order, mobility grouping %s.  "
2634		"Total pages: %ld\n",
2635			nr_online_nodes,
2636			zonelist_order_name[current_zonelist_order],
2637			page_group_by_mobility_disabled ? "off" : "on",
2638			vm_total_pages);
2639#ifdef CONFIG_NUMA
2640	printk("Policy zone: %s\n", zone_names[policy_zone]);
2641#endif
2642}
2643
2644/*
2645 * Helper functions to size the waitqueue hash table.
2646 * Essentially these want to choose hash table sizes sufficiently
2647 * large so that collisions trying to wait on pages are rare.
2648 * But in fact, the number of active page waitqueues on typical
2649 * systems is ridiculously low, less than 200. So even this is
2650 * conservative, even though it seems large.
2651 *
2652 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2653 * waitqueues, i.e. the size of the waitq table given the number of pages.
2654 */
2655#define PAGES_PER_WAITQUEUE	256
2656
2657#ifndef CONFIG_MEMORY_HOTPLUG
2658static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2659{
2660	unsigned long size = 1;
2661
2662	pages /= PAGES_PER_WAITQUEUE;
2663
2664	while (size < pages)
2665		size <<= 1;
2666
2667	/*
2668	 * Once we have dozens or even hundreds of threads sleeping
2669	 * on IO we've got bigger problems than wait queue collision.
2670	 * Limit the size of the wait table to a reasonable size.
2671	 */
2672	size = min(size, 4096UL);
2673
2674	return max(size, 4UL);
2675}
2676#else
2677/*
2678 * A zone's size might be changed by hot-add, so it is not possible to determine
2679 * a suitable size for its wait_table.  So we use the maximum size now.
2680 *
2681 * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
2682 *
2683 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2684 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2685 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2686 *
2687 * With the traditional sizing above, the maximum number of entries is
2688 * reached once a zone has (512K + 256) pages or more. That corresponds to:
2689 *
2690 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2691 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2692 *    powerpc (64K page size)             : =  (32G +16M)byte.
2693 */
2694static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2695{
2696	return 4096UL;
2697}
2698#endif
2699
2700/*
2701 * This is an integer logarithm so that shifts can be used later
2702 * to extract the more random high bits from the multiplicative
2703 * hash function before the remainder is taken.
2704 */
2705static inline unsigned long wait_table_bits(unsigned long size)
2706{
2707	return ffz(~size);
2708}
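/*
 * Worked example of the two helpers above (zone size hypothetical, 4KB
 * pages): a 128MB zone spans 32768 pages, so 32768 / PAGES_PER_WAITQUEUE
 * gives 128 waitqueue entries, and wait_table_bits(128) == 7, i.e. seven
 * bits of the multiplicative hash select a waiter's queue.
 */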
2709
2710#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2711
2712/*
2713 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2714 * of blocks reserved is based on min_wmark_pages(zone). The memory within
2715 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
2716 * higher will lead to a bigger reserve which will get freed as contiguous
2717 * blocks as reclaim kicks in
2718 */
2719static void setup_zone_migrate_reserve(struct zone *zone)
2720{
2721	unsigned long start_pfn, pfn, end_pfn;
2722	struct page *page;
2723	unsigned long reserve, block_migratetype;
2724
2725	/* Get the start pfn, end pfn and the number of blocks to reserve */
2726	start_pfn = zone->zone_start_pfn;
2727	end_pfn = start_pfn + zone->spanned_pages;
2728	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
2729							pageblock_order;
2730
2731	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2732		if (!pfn_valid(pfn))
2733			continue;
2734		page = pfn_to_page(pfn);
2735
2736		/* Watch out for overlapping nodes */
2737		if (page_to_nid(page) != zone_to_nid(zone))
2738			continue;
2739
2740		/* Blocks with reserved pages will never free, skip them. */
2741		if (PageReserved(page))
2742			continue;
2743
2744		block_migratetype = get_pageblock_migratetype(page);
2745
2746		/* If this block is reserved, account for it */
2747		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2748			reserve--;
2749			continue;
2750		}
2751
2752		/* Suitable for reserving if this block is movable */
2753		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2754			set_pageblock_migratetype(page, MIGRATE_RESERVE);
2755			move_freepages_block(zone, page, MIGRATE_RESERVE);
2756			reserve--;
2757			continue;
2758		}
2759
2760		/*
2761		 * If the reserve is met and this is a previous reserved block,
2762		 * take it back
2763		 */
2764		if (block_migratetype == MIGRATE_RESERVE) {
2765			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2766			move_freepages_block(zone, page, MIGRATE_MOVABLE);
2767		}
2768	}
2769}
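/*
 * Worked example of the reserve sizing above (numbers hypothetical, 4KB
 * pages, 2MB pageblocks): with min_wmark_pages(zone) == 1024 pages (4MB),
 * roundup(1024, 512) >> pageblock_order is 1024 >> 9 == 2, so the first two
 * suitable MIGRATE_MOVABLE pageblocks in the zone are converted to
 * MIGRATE_RESERVE.
 */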
2770
2771/*
2772 * Initially all pages are reserved - free ones are freed
2773 * up by free_all_bootmem() once the early boot process is
2774 * done. Non-atomic initialization, single-pass.
2775 */
2776void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2777		unsigned long start_pfn, enum memmap_context context)
2778{
2779	struct page *page;
2780	unsigned long end_pfn = start_pfn + size;
2781	unsigned long pfn;
2782	struct zone *z;
2783
2784	if (highest_memmap_pfn < end_pfn - 1)
2785		highest_memmap_pfn = end_pfn - 1;
2786
2787	z = &NODE_DATA(nid)->node_zones[zone];
2788	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2789		/*
2790		 * There can be holes in boot-time mem_map[]s
2791		 * handed to this function.  They do not
2792		 * exist on hotplugged memory.
2793		 */
2794		if (context == MEMMAP_EARLY) {
2795			if (!early_pfn_valid(pfn))
2796				continue;
2797			if (!early_pfn_in_nid(pfn, nid))
2798				continue;
2799		}
2800		page = pfn_to_page(pfn);
2801		set_page_links(page, zone, nid, pfn);
2802		mminit_verify_page_links(page, zone, nid, pfn);
2803		init_page_count(page);
2804		reset_page_mapcount(page);
2805		SetPageReserved(page);
2806		/*
2807		 * Mark the block movable so that blocks are reserved for
2808		 * movable at startup. This will force kernel allocations
2809		 * to reserve their blocks rather than leaking throughout
2810		 * the address space during boot when many long-lived
2811		 * kernel allocations are made. Later some blocks near
2812		 * the start are marked MIGRATE_RESERVE by
2813		 * setup_zone_migrate_reserve()
2814		 *
2815		 * The bitmap is created for the zone's valid pfn range, but the
2816		 * memmap can be created for invalid pages (for alignment).
2817		 * Check here so that set_pageblock_migratetype() is not called
2818		 * for a pfn outside the zone.
2819		 */
2820		if ((z->zone_start_pfn <= pfn)
2821		    && (pfn < z->zone_start_pfn + z->spanned_pages)
2822		    && !(pfn & (pageblock_nr_pages - 1)))
2823			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2824
2825		INIT_LIST_HEAD(&page->lru);
2826#ifdef WANT_PAGE_VIRTUAL
2827		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
2828		if (!is_highmem_idx(zone))
2829			set_page_address(page, __va(pfn << PAGE_SHIFT));
2830#endif
2831	}
2832}
2833
2834static void __meminit zone_init_free_lists(struct zone *zone)
2835{
2836	int order, t;
2837	for_each_migratetype_order(order, t) {
2838		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2839		zone->free_area[order].nr_free = 0;
2840	}
2841}
2842
2843#ifndef __HAVE_ARCH_MEMMAP_INIT
2844#define memmap_init(size, nid, zone, start_pfn) \
2845	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
2846#endif
2847
2848static int zone_batchsize(struct zone *zone)
2849{
2850#ifdef CONFIG_MMU
2851	int batch;
2852
2853	/*
2854	 * The per-cpu-pages pools are set to around 1000th of the
2855	 * size of the zone.  But no more than 1/2 of a meg.
2856	 *
2857	 * OK, so we don't know how big the cache is.  So guess.
2858	 */
2859	batch = zone->present_pages / 1024;
2860	if (batch * PAGE_SIZE > 512 * 1024)
2861		batch = (512 * 1024) / PAGE_SIZE;
2862	batch /= 4;		/* We effectively *= 4 below */
2863	if (batch < 1)
2864		batch = 1;
2865
2866	/*
2867	 * Clamp the batch to a 2^n - 1 value. Having a power
2868	 * of 2 value was found to be more likely to have
2869	 * suboptimal cache aliasing properties in some cases.
2870	 *
2871	 * For example if 2 tasks are alternately allocating
2872	 * batches of pages, one task can end up with a lot
2873	 * of pages of one half of the possible page colors
2874	 * and the other with pages of the other colors.
2875	 */
2876	batch = rounddown_pow_of_two(batch + batch/2) - 1;
2877
2878	return batch;
2879
2880#else
2881	/* The deferral and batching of frees should be suppressed under NOMMU
2882	 * conditions.
2883	 *
2884	 * The problem is that NOMMU needs to be able to allocate large chunks
2885	 * of contiguous memory as there's no hardware page translation to
2886	 * assemble apparent contiguous memory from discontiguous pages.
2887	 *
2888	 * Queueing large contiguous runs of pages for batching, however,
2889	 * causes the pages to actually be freed in smaller chunks.  As there
2890	 * can be a significant delay between the individual batches being
2891	 * recycled, this leads to the once large chunks of space being
2892	 * fragmented and becoming unavailable for high-order allocations.
2893	 */
2894	return 0;
2895#endif
2896}
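/*
 * Worked example of the MMU-case sizing above (zone size hypothetical, 4KB
 * pages): a 1GB zone has 262144 present pages, so batch starts at 256, is
 * clamped to 128 because 256 pages exceed 512KB, becomes 32 after the
 * divide by 4, and rounds to 2^n - 1 as 31; setup_pageset() below then sets
 * pcp->high to 6 * 31 == 186 pages.
 */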
2897
2898static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2899{
2900	struct per_cpu_pages *pcp;
2901
2902	memset(p, 0, sizeof(*p));
2903
2904	pcp = &p->pcp;
2905	pcp->count = 0;
2906	pcp->high = 6 * batch;
2907	pcp->batch = max(1UL, 1 * batch);
2908	INIT_LIST_HEAD(&pcp->list);
2909}
2910
2911/*
2912 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
2913 * to the value high for the pageset p.
2914 */
2915
2916static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2917				unsigned long high)
2918{
2919	struct per_cpu_pages *pcp;
2920
2921	pcp = &p->pcp;
2922	pcp->high = high;
2923	pcp->batch = max(1UL, high/4);
2924	if ((high/4) > (PAGE_SHIFT * 8))
2925		pcp->batch = PAGE_SHIFT * 8;
2926}
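/*
 * Worked example (values hypothetical): writing 8 to the
 * percpu_pagelist_fraction sysctl makes process_zones() below call this
 * helper with high == zone->present_pages / 8, so a 262144-page zone gets
 * pcp->high == 32768 while pcp->batch is clamped from 8192 down to
 * PAGE_SHIFT * 8 == 96 on a 4KB-page build.
 */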
2927
2928
2929#ifdef CONFIG_NUMA
2930/*
2931 * Boot pageset table. One per cpu which is going to be used for all
2932 * zones and all nodes. The parameters will be set in such a way
2933 * that an item put on a list will immediately be handed over to
2934 * the buddy list. This is safe since pageset manipulation is done
2935 * with interrupts disabled.
2936 *
2937 * Some NUMA counter updates may also be caught by the boot pagesets.
2938 *
2939 * The boot_pagesets must be kept even after bootup is complete for
2940 * unused processors and/or zones. They do play a role for bootstrapping
2941 * hotplugged processors.
2942 *
2943 * zoneinfo_show() and maybe other functions do
2944 * not check if the processor is online before following the pageset pointer.
2945 * Other parts of the kernel may not check if the zone is available.
2946 */
2947static struct per_cpu_pageset boot_pageset[NR_CPUS];
2948
2949/*
2950 * Dynamically allocate memory for the
2951 * per cpu pageset array in struct zone.
2952 */
2953static int __cpuinit process_zones(int cpu)
2954{
2955	struct zone *zone, *dzone;
2956	int node = cpu_to_node(cpu);
2957
2958	node_set_state(node, N_CPU);	/* this node has a cpu */
2959
2960	for_each_populated_zone(zone) {
2961		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
2962					 GFP_KERNEL, node);
2963		if (!zone_pcp(zone, cpu))
2964			goto bad;
2965
2966		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
2967
2968		if (percpu_pagelist_fraction)
2969			setup_pagelist_highmark(zone_pcp(zone, cpu),
2970			 	(zone->present_pages / percpu_pagelist_fraction));
2971	}
2972
2973	return 0;
2974bad:
2975	for_each_zone(dzone) {
2976		if (!populated_zone(dzone))
2977			continue;
2978		if (dzone == zone)
2979			break;
2980		kfree(zone_pcp(dzone, cpu));
2981		zone_pcp(dzone, cpu) = NULL;
2982	}
2983	return -ENOMEM;
2984}
2985
2986static inline void free_zone_pagesets(int cpu)
2987{
2988	struct zone *zone;
2989
2990	for_each_zone(zone) {
2991		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2992
2993		/* Free per_cpu_pageset if it is slab allocated */
2994		if (pset != &boot_pageset[cpu])
2995			kfree(pset);
2996		zone_pcp(zone, cpu) = NULL;
2997	}
2998}
2999
3000static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
3001		unsigned long action,
3002		void *hcpu)
3003{
3004	int cpu = (long)hcpu;
3005	int ret = NOTIFY_OK;
3006
3007	switch (action) {
3008	case CPU_UP_PREPARE:
3009	case CPU_UP_PREPARE_FROZEN:
3010		if (process_zones(cpu))
3011			ret = NOTIFY_BAD;
3012		break;
3013	case CPU_UP_CANCELED:
3014	case CPU_UP_CANCELED_FROZEN:
3015	case CPU_DEAD:
3016	case CPU_DEAD_FROZEN:
3017		free_zone_pagesets(cpu);
3018		break;
3019	default:
3020		break;
3021	}
3022	return ret;
3023}
3024
3025static struct notifier_block __cpuinitdata pageset_notifier =
3026	{ &pageset_cpuup_callback, NULL, 0 };
3027
3028void __init setup_per_cpu_pageset(void)
3029{
3030	int err;
3031
3032	/* Initialize per_cpu_pageset for cpu 0.
3033	 * A cpuup callback will do this for every cpu
3034	 * as it comes online
3035	 */
3036	err = process_zones(smp_processor_id());
3037	BUG_ON(err);
3038	register_cpu_notifier(&pageset_notifier);
3039}
3040
3041#endif
3042
3043static noinline __init_refok
3044int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3045{
3046	int i;
3047	struct pglist_data *pgdat = zone->zone_pgdat;
3048	size_t alloc_size;
3049
3050	/*
3051	 * The per-page waitqueue mechanism uses hashed waitqueues
3052	 * per zone.
3053	 */
3054	zone->wait_table_hash_nr_entries =
3055		 wait_table_hash_nr_entries(zone_size_pages);
3056	zone->wait_table_bits =
3057		wait_table_bits(zone->wait_table_hash_nr_entries);
3058	alloc_size = zone->wait_table_hash_nr_entries
3059					* sizeof(wait_queue_head_t);
3060
3061	if (!slab_is_available()) {
3062		zone->wait_table = (wait_queue_head_t *)
3063			alloc_bootmem_node(pgdat, alloc_size);
3064	} else {
3065		/*
3066		 * This case means that a zone whose size was 0 gets new memory
3067		 * via memory hot-add.
3068		 * But it may be the case that a new node was hot-added.  In
3069		 * this case vmalloc() will not be able to use this new node's
3070		 * memory - this wait_table must be initialized to use this new
3071		 * node itself as well.
3072		 * To use this new node's memory, further consideration will be
3073		 * necessary.
3074		 */
3075		zone->wait_table = vmalloc(alloc_size);
3076	}
3077	if (!zone->wait_table)
3078		return -ENOMEM;
3079
3080	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3081		init_waitqueue_head(zone->wait_table + i);
3082
3083	return 0;
3084}
3085
3086static __meminit void zone_pcp_init(struct zone *zone)
3087{
3088	int cpu;
3089	unsigned long batch = zone_batchsize(zone);
3090
3091	for (cpu = 0; cpu < NR_CPUS; cpu++) {
3092#ifdef CONFIG_NUMA
3093		/* Early boot. Slab allocator not functional yet */
3094		zone_pcp(zone, cpu) = &boot_pageset[cpu];
3095		setup_pageset(&boot_pageset[cpu],0);
3096#else
3097		setup_pageset(zone_pcp(zone,cpu), batch);
3098#endif
3099	}
3100	if (zone->present_pages)
3101		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
3102			zone->name, zone->present_pages, batch);
3103}
3104
3105__meminit int init_currently_empty_zone(struct zone *zone,
3106					unsigned long zone_start_pfn,
3107					unsigned long size,
3108					enum memmap_context context)
3109{
3110	struct pglist_data *pgdat = zone->zone_pgdat;
3111	int ret;
3112	ret = zone_wait_table_init(zone, size);
3113	if (ret)
3114		return ret;
3115	pgdat->nr_zones = zone_idx(zone) + 1;
3116
3117	zone->zone_start_pfn = zone_start_pfn;
3118
3119	mminit_dprintk(MMINIT_TRACE, "memmap_init",
3120			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
3121			pgdat->node_id,
3122			(unsigned long)zone_idx(zone),
3123			zone_start_pfn, (zone_start_pfn + size));
3124
3125	zone_init_free_lists(zone);
3126
3127	return 0;
3128}
3129
3130#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3131/*
3132 * Basic iterator support. Return the first range of PFNs for a node
3133 * Note: nid == MAX_NUMNODES returns first region regardless of node
3134 */
3135static int __meminit first_active_region_index_in_nid(int nid)
3136{
3137	int i;
3138
3139	for (i = 0; i < nr_nodemap_entries; i++)
3140		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3141			return i;
3142
3143	return -1;
3144}
3145
3146/*
3147 * Basic iterator support. Return the next active range of PFNs for a node
3148 * Note: nid == MAX_NUMNODES returns next region regardless of node
3149 */
3150static int __meminit next_active_region_index_in_nid(int index, int nid)
3151{
3152	for (index = index + 1; index < nr_nodemap_entries; index++)
3153		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3154			return index;
3155
3156	return -1;
3157}
3158
3159#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3160/*
3161 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3162 * Architectures may implement their own version but if add_active_range()
3163 * was used and there are no special requirements, this is a convenient
3164 * alternative
3165 */
3166int __meminit __early_pfn_to_nid(unsigned long pfn)
3167{
3168	int i;
3169
3170	for (i = 0; i < nr_nodemap_entries; i++) {
3171		unsigned long start_pfn = early_node_map[i].start_pfn;
3172		unsigned long end_pfn = early_node_map[i].end_pfn;
3173
3174		if (start_pfn <= pfn && pfn < end_pfn)
3175			return early_node_map[i].nid;
3176	}
3177	/* This is a memory hole */
3178	return -1;
3179}
3180#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3181
3182int __meminit early_pfn_to_nid(unsigned long pfn)
3183{
3184	int nid;
3185
3186	nid = __early_pfn_to_nid(pfn);
3187	if (nid >= 0)
3188		return nid;
3189	/* just returns 0 */
3190	return 0;
3191}
3192
3193#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3194bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3195{
3196	int nid;
3197
3198	nid = __early_pfn_to_nid(pfn);
3199	if (nid >= 0 && nid != node)
3200		return false;
3201	return true;
3202}
3203#endif
3204
3205/* Basic iterator support to walk early_node_map[] */
3206#define for_each_active_range_index_in_nid(i, nid) \
3207	for (i = first_active_region_index_in_nid(nid); i != -1; \
3208				i = next_active_region_index_in_nid(i, nid))
3209
3210/**
3211 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3212 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3213 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3214 *
3215 * If an architecture guarantees that all ranges registered with
3216 * add_active_ranges() contain no holes and may be freed, this
3217 * function may be used instead of calling free_bootmem() manually.
3218 */
3219void __init free_bootmem_with_active_regions(int nid,
3220						unsigned long max_low_pfn)
3221{
3222	int i;
3223
3224	for_each_active_range_index_in_nid(i, nid) {
3225		unsigned long size_pages = 0;
3226		unsigned long end_pfn = early_node_map[i].end_pfn;
3227
3228		if (early_node_map[i].start_pfn >= max_low_pfn)
3229			continue;
3230
3231		if (end_pfn > max_low_pfn)
3232			end_pfn = max_low_pfn;
3233
3234		size_pages = end_pfn - early_node_map[i].start_pfn;
3235		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3236				PFN_PHYS(early_node_map[i].start_pfn),
3237				size_pages << PAGE_SHIFT);
3238	}
3239}
3240
3241void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3242{
3243	int i;
3244	int ret;
3245
3246	for_each_active_range_index_in_nid(i, nid) {
3247		ret = work_fn(early_node_map[i].start_pfn,
3248			      early_node_map[i].end_pfn, data);
3249		if (ret)
3250			break;
3251	}
3252}
3253/**
3254 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3255 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3256 *
3257 * If an architecture guarantees that all ranges registered with
3258 * add_active_ranges() contain no holes and may be freed, this
3259 * function may be used instead of calling memory_present() manually.
3260 */
3261void __init sparse_memory_present_with_active_regions(int nid)
3262{
3263	int i;
3264
3265	for_each_active_range_index_in_nid(i, nid)
3266		memory_present(early_node_map[i].nid,
3267				early_node_map[i].start_pfn,
3268				early_node_map[i].end_pfn);
3269}
3270
3271/**
3272 * get_pfn_range_for_nid - Return the start and end page frames for a node
3273 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3274 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3275 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3276 *
3277 * It returns the start and end page frame of a node based on information
3278 * provided by an arch calling add_active_range(). If called for a node
3279 * with no available memory, a warning is printed and the start and end
3280 * PFNs will be 0.
3281 */
3282void __meminit get_pfn_range_for_nid(unsigned int nid,
3283			unsigned long *start_pfn, unsigned long *end_pfn)
3284{
3285	int i;
3286	*start_pfn = -1UL;
3287	*end_pfn = 0;
3288
3289	for_each_active_range_index_in_nid(i, nid) {
3290		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3291		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3292	}
3293
3294	if (*start_pfn == -1UL)
3295		*start_pfn = 0;
3296}
3297
3298/*
3299 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3300 * assumption is made that zones within a node are ordered in monotonically
3301 * increasing memory addresses so that the "highest" populated zone is used
3302 */
3303static void __init find_usable_zone_for_movable(void)
3304{
3305	int zone_index;
3306	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3307		if (zone_index == ZONE_MOVABLE)
3308			continue;
3309
3310		if (arch_zone_highest_possible_pfn[zone_index] >
3311				arch_zone_lowest_possible_pfn[zone_index])
3312			break;
3313	}
3314
3315	VM_BUG_ON(zone_index == -1);
3316	movable_zone = zone_index;
3317}
3318
3319/*
3320 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3321 * because it is sized independently of the architecture. Unlike the other zones,
3322 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3323 * in each node depending on the size of each node and how evenly kernelcore
3324 * is distributed. This helper function adjusts the zone ranges
3325 * provided by the architecture for a given node by using the end of the
3326 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3327 * zones within a node are in order of monotonically increasing memory addresses
3328 */
3329static void __meminit adjust_zone_range_for_zone_movable(int nid,
3330					unsigned long zone_type,
3331					unsigned long node_start_pfn,
3332					unsigned long node_end_pfn,
3333					unsigned long *zone_start_pfn,
3334					unsigned long *zone_end_pfn)
3335{
3336	/* Only adjust if ZONE_MOVABLE is on this node */
3337	if (zone_movable_pfn[nid]) {
3338		/* Size ZONE_MOVABLE */
3339		if (zone_type == ZONE_MOVABLE) {
3340			*zone_start_pfn = zone_movable_pfn[nid];
3341			*zone_end_pfn = min(node_end_pfn,
3342				arch_zone_highest_possible_pfn[movable_zone]);
3343
3344		/* Adjust for ZONE_MOVABLE starting within this range */
3345		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3346				*zone_end_pfn > zone_movable_pfn[nid]) {
3347			*zone_end_pfn = zone_movable_pfn[nid];
3348
3349		/* Check if this whole range is within ZONE_MOVABLE */
3350		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3351			*zone_start_pfn = *zone_end_pfn;
3352	}
3353}
3354
3355/*
3356 * Return the number of pages a zone spans in a node, including holes
3357 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3358 */
3359static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3360					unsigned long zone_type,
3361					unsigned long *ignored)
3362{
3363	unsigned long node_start_pfn, node_end_pfn;
3364	unsigned long zone_start_pfn, zone_end_pfn;
3365
3366	/* Get the start and end of the node and zone */
3367	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3368	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3369	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3370	adjust_zone_range_for_zone_movable(nid, zone_type,
3371				node_start_pfn, node_end_pfn,
3372				&zone_start_pfn, &zone_end_pfn);
3373
3374	/* Check that this node has pages within the zone's required range */
3375	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3376		return 0;
3377
3378	/* Move the zone boundaries inside the node if necessary */
3379	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3380	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3381
3382	/* Return the spanned pages */
3383	return zone_end_pfn - zone_start_pfn;
3384}
3385
3386/*
3387 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3388 * then all holes in the requested range will be accounted for.
3389 */
3390static unsigned long __meminit __absent_pages_in_range(int nid,
3391				unsigned long range_start_pfn,
3392				unsigned long range_end_pfn)
3393{
3394	int i = 0;
3395	unsigned long prev_end_pfn = 0, hole_pages = 0;
3396	unsigned long start_pfn;
3397
3398	/* Find the end_pfn of the first active range of pfns in the node */
3399	i = first_active_region_index_in_nid(nid);
3400	if (i == -1)
3401		return 0;
3402
3403	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3404
3405	/* Account for ranges before physical memory on this node */
3406	if (early_node_map[i].start_pfn > range_start_pfn)
3407		hole_pages = prev_end_pfn - range_start_pfn;
3408
3409	/* Find all holes for the zone within the node */
3410	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3411
3412		/* No need to continue if prev_end_pfn is outside the zone */
3413		if (prev_end_pfn >= range_end_pfn)
3414			break;
3415
3416		/* Make sure the end of the zone is not within the hole */
3417		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3418		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3419
3420		/* Update the hole size count and move on */
3421		if (start_pfn > range_start_pfn) {
3422			BUG_ON(prev_end_pfn > start_pfn);
3423			hole_pages += start_pfn - prev_end_pfn;
3424		}
3425		prev_end_pfn = early_node_map[i].end_pfn;
3426	}
3427
3428	/* Account for ranges past physical memory on this node */
3429	if (range_end_pfn > prev_end_pfn)
3430		hole_pages += range_end_pfn -
3431				max(range_start_pfn, prev_end_pfn);
3432
3433	return hole_pages;
3434}
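/*
 * Worked example of the hole accounting above (pfns hypothetical): for the
 * range 0..4096 on a node whose early_node_map[] holds [256, 1024) and
 * [2048, 4096), the 256 pages before the first region and the 1024-page gap
 * between the regions are summed, nothing follows the last region, and 1280
 * hole pages are reported.
 */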
3435
3436/**
3437 * absent_pages_in_range - Return number of page frames in holes within a range
3438 * @start_pfn: The start PFN to start searching for holes
3439 * @end_pfn: The end PFN to stop searching for holes
3440 *
3441 * It returns the number of pages frames in memory holes within a range.
3442 */
3443unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3444							unsigned long end_pfn)
3445{
3446	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3447}
3448
3449/* Return the number of page frames in holes in a zone on a node */
3450static unsigned long __meminit zone_absent_pages_in_node(int nid,
3451					unsigned long zone_type,
3452					unsigned long *ignored)
3453{
3454	unsigned long node_start_pfn, node_end_pfn;
3455	unsigned long zone_start_pfn, zone_end_pfn;
3456
3457	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3458	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3459							node_start_pfn);
3460	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3461							node_end_pfn);
3462
3463	adjust_zone_range_for_zone_movable(nid, zone_type,
3464			node_start_pfn, node_end_pfn,
3465			&zone_start_pfn, &zone_end_pfn);
3466	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3467}
3468
3469#else
3470static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3471					unsigned long zone_type,
3472					unsigned long *zones_size)
3473{
3474	return zones_size[zone_type];
3475}
3476
3477static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3478						unsigned long zone_type,
3479						unsigned long *zholes_size)
3480{
3481	if (!zholes_size)
3482		return 0;
3483
3484	return zholes_size[zone_type];
3485}
3486
3487#endif
3488
3489static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3490		unsigned long *zones_size, unsigned long *zholes_size)
3491{
3492	unsigned long realtotalpages, totalpages = 0;
3493	enum zone_type i;
3494
3495	for (i = 0; i < MAX_NR_ZONES; i++)
3496		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3497								zones_size);
3498	pgdat->node_spanned_pages = totalpages;
3499
3500	realtotalpages = totalpages;
3501	for (i = 0; i < MAX_NR_ZONES; i++)
3502		realtotalpages -=
3503			zone_absent_pages_in_node(pgdat->node_id, i,
3504								zholes_size);
3505	pgdat->node_present_pages = realtotalpages;
3506	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3507							realtotalpages);
3508}
3509
3510#ifndef CONFIG_SPARSEMEM
3511/*
3512 * Calculate the size of the zone->blockflags bitmap, rounded up to an
3513 * unsigned long. Start by making sure zonesize is a multiple of
3514 * pageblock_order by rounding up; then use NR_PAGEBLOCK_BITS bits per
3515 * pageblock, round what is now in bits up to the nearest long in bits,
3516 * and return the result in bytes.
3517 */
3518static unsigned long __init usemap_size(unsigned long zonesize)
3519{
3520	unsigned long usemapsize;
3521
3522	usemapsize = roundup(zonesize, pageblock_nr_pages);
3523	usemapsize = usemapsize >> pageblock_order;
3524	usemapsize *= NR_PAGEBLOCK_BITS;
3525	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3526
3527	return usemapsize / 8;
3528}
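/*
 * Worked example (zone size hypothetical, 2MB pageblocks on a 4KB-page
 * build): a 1GB zone spans 262144 pages, i.e. 512 pageblocks; that is
 * 512 * NR_PAGEBLOCK_BITS bits, rounded up to a multiple of BITS_PER_LONG
 * and returned in bytes, e.g. 192 bytes if NR_PAGEBLOCK_BITS is 3.
 */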
3529
3530static void __init setup_usemap(struct pglist_data *pgdat,
3531				struct zone *zone, unsigned long zonesize)
3532{
3533	unsigned long usemapsize = usemap_size(zonesize);
3534	zone->pageblock_flags = NULL;
3535	if (usemapsize)
3536		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3537}
3538#else
3539static inline void setup_usemap(struct pglist_data *pgdat,
3540				struct zone *zone, unsigned long zonesize) {}
3541#endif /* CONFIG_SPARSEMEM */
3542
3543#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3544
3545/* Return a sensible default order for the pageblock size. */
3546static inline int pageblock_default_order(void)
3547{
3548	if (HPAGE_SHIFT > PAGE_SHIFT)
3549		return HUGETLB_PAGE_ORDER;
3550
3551	return MAX_ORDER-1;
3552}
3553
3554/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3555static inline void __init set_pageblock_order(unsigned int order)
3556{
3557	/* Check that pageblock_nr_pages has not already been setup */
3558	if (pageblock_order)
3559		return;
3560
3561	/*
3562	 * Assume the largest contiguous order of interest is a huge page.
3563	 * This value may be variable depending on boot parameters on IA64
3564	 */
3565	pageblock_order = order;
3566}
3567#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3568
3569/*
3570 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3571 * and pageblock_default_order() are unused as pageblock_order is set
3572 * at compile-time. See include/linux/pageblock-flags.h for the values of
3573 * pageblock_order based on the kernel config
3574 */
3575static inline int pageblock_default_order(unsigned int order)
3576{
3577	return MAX_ORDER-1;
3578}
3579#define set_pageblock_order(x)	do {} while (0)
3580
3581#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3582
3583/*
3584 * Set up the zone data structures:
3585 *   - mark all pages reserved
3586 *   - mark all memory queues empty
3587 *   - clear the memory bitmaps
3588 */
3589static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3590		unsigned long *zones_size, unsigned long *zholes_size)
3591{
3592	enum zone_type j;
3593	int nid = pgdat->node_id;
3594	unsigned long zone_start_pfn = pgdat->node_start_pfn;
3595	int ret;
3596
3597	pgdat_resize_init(pgdat);
3598	pgdat->nr_zones = 0;
3599	init_waitqueue_head(&pgdat->kswapd_wait);
3600	pgdat->kswapd_max_order = 0;
3601	pgdat_page_cgroup_init(pgdat);
3602
3603	for (j = 0; j < MAX_NR_ZONES; j++) {
3604		struct zone *zone = pgdat->node_zones + j;
3605		unsigned long size, realsize, memmap_pages;
3606		enum lru_list l;
3607
3608		size = zone_spanned_pages_in_node(nid, j, zones_size);
3609		realsize = size - zone_absent_pages_in_node(nid, j,
3610								zholes_size);
3611
3612		/*
3613		 * Adjust realsize so that it accounts for how much memory
3614		 * is used by this zone for memmap. This affects the watermark
3615		 * and per-cpu initialisations
3616		 */
3617		memmap_pages =
3618			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3619		if (realsize >= memmap_pages) {
3620			realsize -= memmap_pages;
3621			if (memmap_pages)
3622				printk(KERN_DEBUG
3623				       "  %s zone: %lu pages used for memmap\n",
3624				       zone_names[j], memmap_pages);
3625		} else
3626			printk(KERN_WARNING
3627				"  %s zone: %lu pages exceeds realsize %lu\n",
3628				zone_names[j], memmap_pages, realsize);
3629
3630		/* Account for reserved pages */
3631		if (j == 0 && realsize > dma_reserve) {
3632			realsize -= dma_reserve;
3633			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
3634					zone_names[0], dma_reserve);
3635		}
3636
3637		if (!is_highmem_idx(j))
3638			nr_kernel_pages += realsize;
3639		nr_all_pages += realsize;
3640
3641		zone->spanned_pages = size;
3642		zone->present_pages = realsize;
3643#ifdef CONFIG_NUMA
3644		zone->node = nid;
3645		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3646						/ 100;
3647		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3648#endif
3649		zone->name = zone_names[j];
3650		spin_lock_init(&zone->lock);
3651		spin_lock_init(&zone->lru_lock);
3652		zone_seqlock_init(zone);
3653		zone->zone_pgdat = pgdat;
3654
3655		zone->prev_priority = DEF_PRIORITY;
3656
3657		zone_pcp_init(zone);
3658		for_each_lru(l) {
3659			INIT_LIST_HEAD(&zone->lru[l].list);
3660			zone->lru[l].nr_saved_scan = 0;
3661		}
3662		zone->reclaim_stat.recent_rotated[0] = 0;
3663		zone->reclaim_stat.recent_rotated[1] = 0;
3664		zone->reclaim_stat.recent_scanned[0] = 0;
3665		zone->reclaim_stat.recent_scanned[1] = 0;
3666		zap_zone_vm_stats(zone);
3667		zone->flags = 0;
3668		if (!size)
3669			continue;
3670
3671		set_pageblock_order(pageblock_default_order());
3672		setup_usemap(pgdat, zone, size);
3673		ret = init_currently_empty_zone(zone, zone_start_pfn,
3674						size, MEMMAP_EARLY);
3675		BUG_ON(ret);
3676		memmap_init(size, nid, j, zone_start_pfn);
3677		zone_start_pfn += size;
3678	}
3679}
3680
3681static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3682{
3683	/* Skip empty nodes */
3684	if (!pgdat->node_spanned_pages)
3685		return;
3686
3687#ifdef CONFIG_FLAT_NODE_MEM_MAP
3688	/* ia64 gets its own node_mem_map, before this, without bootmem */
3689	if (!pgdat->node_mem_map) {
3690		unsigned long size, start, end;
3691		struct page *map;
3692
3693		/*
3694		 * The zone's endpoints aren't required to be MAX_ORDER
3695		 * aligned, but the node_mem_map endpoints must be, in order
3696		 * for the buddy allocator to function correctly.
3697		 */
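		/*
		 * Example of the rounding below (hypothetical PFNs, assuming
		 * MAX_ORDER_NR_PAGES = 1024): a node with node_start_pfn = 4660
		 * and 10000 spanned pages gets start = 4096 and end = 15360, so
		 * node_mem_map covers 11264 pages' worth of struct page.
		 */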
3698		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3699		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3700		end = ALIGN(end, MAX_ORDER_NR_PAGES);
3701		size =  (end - start) * sizeof(struct page);
3702		map = alloc_remap(pgdat->node_id, size);
3703		if (!map)
3704			map = alloc_bootmem_node(pgdat, size);
3705		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3706	}
3707#ifndef CONFIG_NEED_MULTIPLE_NODES
3708	/*
3709	 * With no DISCONTIG, the global mem_map is just set as node 0's
3710	 */
3711	if (pgdat == NODE_DATA(0)) {
3712		mem_map = NODE_DATA(0)->node_mem_map;
3713#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3714		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3715			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3716#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3717	}
3718#endif
3719#endif /* CONFIG_FLAT_NODE_MEM_MAP */
3720}
3721
3722void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3723		unsigned long node_start_pfn, unsigned long *zholes_size)
3724{
3725	pg_data_t *pgdat = NODE_DATA(nid);
3726
3727	pgdat->node_id = nid;
3728	pgdat->node_start_pfn = node_start_pfn;
3729	calculate_node_totalpages(pgdat, zones_size, zholes_size);
3730
3731	alloc_node_mem_map(pgdat);
3732#ifdef CONFIG_FLAT_NODE_MEM_MAP
3733	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3734		nid, (unsigned long)pgdat,
3735		(unsigned long)pgdat->node_mem_map);
3736#endif
3737
3738	free_area_init_core(pgdat, zones_size, zholes_size);
3739}
3740
3741#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3742
3743#if MAX_NUMNODES > 1
3744/*
3745 * Figure out the number of possible node ids.
3746 */
3747static void __init setup_nr_node_ids(void)
3748{
3749	unsigned int node;
3750	unsigned int highest = 0;
3751
3752	for_each_node_mask(node, node_possible_map)
3753		highest = node;
3754	nr_node_ids = highest + 1;
3755}
3756#else
3757static inline void setup_nr_node_ids(void)
3758{
3759}
3760#endif
3761
3762/**
3763 * add_active_range - Register a range of PFNs backed by physical memory
3764 * @nid: The node ID the range resides on
3765 * @start_pfn: The start PFN of the available physical memory
3766 * @end_pfn: The end PFN of the available physical memory
3767 *
3768 * These ranges are stored in an early_node_map[] and later used by
3769 * free_area_init_nodes() to calculate zone sizes and holes. If the
3770 * range spans a memory hole, it is up to the architecture to ensure
3771 * the memory is not freed by the bootmem allocator. If possible
3772 * the range being registered will be merged with existing ranges.
3773 */
3774void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3775						unsigned long end_pfn)
3776{
3777	int i;
3778
3779	mminit_dprintk(MMINIT_TRACE, "memory_register",
3780			"Entering add_active_range(%d, %#lx, %#lx) "
3781			"%d entries of %d used\n",
3782			nid, start_pfn, end_pfn,
3783			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3784
3785	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3786
3787	/* Merge with existing active regions if possible */
3788	for (i = 0; i < nr_nodemap_entries; i++) {
3789		if (early_node_map[i].nid != nid)
3790			continue;
3791
3792		/* Skip if an existing region covers this new one */
3793		if (start_pfn >= early_node_map[i].start_pfn &&
3794				end_pfn <= early_node_map[i].end_pfn)
3795			return;
3796
3797		/* Merge forward if suitable */
3798		if (start_pfn <= early_node_map[i].end_pfn &&
3799				end_pfn > early_node_map[i].end_pfn) {
3800			early_node_map[i].end_pfn = end_pfn;
3801			return;
3802		}
3803
3804		/* Merge backward if suitable */
3805		if (start_pfn < early_node_map[i].end_pfn &&
3806				end_pfn >= early_node_map[i].start_pfn) {
3807			early_node_map[i].start_pfn = start_pfn;
3808			return;
3809		}
3810	}
3811
3812	/* Check that early_node_map is large enough */
3813	if (i >= MAX_ACTIVE_REGIONS) {
3814		printk(KERN_CRIT "More than %d memory regions, truncating\n",
3815							MAX_ACTIVE_REGIONS);
3816		return;
3817	}
3818
3819	early_node_map[i].nid = nid;
3820	early_node_map[i].start_pfn = start_pfn;
3821	early_node_map[i].end_pfn = end_pfn;
3822	nr_nodemap_entries = i + 1;
3823}
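
/*
 * Merge behaviour sketch (hypothetical PFNs, not part of the original file):
 * if an arch registers (nid 0, 0x0000, 0x1000) and then (nid 0, 0x0800,
 * 0x2000), the second call takes the "merge forward" branch above and the
 * single early_node_map[] entry ends up covering 0x0000 -> 0x2000.
 */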
3824
3825/**
3826 * remove_active_range - Shrink an existing registered range of PFNs
3827 * @nid: The node id of the range to be shrunk
3828 * @start_pfn: The new start PFN of the range
3829 * @end_pfn: The new end PFN of the range
3830 *
3831 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
3832 * The map is kept near the end of the physical page range that has already
3833 * been registered. This function allows an arch to shrink an existing
3834 * registered range.
3835 */
3836void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
3837				unsigned long end_pfn)
3838{
3839	int i, j;
3840	int removed = 0;
3841
3842	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
3843			  nid, start_pfn, end_pfn);
3844
3845	/* Find the old active region end and shrink */
3846	for_each_active_range_index_in_nid(i, nid) {
3847		if (early_node_map[i].start_pfn >= start_pfn &&
3848		    early_node_map[i].end_pfn <= end_pfn) {
3849			/* clear it */
3850			early_node_map[i].start_pfn = 0;
3851			early_node_map[i].end_pfn = 0;
3852			removed = 1;
3853			continue;
3854		}
3855		if (early_node_map[i].start_pfn < start_pfn &&
3856		    early_node_map[i].end_pfn > start_pfn) {
3857			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
3858			early_node_map[i].end_pfn = start_pfn;
3859			if (temp_end_pfn > end_pfn)
3860				add_active_range(nid, end_pfn, temp_end_pfn);
3861			continue;
3862		}
3863		if (early_node_map[i].start_pfn >= start_pfn &&
3864		    early_node_map[i].end_pfn > end_pfn &&
3865		    early_node_map[i].start_pfn < end_pfn) {
3866			early_node_map[i].start_pfn = end_pfn;
3867			continue;
3868		}
3869	}
3870
3871	if (!removed)
3872		return;
3873
3874	/* remove the blank ones */
3875	for (i = nr_nodemap_entries - 1; i > 0; i--) {
3876		if (early_node_map[i].nid != nid)
3877			continue;
3878		if (early_node_map[i].end_pfn)
3879			continue;
3880		/* we found it, get rid of it */
3881		for (j = i; j < nr_nodemap_entries - 1; j++)
3882			memcpy(&early_node_map[j], &early_node_map[j+1],
3883				sizeof(early_node_map[j]));
3884		j = nr_nodemap_entries - 1;
3885		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
3886		nr_nodemap_entries--;
3887	}
3888}
3889
3890/**
3891 * remove_all_active_ranges - Remove all currently registered regions
3892 *
3893 * During discovery, it may be found that a table like SRAT is invalid
3894 * and an alternative discovery method must be used. This function removes
3895 * all currently registered regions.
3896 */
3897void __init remove_all_active_ranges(void)
3898{
3899	memset(early_node_map, 0, sizeof(early_node_map));
3900	nr_nodemap_entries = 0;
3901}
3902
3903/* Compare two active node_active_regions */
3904static int __init cmp_node_active_region(const void *a, const void *b)
3905{
3906	const struct node_active_region *arange = a;
3907	const struct node_active_region *brange = b;
3908
3909	/* Done this way to avoid overflows */
3910	if (arange->start_pfn > brange->start_pfn)
3911		return 1;
3912	if (arange->start_pfn < brange->start_pfn)
3913		return -1;
3914
3915	return 0;
3916}
3917
3918/* sort the node_map by start_pfn */
3919static void __init sort_node_map(void)
3920{
3921	sort(early_node_map, (size_t)nr_nodemap_entries,
3922			sizeof(struct node_active_region),
3923			cmp_node_active_region, NULL);
3924}
3925
3926/* Find the lowest pfn for a node */
3927static unsigned long __init find_min_pfn_for_node(int nid)
3928{
3929	int i;
3930	unsigned long min_pfn = ULONG_MAX;
3931
3932	/* Assuming a sorted map, the first range found has the starting pfn */
3933	for_each_active_range_index_in_nid(i, nid)
3934		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
3935
3936	if (min_pfn == ULONG_MAX) {
3937		printk(KERN_WARNING
3938			"Could not find start_pfn for node %d\n", nid);
3939		return 0;
3940	}
3941
3942	return min_pfn;
3943}
3944
3945/**
3946 * find_min_pfn_with_active_regions - Find the minimum PFN registered
3947 *
3948 * It returns the minimum PFN based on information provided via
3949 * add_active_range().
3950 */
3951unsigned long __init find_min_pfn_with_active_regions(void)
3952{
3953	return find_min_pfn_for_node(MAX_NUMNODES);
3954}
3955
3956/*
3957 * early_calculate_totalpages()
3958 * Sum pages in active regions for movable zone.
3959 * Populate N_HIGH_MEMORY for calculating usable_nodes.
3960 */
3961static unsigned long __init early_calculate_totalpages(void)
3962{
3963	int i;
3964	unsigned long totalpages = 0;
3965
3966	for (i = 0; i < nr_nodemap_entries; i++) {
3967		unsigned long pages = early_node_map[i].end_pfn -
3968						early_node_map[i].start_pfn;
3969		totalpages += pages;
3970		if (pages)
3971			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3972	}
3973	return totalpages;
3974}
3975
3976/*
3977 * Find the PFN at which the Movable zone begins in each node. Kernel
3978 * memory is spread evenly between nodes as long as the nodes have enough
3979 * memory. When they don't, some nodes will have more kernelcore than
3980 * others.
3981 */
3982static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3983{
3984	int i, nid;
3985	unsigned long usable_startpfn;
3986	unsigned long kernelcore_node, kernelcore_remaining;
3987	unsigned long totalpages = early_calculate_totalpages();
3988	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
3989
3990	/*
3991	 * If movablecore was specified, calculate the corresponding size
3992	 * of kernelcore so that memory usable for any allocation type is
3993	 * evenly spread. If both kernelcore
3994	 * and movablecore are specified, then the value of kernelcore
3995	 * will be used for required_kernelcore if it's greater than
3996	 * what movablecore would have allowed.
3997	 */
3998	if (required_movablecore) {
3999		unsigned long corepages;
4000
4001		/*
4002		 * Round-up so that ZONE_MOVABLE is at least as large as what
4003		 * was requested by the user
4004		 */
4005		required_movablecore =
4006			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4007		corepages = totalpages - required_movablecore;
4008
4009		required_kernelcore = max(required_kernelcore, corepages);
4010	}
4011
4012	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
4013	if (!required_kernelcore)
4014		return;
4015
4016	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4017	find_usable_zone_for_movable();
4018	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4019
4020restart:
4021	/* Spread kernelcore memory as evenly as possible throughout nodes */
4022	kernelcore_node = required_kernelcore / usable_nodes;
4023	for_each_node_state(nid, N_HIGH_MEMORY) {
4024		/*
4025		 * Recalculate kernelcore_node if the division per node
4026		 * now exceeds what is necessary to satisfy the requested
4027		 * amount of memory for the kernel
4028		 */
4029		if (required_kernelcore < kernelcore_node)
4030			kernelcore_node = required_kernelcore / usable_nodes;
4031
4032		/*
4033		 * As the map is walked, we track how much memory is usable
4034		 * by the kernel using kernelcore_remaining. When it is
4035		 * 0, the rest of the node is usable by ZONE_MOVABLE
4036		 */
4037		kernelcore_remaining = kernelcore_node;
4038
4039		/* Go through each range of PFNs within this node */
4040		for_each_active_range_index_in_nid(i, nid) {
4041			unsigned long start_pfn, end_pfn;
4042			unsigned long size_pages;
4043
4044			start_pfn = max(early_node_map[i].start_pfn,
4045						zone_movable_pfn[nid]);
4046			end_pfn = early_node_map[i].end_pfn;
4047			if (start_pfn >= end_pfn)
4048				continue;
4049
4050			/* Account for what is only usable for kernelcore */
4051			if (start_pfn < usable_startpfn) {
4052				unsigned long kernel_pages;
4053				kernel_pages = min(end_pfn, usable_startpfn)
4054								- start_pfn;
4055
4056				kernelcore_remaining -= min(kernel_pages,
4057							kernelcore_remaining);
4058				required_kernelcore -= min(kernel_pages,
4059							required_kernelcore);
4060
4061				/* Continue if range is now fully accounted */
4062				if (end_pfn <= usable_startpfn) {
4063
4064					/*
4065					 * Push zone_movable_pfn to the end so
4066					 * that if we have to rebalance
4067					 * kernelcore across nodes, we will
4068					 * not double account here
4069					 */
4070					zone_movable_pfn[nid] = end_pfn;
4071					continue;
4072				}
4073				start_pfn = usable_startpfn;
4074			}
4075
4076			/*
4077			 * The usable PFN range for ZONE_MOVABLE is from
4078			 * start_pfn->end_pfn. Calculate size_pages as the
4079			 * number of pages used as kernelcore
4080			 */
4081			size_pages = end_pfn - start_pfn;
4082			if (size_pages > kernelcore_remaining)
4083				size_pages = kernelcore_remaining;
4084			zone_movable_pfn[nid] = start_pfn + size_pages;
4085
4086			/*
4087			 * Some kernelcore has been met, update counts and
4088			 * break if the kernelcore for this node has been
4089			 * satisfied
4090			 */
4091			required_kernelcore -= min(required_kernelcore,
4092								size_pages);
4093			kernelcore_remaining -= size_pages;
4094			if (!kernelcore_remaining)
4095				break;
4096		}
4097	}
4098
4099	/*
4100	 * If there is still required_kernelcore, we do another pass with one
4101	 * less node in the count. This will push zone_movable_pfn[nid] further
4102	 * along on the nodes that still have memory until kernelcore is
4103	 * satisfied
4104	 */
4105	usable_nodes--;
4106	if (usable_nodes && required_kernelcore > usable_nodes)
4107		goto restart;
4108
4109	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4110	for (nid = 0; nid < MAX_NUMNODES; nid++)
4111		zone_movable_pfn[nid] =
4112			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4113}
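
/*
 * Illustrative outcome (hypothetical sizes, not from this file): booting with
 * kernelcore=2G on two nodes of 4G each gives kernelcore_node = 1G per node,
 * so, ignoring holes and zone restrictions, zone_movable_pfn[] ends up
 * roughly 1G (in pages) past each node's start and the remaining ~3G per
 * node becomes ZONE_MOVABLE.
 */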
4114
4115/* Any regular memory on that node? */
4116static void check_for_regular_memory(pg_data_t *pgdat)
4117{
4118#ifdef CONFIG_HIGHMEM
4119	enum zone_type zone_type;
4120
4121	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4122		struct zone *zone = &pgdat->node_zones[zone_type];
4123		if (zone->present_pages)
4124			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4125	}
4126#endif
4127}
4128
4129/**
4130 * free_area_init_nodes - Initialise all pg_data_t and zone data
4131 * @max_zone_pfn: an array of max PFNs for each zone
4132 *
4133 * This will call free_area_init_node() for each active node in the system.
4134 * Using the page ranges provided by add_active_range(), the size of each
4135 * zone in each node and their holes is calculated. If the maximum PFNs
4136 * of two adjacent zones match, the zone between them is assumed to be empty.
4137 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4138 * that ZONE_DMA32 has no pages. It is also assumed that a zone
4139 * starts where the previous one ended. For example, ZONE_DMA32 starts
4140 * at arch_max_dma_pfn.
4141 */
4142void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4143{
4144	unsigned long nid;
4145	int i;
4146
4147	/* Sort early_node_map as initialisation assumes it is sorted */
4148	sort_node_map();
4149
4150	/* Record where the zone boundaries are */
4151	memset(arch_zone_lowest_possible_pfn, 0,
4152				sizeof(arch_zone_lowest_possible_pfn));
4153	memset(arch_zone_highest_possible_pfn, 0,
4154				sizeof(arch_zone_highest_possible_pfn));
4155	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4156	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4157	for (i = 1; i < MAX_NR_ZONES; i++) {
4158		if (i == ZONE_MOVABLE)
4159			continue;
4160		arch_zone_lowest_possible_pfn[i] =
4161			arch_zone_highest_possible_pfn[i-1];
4162		arch_zone_highest_possible_pfn[i] =
4163			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4164	}
4165	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4166	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4167
4168	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4169	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4170	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4171
4172	/* Print out the zone ranges */
4173	printk("Zone PFN ranges:\n");
4174	for (i = 0; i < MAX_NR_ZONES; i++) {
4175		if (i == ZONE_MOVABLE)
4176			continue;
4177		printk("  %-8s %0#10lx -> %0#10lx\n",
4178				zone_names[i],
4179				arch_zone_lowest_possible_pfn[i],
4180				arch_zone_highest_possible_pfn[i]);
4181	}
4182
4183	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4184	printk("Movable zone start PFN for each node\n");
4185	for (i = 0; i < MAX_NUMNODES; i++) {
4186		if (zone_movable_pfn[i])
4187			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4188	}
4189
4190	/* Print out the early_node_map[] */
4191	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4192	for (i = 0; i < nr_nodemap_entries; i++)
4193		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4194						early_node_map[i].start_pfn,
4195						early_node_map[i].end_pfn);
4196
4197	/* Initialise every node */
4198	mminit_verify_pageflags_layout();
4199	setup_nr_node_ids();
4200	for_each_online_node(nid) {
4201		pg_data_t *pgdat = NODE_DATA(nid);
4202		free_area_init_node(nid, NULL,
4203				find_min_pfn_for_node(nid), NULL);
4204
4205		/* Any memory on that node */
4206		if (pgdat->node_present_pages)
4207			node_set_state(nid, N_HIGH_MEMORY);
4208		check_for_regular_memory(pgdat);
4209	}
4210}
4211
4212static int __init cmdline_parse_core(char *p, unsigned long *core)
4213{
4214	unsigned long long coremem;
4215	if (!p)
4216		return -EINVAL;
4217
4218	coremem = memparse(p, &p);
4219	*core = coremem >> PAGE_SHIFT;
4220
4221	/* Paranoid check that UL is enough for the coremem value */
4222	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4223
4224	return 0;
4225}
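
/*
 * Example (assuming 4K pages): "kernelcore=512M" is parsed by memparse()
 * into coremem = 536870912, so required_kernelcore becomes
 * 536870912 >> 12 = 131072 pages.
 */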
4226
4227/*
4228 * kernelcore=size sets the amount of memory to use for allocations that
4229 * cannot be reclaimed or migrated.
4230 */
4231static int __init cmdline_parse_kernelcore(char *p)
4232{
4233	return cmdline_parse_core(p, &required_kernelcore);
4234}
4235
4236/*
4237 * movablecore=size sets the amount of memory to use for allocations that
4238 * can be reclaimed or migrated.
4239 */
4240static int __init cmdline_parse_movablecore(char *p)
4241{
4242	return cmdline_parse_core(p, &required_movablecore);
4243}
4244
4245early_param("kernelcore", cmdline_parse_kernelcore);
4246early_param("movablecore", cmdline_parse_movablecore);
4247
4248#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4249
4250/**
4251 * set_dma_reserve - set the specified number of pages reserved in the first zone
4252 * @new_dma_reserve: The number of pages to mark reserved
4253 *
4254 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4255 * In the DMA zone, a significant percentage may be consumed by kernel image
4256 * and other unfreeable allocations which can skew the watermarks badly. This
4257 * function may optionally be used to account for unfreeable pages in the
4258 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4259 * smaller per-cpu batchsize.
4260 */
4261void __init set_dma_reserve(unsigned long new_dma_reserve)
4262{
4263	dma_reserve = new_dma_reserve;
4264}
4265
4266#ifndef CONFIG_NEED_MULTIPLE_NODES
4267struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4268EXPORT_SYMBOL(contig_page_data);
4269#endif
4270
4271void __init free_area_init(unsigned long *zones_size)
4272{
4273	free_area_init_node(0, zones_size,
4274			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4275}
4276
4277static int page_alloc_cpu_notify(struct notifier_block *self,
4278				 unsigned long action, void *hcpu)
4279{
4280	int cpu = (unsigned long)hcpu;
4281
4282	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4283		drain_pages(cpu);
4284
4285		/*
4286		 * Spill the event counters of the dead processor
4287		 * into the current processor's event counters.
4288		 * This artificially elevates the count of the current
4289		 * processor.
4290		 */
4291		vm_events_fold_cpu(cpu);
4292
4293		/*
4294		 * Zero the differential counters of the dead processor
4295		 * so that the vm statistics are consistent.
4296		 *
4297		 * This is only okay since the processor is dead and cannot
4298		 * race with what we are doing.
4299		 */
4300		refresh_cpu_vm_stats(cpu);
4301	}
4302	return NOTIFY_OK;
4303}
4304
4305void __init page_alloc_init(void)
4306{
4307	hotcpu_notifier(page_alloc_cpu_notify, 0);
4308}
4309
4310/*
4311 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4312 *	or min_free_kbytes changes.
4313 */
4314static void calculate_totalreserve_pages(void)
4315{
4316	struct pglist_data *pgdat;
4317	unsigned long reserve_pages = 0;
4318	enum zone_type i, j;
4319
4320	for_each_online_pgdat(pgdat) {
4321		for (i = 0; i < MAX_NR_ZONES; i++) {
4322			struct zone *zone = pgdat->node_zones + i;
4323			unsigned long max = 0;
4324
4325			/* Find valid and maximum lowmem_reserve in the zone */
4326			for (j = i; j < MAX_NR_ZONES; j++) {
4327				if (zone->lowmem_reserve[j] > max)
4328					max = zone->lowmem_reserve[j];
4329			}
4330
4331			/* we treat the high watermark as reserved pages. */
4332			max += high_wmark_pages(zone);
4333
4334			if (max > zone->present_pages)
4335				max = zone->present_pages;
4336			reserve_pages += max;
4337		}
4338	}
4339	totalreserve_pages = reserve_pages;
4340}
4341
4342/*
4343 * setup_per_zone_lowmem_reserve - called whenever
4344 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
4345 *	has a correct lowmem_reserve value, so an adequate number of
4346 *	pages are left in the zone after a successful __alloc_pages().
4347 */
4348static void setup_per_zone_lowmem_reserve(void)
4349{
4350	struct pglist_data *pgdat;
4351	enum zone_type j, idx;
4352
4353	for_each_online_pgdat(pgdat) {
4354		for (j = 0; j < MAX_NR_ZONES; j++) {
4355			struct zone *zone = pgdat->node_zones + j;
4356			unsigned long present_pages = zone->present_pages;
4357
4358			zone->lowmem_reserve[j] = 0;
4359
4360			idx = j;
4361			while (idx) {
4362				struct zone *lower_zone;
4363
4364				idx--;
4365
4366				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4367					sysctl_lowmem_reserve_ratio[idx] = 1;
4368
4369				lower_zone = pgdat->node_zones + idx;
4370				lower_zone->lowmem_reserve[j] = present_pages /
4371					sysctl_lowmem_reserve_ratio[idx];
4372				present_pages += lower_zone->present_pages;
4373			}
4374		}
4375	}
4376
4377	/* update totalreserve_pages */
4378	calculate_totalreserve_pages();
4379}
4380
4381/**
4382 * setup_per_zone_pages_min - called when min_free_kbytes changes.
4383 *
4384 * Ensures that the watermark[WMARK_{MIN,LOW,HIGH}] values for each zone
4385 * are set correctly with respect to min_free_kbytes.
4386 */
4387void setup_per_zone_pages_min(void)
4388{
4389	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4390	unsigned long lowmem_pages = 0;
4391	struct zone *zone;
4392	unsigned long flags;
4393
4394	/* Calculate total number of !ZONE_HIGHMEM pages */
4395	for_each_zone(zone) {
4396		if (!is_highmem(zone))
4397			lowmem_pages += zone->present_pages;
4398	}
4399
4400	for_each_zone(zone) {
4401		u64 tmp;
4402
4403		spin_lock_irqsave(&zone->lock, flags);
4404		tmp = (u64)pages_min * zone->present_pages;
4405		do_div(tmp, lowmem_pages);
4406		if (is_highmem(zone)) {
4407			/*
4408			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4409			 * need highmem pages, so cap the min watermark to a
4410			 * small value here.
4411			 *
4412			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
4413			 * deltas control async page reclaim, and so should
4414			 * not be capped for highmem.
4415			 */
4416			int min_pages;
4417
4418			min_pages = zone->present_pages / 1024;
4419			if (min_pages < SWAP_CLUSTER_MAX)
4420				min_pages = SWAP_CLUSTER_MAX;
4421			if (min_pages > 128)
4422				min_pages = 128;
4423			zone->watermark[WMARK_MIN] = min_pages;
4424		} else {
4425			/*
4426			 * If it's a lowmem zone, reserve a number of pages
4427			 * proportionate to the zone's size.
4428			 */
4429			zone->watermark[WMARK_MIN] = tmp;
4430		}
4431
4432		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
4433		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4434		setup_zone_migrate_reserve(zone);
4435		spin_unlock_irqrestore(&zone->lock, flags);
4436	}
4437
4438	/* update totalreserve_pages */
4439	calculate_totalreserve_pages();
4440}
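
/*
 * Worked example (illustrative, single lowmem zone assumed): with 4K pages
 * and min_free_kbytes = 4096, pages_min = 4096 >> (12 - 10) = 1024 pages, so
 * the zone gets WMARK_MIN = 1024, WMARK_LOW = 1024 + 1024/4 = 1280 and
 * WMARK_HIGH = 1024 + 1024/2 = 1536 pages.
 */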
4441
4442/**
4443 * setup_per_zone_inactive_ratio - called when min_free_kbytes changes.
4444 *
4445 * The inactive anon list should be small enough that the VM never has to
4446 * do too much work, but large enough that each inactive page has a chance
4447 * to be referenced again before it is swapped out.
4448 *
4449 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4450 * INACTIVE_ANON pages on this zone's LRU, maintained by the
4451 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4452 * the anonymous pages are kept on the inactive list.
4453 *
4454 * total     target    max
4455 * memory    ratio     inactive anon
4456 * -------------------------------------
4457 *   10MB       1         5MB
4458 *  100MB       1        50MB
4459 *    1GB       3       250MB
4460 *   10GB      10       0.9GB
4461 *  100GB      31         3GB
4462 *    1TB     101        10GB
4463 *   10TB     320        32GB
4464 */
4465static void setup_per_zone_inactive_ratio(void)
4466{
4467	struct zone *zone;
4468
4469	for_each_zone(zone) {
4470		unsigned int gb, ratio;
4471
4472		/* Zone size in gigabytes */
4473		gb = zone->present_pages >> (30 - PAGE_SHIFT);
4474		if (gb)
4475			ratio = int_sqrt(10 * gb);
4476		else
4477			ratio = 1;
4478
4479		zone->inactive_ratio = ratio;
4480	}
4481}
4482
4483/*
4484 * Initialise min_free_kbytes.
4485 *
4486 * For small machines we want it small (128k min).  For large machines
4487 * we want it large (64MB max).  But it is not linear, because network
4488 * bandwidth does not increase linearly with machine size.  We use
4489 *
4490 * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4491 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
4492 *
4493 * which yields
4494 *
4495 * 16MB:	512k
4496 * 32MB:	724k
4497 * 64MB:	1024k
4498 * 128MB:	1448k
4499 * 256MB:	2048k
4500 * 512MB:	2896k
4501 * 1024MB:	4096k
4502 * 2048MB:	5792k
4503 * 4096MB:	8192k
4504 * 8192MB:	11584k
4505 * 16384MB:	16384k
4506 */
4507static int __init init_per_zone_pages_min(void)
4508{
4509	unsigned long lowmem_kbytes;
4510
4511	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4512
4513	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4514	if (min_free_kbytes < 128)
4515		min_free_kbytes = 128;
4516	if (min_free_kbytes > 65536)
4517		min_free_kbytes = 65536;
4518	setup_per_zone_pages_min();
4519	setup_per_zone_lowmem_reserve();
4520	setup_per_zone_inactive_ratio();
4521	return 0;
4522}
4523module_init(init_per_zone_pages_min)
4524
4525/*
4526 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4527 *	that we can call setup_per_zone_pages_min() whenever min_free_kbytes
4528 *	changes.
4529 */
4530int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4531	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4532{
4533	proc_dointvec(table, write, file, buffer, length, ppos);
4534	if (write)
4535		setup_per_zone_pages_min();
4536	return 0;
4537}
4538
4539#ifdef CONFIG_NUMA
4540int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4541	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4542{
4543	struct zone *zone;
4544	int rc;
4545
4546	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4547	if (rc)
4548		return rc;
4549
4550	for_each_zone(zone)
4551		zone->min_unmapped_pages = (zone->present_pages *
4552				sysctl_min_unmapped_ratio) / 100;
4553	return 0;
4554}
4555
4556int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4557	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4558{
4559	struct zone *zone;
4560	int rc;
4561
4562	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4563	if (rc)
4564		return rc;
4565
4566	for_each_zone(zone)
4567		zone->min_slab_pages = (zone->present_pages *
4568				sysctl_min_slab_ratio) / 100;
4569	return 0;
4570}
4571#endif
4572
4573/*
4574 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4575 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4576 *	whenever sysctl_lowmem_reserve_ratio changes.
4577 *
4578 * The reserve ratio has no relation to the minimum watermarks. The
4579 * lowmem reserve ratio only makes sense in relation to the boot-time
4580 * zone sizes.
4581 */
4582int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4583	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4584{
4585	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4586	setup_per_zone_lowmem_reserve();
4587	return 0;
4588}
4589
4590/*
4591 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4592 * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
4593 * pagelist can have before it gets flushed back to the buddy allocator.
4594 */
4595
4596int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4597	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4598{
4599	struct zone *zone;
4600	unsigned int cpu;
4601	int ret;
4602
4603	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4604	if (!write || (ret == -EINVAL))
4605		return ret;
4606	for_each_zone(zone) {
4607		for_each_online_cpu(cpu) {
4608			unsigned long  high;
4609			high = zone->present_pages / percpu_pagelist_fraction;
4610			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4611		}
4612	}
4613	return 0;
4614}
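
/*
 * Example (hypothetical zone size): with percpu_pagelist_fraction = 8, a
 * zone of 262144 present pages gives each online CPU a pcp->high of
 * 262144 / 8 = 32768 pages via setup_pagelist_highmark() above.
 */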
4615
4616int hashdist = HASHDIST_DEFAULT;
4617
4618#ifdef CONFIG_NUMA
4619static int __init set_hashdist(char *str)
4620{
4621	if (!str)
4622		return 0;
4623	hashdist = simple_strtoul(str, &str, 0);
4624	return 1;
4625}
4626__setup("hashdist=", set_hashdist);
4627#endif
4628
4629/*
4630 * allocate a large system hash table from bootmem
4631 * - it is assumed that the hash table must contain an exact power-of-2
4632 *   quantity of entries
4633 * - limit is the number of hash buckets, not the total allocation size
4634 */
4635void *__init alloc_large_system_hash(const char *tablename,
4636				     unsigned long bucketsize,
4637				     unsigned long numentries,
4638				     int scale,
4639				     int flags,
4640				     unsigned int *_hash_shift,
4641				     unsigned int *_hash_mask,
4642				     unsigned long limit)
4643{
4644	unsigned long long max = limit;
4645	unsigned long log2qty, size;
4646	void *table = NULL;
4647
4648	/* allow the kernel cmdline to have a say */
4649	if (!numentries) {
4650		/* round applicable memory size up to nearest megabyte */
4651		numentries = nr_kernel_pages;
4652		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4653		numentries >>= 20 - PAGE_SHIFT;
4654		numentries <<= 20 - PAGE_SHIFT;
4655
4656		/* limit to 1 bucket per 2^scale bytes of low memory */
4657		if (scale > PAGE_SHIFT)
4658			numentries >>= (scale - PAGE_SHIFT);
4659		else
4660			numentries <<= (PAGE_SHIFT - scale);
4661
4662		/* Make sure we've got at least a 0-order allocation. */
4663		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4664			numentries = PAGE_SIZE / bucketsize;
4665	}
4666	numentries = roundup_pow_of_two(numentries);
4667
4668	/* limit allocation size to 1/16 total memory by default */
4669	if (max == 0) {
4670		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4671		do_div(max, bucketsize);
4672	}
4673
4674	if (numentries > max)
4675		numentries = max;
4676
4677	log2qty = ilog2(numentries);
4678
4679	do {
4680		size = bucketsize << log2qty;
4681		if (flags & HASH_EARLY)
4682			table = alloc_bootmem_nopanic(size);
4683		else if (hashdist)
4684			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4685		else {
4686			/*
4687			 * If bucketsize is not a power of two, we may free
4688			 * some pages at the end of the hash table, which
4689			 * alloc_pages_exact() does automatically
4690			 */
4691			if (get_order(size) < MAX_ORDER)
4692				table = alloc_pages_exact(size, GFP_ATOMIC);
4693		}
4694	} while (!table && size > PAGE_SIZE && --log2qty);
4695
4696	if (!table)
4697		panic("Failed to allocate %s hash table\n", tablename);
4698
4699	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4700	       tablename,
4701	       (1U << log2qty),
4702	       ilog2(size) - PAGE_SHIFT,
4703	       size);
4704
4705	if (_hash_shift)
4706		*_hash_shift = log2qty;
4707	if (_hash_mask)
4708		*_hash_mask = (1 << log2qty) - 1;
4709
4710	/*
4711	 * If hashdist is set, the table allocation is done with __vmalloc()
4712	 * which invokes the kmemleak_alloc() callback. This function may also
4713	 * be called before the slab and kmemleak are initialised, in which
4714	 * case kmemleak simply buffers the request to be executed later
4715	 * (GFP_ATOMIC flag ignored in this case).
4716	 */
4717	if (!hashdist)
4718		kmemleak_alloc(table, size, 1, GFP_ATOMIC);
4719
4720	return table;
4721}
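
/*
 * Sizing sketch (illustrative numbers and names, not part of the original
 * file): with 4K pages, roughly 1GB of lowmem (nr_kernel_pages ~= 262144),
 * scale = 14 and bucketsize = 8, numentries becomes 262144 >> 2 = 65536
 * buckets, so the table is 8 << 16 = 524288 bytes (an order-7 allocation)
 * and the caller gets *_hash_shift = 16, *_hash_mask = 0xffff.  A typical
 * early-boot caller would look roughly like:
 *
 *	example_table = alloc_large_system_hash("Example cache",
 *					sizeof(struct hlist_head),
 *					0,		(size from memory)
 *					14,		(1 bucket per 16KB lowmem)
 *					HASH_EARLY,
 *					&example_shift,
 *					&example_mask,
 *					0);		(default 1/16 limit)
 */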
4722
4723/* Return a pointer to the bitmap storing bits affecting a block of pages */
4724static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4725							unsigned long pfn)
4726{
4727#ifdef CONFIG_SPARSEMEM
4728	return __pfn_to_section(pfn)->pageblock_flags;
4729#else
4730	return zone->pageblock_flags;
4731#endif /* CONFIG_SPARSEMEM */
4732}
4733
4734static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4735{
4736#ifdef CONFIG_SPARSEMEM
4737	pfn &= (PAGES_PER_SECTION-1);
4738	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4739#else
4740	pfn = pfn - zone->zone_start_pfn;
4741	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4742#endif /* CONFIG_SPARSEMEM */
4743}
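
/*
 * Bit-index example (illustrative, assuming SPARSEMEM with
 * PAGES_PER_SECTION = 32768, pageblock_order = 10 and NR_PAGEBLOCK_BITS = 4):
 * pfn 74565 maps to offset 9029 within its section, i.e. pageblock 8, so its
 * flags start at bit 8 * 4 = 32 of that section's pageblock_flags bitmap.
 */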
4744
4745/**
4746 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4747 * @page: The page within the block of interest
4748 * @start_bitidx: The first bit of interest to retrieve
4749 * @end_bitidx: The last bit of interest
4750 * returns pageblock_bits flags
4751 */
4752unsigned long get_pageblock_flags_group(struct page *page,
4753					int start_bitidx, int end_bitidx)
4754{
4755	struct zone *zone;
4756	unsigned long *bitmap;
4757	unsigned long pfn, bitidx;
4758	unsigned long flags = 0;
4759	unsigned long value = 1;
4760
4761	zone = page_zone(page);
4762	pfn = page_to_pfn(page);
4763	bitmap = get_pageblock_bitmap(zone, pfn);
4764	bitidx = pfn_to_bitidx(zone, pfn);
4765
4766	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4767		if (test_bit(bitidx + start_bitidx, bitmap))
4768			flags |= value;
4769
4770	return flags;
4771}
4772
4773/**
4774 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4775 * @page: The page within the block of interest
4776 * @start_bitidx: The first bit of interest
4777 * @end_bitidx: The last bit of interest
4778 * @flags: The flags to set
4779 */
4780void set_pageblock_flags_group(struct page *page, unsigned long flags,
4781					int start_bitidx, int end_bitidx)
4782{
4783	struct zone *zone;
4784	unsigned long *bitmap;
4785	unsigned long pfn, bitidx;
4786	unsigned long value = 1;
4787
4788	zone = page_zone(page);
4789	pfn = page_to_pfn(page);
4790	bitmap = get_pageblock_bitmap(zone, pfn);
4791	bitidx = pfn_to_bitidx(zone, pfn);
4792	VM_BUG_ON(pfn < zone->zone_start_pfn);
4793	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4794
4795	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4796		if (flags & value)
4797			__set_bit(bitidx + start_bitidx, bitmap);
4798		else
4799			__clear_bit(bitidx + start_bitidx, bitmap);
4800}
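
/*
 * For reference (a sketch; see include/linux/pageblock-flags.h): the
 * migratetype accessors used elsewhere in this file are thin wrappers
 * around the two functions above, roughly
 *	get_pageblock_flags_group(page, PB_migrate, PB_migrate_end)
 * to read a pageblock's migratetype and
 *	set_pageblock_flags_group(page, migratetype, PB_migrate, PB_migrate_end)
 * to change it.
 */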
4801
4802/*
4803 * This is designed as a helper function; please see page_isolation.c as well.
4804 * It sets/clears a pageblock's migratetype to/from ISOLATE.
4805 * The page allocator never allocates memory from an ISOLATE pageblock.
4806 */
4807
4808int set_migratetype_isolate(struct page *page)
4809{
4810	struct zone *zone;
4811	unsigned long flags;
4812	int ret = -EBUSY;
4813
4814	zone = page_zone(page);
4815	spin_lock_irqsave(&zone->lock, flags);
4816	/*
4817	 * In the future, more migrate types will be able to be isolation targets.
4818	 */
4819	if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
4820		goto out;
4821	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4822	move_freepages_block(zone, page, MIGRATE_ISOLATE);
4823	ret = 0;
4824out:
4825	spin_unlock_irqrestore(&zone->lock, flags);
4826	if (!ret)
4827		drain_all_pages();
4828	return ret;
4829}
4830
4831void unset_migratetype_isolate(struct page *page)
4832{
4833	struct zone *zone;
4834	unsigned long flags;
4835	zone = page_zone(page);
4836	spin_lock_irqsave(&zone->lock, flags);
4837	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
4838		goto out;
4839	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4840	move_freepages_block(zone, page, MIGRATE_MOVABLE);
4841out:
4842	spin_unlock_irqrestore(&zone->lock, flags);
4843}
4844
4845#ifdef CONFIG_MEMORY_HOTREMOVE
4846/*
4847 * All pages in the range must be isolated before calling this.
4848 */
4849void
4850__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
4851{
4852	struct page *page;
4853	struct zone *zone;
4854	int order, i;
4855	unsigned long pfn;
4856	unsigned long flags;
4857	/* find the first valid pfn */
4858	for (pfn = start_pfn; pfn < end_pfn; pfn++)
4859		if (pfn_valid(pfn))
4860			break;
4861	if (pfn == end_pfn)
4862		return;
4863	zone = page_zone(pfn_to_page(pfn));
4864	spin_lock_irqsave(&zone->lock, flags);
4865	pfn = start_pfn;
4866	while (pfn < end_pfn) {
4867		if (!pfn_valid(pfn)) {
4868			pfn++;
4869			continue;
4870		}
4871		page = pfn_to_page(pfn);
4872		BUG_ON(page_count(page));
4873		BUG_ON(!PageBuddy(page));
4874		order = page_order(page);
4875#ifdef CONFIG_DEBUG_VM
4876		printk(KERN_INFO "remove from free list %lx %d %lx\n",
4877		       pfn, 1 << order, end_pfn);
4878#endif
4879		list_del(&page->lru);
4880		rmv_page_order(page);
4881		zone->free_area[order].nr_free--;
4882		__mod_zone_page_state(zone, NR_FREE_PAGES,
4883				      - (1UL << order));
4884		for (i = 0; i < (1 << order); i++)
4885			SetPageReserved((page+i));
4886		pfn += (1 << order);
4887	}
4888	spin_unlock_irqrestore(&zone->lock, flags);
4889}
4890#endif
4891