page_alloc.c revision 6c0db4664b49417d80988953e69c323721353227
1/*
2 *  linux/mm/page_alloc.c
3 *
4 *  Manages the free list, the system allocates free pages here.
5 *  Note that kmalloc() lives in slab.c
6 *
7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8 *  Swap reorganised 29.12.95, Stephen Tweedie
9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
22#include <linux/jiffies.h>
23#include <linux/bootmem.h>
24#include <linux/compiler.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/suspend.h>
28#include <linux/pagevec.h>
29#include <linux/blkdev.h>
30#include <linux/slab.h>
31#include <linux/oom.h>
32#include <linux/notifier.h>
33#include <linux/topology.h>
34#include <linux/sysctl.h>
35#include <linux/cpu.h>
36#include <linux/cpuset.h>
37#include <linux/memory_hotplug.h>
38#include <linux/nodemask.h>
39#include <linux/vmalloc.h>
40#include <linux/mempolicy.h>
41#include <linux/stop_machine.h>
42#include <linux/sort.h>
43#include <linux/pfn.h>
44#include <linux/backing-dev.h>
45#include <linux/fault-inject.h>
46#include <linux/page-isolation.h>
47#include <linux/page_cgroup.h>
48#include <linux/debugobjects.h>
49#include <linux/kmemleak.h>
50
51#include <asm/tlbflush.h>
52#include <asm/div64.h>
53#include "internal.h"
54
55/*
56 * Array of node states.
57 */
58nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
59	[N_POSSIBLE] = NODE_MASK_ALL,
60	[N_ONLINE] = { { [0] = 1UL } },
61#ifndef CONFIG_NUMA
62	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
63#ifdef CONFIG_HIGHMEM
64	[N_HIGH_MEMORY] = { { [0] = 1UL } },
65#endif
66	[N_CPU] = { { [0] = 1UL } },
67#endif	/* NUMA */
68};
69EXPORT_SYMBOL(node_states);
70
71unsigned long totalram_pages __read_mostly;
72unsigned long totalreserve_pages __read_mostly;
73unsigned long highest_memmap_pfn __read_mostly;
74int percpu_pagelist_fraction;
75
76#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
77int pageblock_order __read_mostly;
78#endif
79
80static void __free_pages_ok(struct page *page, unsigned int order);
81
82/*
83 * results with 256, 32 in the lowmem_reserve sysctl:
84 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
85 *	1G machine -> (16M dma, 784M normal, 224M high)
86 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
87 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
88 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
89 *
90 * TBD: should special case ZONE_DMA32 machines here - in those we normally
91 * don't need any ZONE_NORMAL reservation
92 */
93int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
94#ifdef CONFIG_ZONE_DMA
95	 256,
96#endif
97#ifdef CONFIG_ZONE_DMA32
98	 256,
99#endif
100#ifdef CONFIG_HIGHMEM
101	 32,
102#endif
103	 32,
104};
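
/*
 * Illustrative arithmetic for the ratios above (not code from this file,
 * and assuming the 1G split quoted in the comment): a NORMAL allocation
 * leaves roughly 784M/256 ~= 3M of ZONE_DMA untouched, while a HIGHMEM
 * allocation leaves 224M/32 = 7M of ZONE_NORMAL and (224M+784M)/256 ~= 4M
 * of ZONE_DMA in reserve.
 */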
105
106EXPORT_SYMBOL(totalram_pages);
107
108static char * const zone_names[MAX_NR_ZONES] = {
109#ifdef CONFIG_ZONE_DMA
110	 "DMA",
111#endif
112#ifdef CONFIG_ZONE_DMA32
113	 "DMA32",
114#endif
115	 "Normal",
116#ifdef CONFIG_HIGHMEM
117	 "HighMem",
118#endif
119	 "Movable",
120};
121
122int min_free_kbytes = 1024;
123
124unsigned long __meminitdata nr_kernel_pages;
125unsigned long __meminitdata nr_all_pages;
126static unsigned long __meminitdata dma_reserve;
127
128#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
129  /*
130   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
131   * ranges of memory (RAM) that may be registered with add_active_range().
132   * Ranges passed to add_active_range() will be merged if possible
133   * so the number of times add_active_range() can be called is
134   * related to the number of nodes and the number of holes
135   */
136  #ifdef CONFIG_MAX_ACTIVE_REGIONS
137    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
138    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
139  #else
140    #if MAX_NUMNODES >= 32
141      /* If there can be many nodes, allow up to 50 holes per node */
142      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
143    #else
144      /* By default, allow up to 256 distinct regions */
145      #define MAX_ACTIVE_REGIONS 256
146    #endif
147  #endif
148
149  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
150  static int __meminitdata nr_nodemap_entries;
151  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
152  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
153  static unsigned long __initdata required_kernelcore;
154  static unsigned long __initdata required_movablecore;
155  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
156
157  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
158  int movable_zone;
159  EXPORT_SYMBOL(movable_zone);
160#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
161
162#if MAX_NUMNODES > 1
163int nr_node_ids __read_mostly = MAX_NUMNODES;
164EXPORT_SYMBOL(nr_node_ids);
165#endif
166
167int page_group_by_mobility_disabled __read_mostly;
168
169static void set_pageblock_migratetype(struct page *page, int migratetype)
170{
171	set_pageblock_flags_group(page, (unsigned long)migratetype,
172					PB_migrate, PB_migrate_end);
173}
174
175#ifdef CONFIG_DEBUG_VM
176static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
177{
178	int ret = 0;
179	unsigned seq;
180	unsigned long pfn = page_to_pfn(page);
181
182	do {
183		seq = zone_span_seqbegin(zone);
184		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
185			ret = 1;
186		else if (pfn < zone->zone_start_pfn)
187			ret = 1;
188	} while (zone_span_seqretry(zone, seq));
189
190	return ret;
191}
192
193static int page_is_consistent(struct zone *zone, struct page *page)
194{
195	if (!pfn_valid_within(page_to_pfn(page)))
196		return 0;
197	if (zone != page_zone(page))
198		return 0;
199
200	return 1;
201}
202/*
203 * Temporary debugging check for pages not lying within a given zone.
204 */
205static int bad_range(struct zone *zone, struct page *page)
206{
207	if (page_outside_zone_boundaries(zone, page))
208		return 1;
209	if (!page_is_consistent(zone, page))
210		return 1;
211
212	return 0;
213}
214#else
215static inline int bad_range(struct zone *zone, struct page *page)
216{
217	return 0;
218}
219#endif
220
221static void bad_page(struct page *page)
222{
223	static unsigned long resume;
224	static unsigned long nr_shown;
225	static unsigned long nr_unshown;
226
227	/*
228	 * Allow a burst of 60 reports, then keep quiet for that minute;
229	 * or allow a steady drip of one report per second.
230	 */
231	if (nr_shown == 60) {
232		if (time_before(jiffies, resume)) {
233			nr_unshown++;
234			goto out;
235		}
236		if (nr_unshown) {
237			printk(KERN_ALERT
238			      "BUG: Bad page state: %lu messages suppressed\n",
239				nr_unshown);
240			nr_unshown = 0;
241		}
242		nr_shown = 0;
243	}
244	if (nr_shown++ == 0)
245		resume = jiffies + 60 * HZ;
246
247	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
248		current->comm, page_to_pfn(page));
249	printk(KERN_ALERT
250		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
251		page, (void *)page->flags, page_count(page),
252		page_mapcount(page), page->mapping, page->index);
253
254	dump_stack();
255out:
256	/* Leave bad fields for debug, except PageBuddy could make trouble */
257	__ClearPageBuddy(page);
258	add_taint(TAINT_BAD_PAGE);
259}
260
261/*
262 * Higher-order pages are called "compound pages".  They are structured thusly:
263 *
264 * The first PAGE_SIZE page is called the "head page".
265 *
266 * The remaining PAGE_SIZE pages are called "tail pages".
267 *
268 * All pages have PG_compound set.  All tail pages have their ->first_page
269 * pointing at the head page (see prep_compound_page() below).
270 *
271 * The first tail page's ->lru.next holds the address of the compound page's
272 * put_page() function.  Its ->lru.prev holds the order of allocation.
273 * This usage means that zero-order pages may not be compound.
274 */
275
276static void free_compound_page(struct page *page)
277{
278	__free_pages_ok(page, compound_order(page));
279}
280
281void prep_compound_page(struct page *page, unsigned long order)
282{
283	int i;
284	int nr_pages = 1 << order;
285
286	set_compound_page_dtor(page, free_compound_page);
287	set_compound_order(page, order);
288	__SetPageHead(page);
289	for (i = 1; i < nr_pages; i++) {
290		struct page *p = page + i;
291
292		__SetPageTail(p);
293		p->first_page = page;
294	}
295}
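
/*
 * Worked example (illustrative only): prep_compound_page(page, 2) marks
 * page[0] as the head, records free_compound_page() and the order (2) via
 * set_compound_page_dtor()/set_compound_order() -- per the comment above,
 * in the first tail page's lru fields -- and marks page[1..3] as tails
 * whose ->first_page points back at page[0].
 */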
296
297#ifdef CONFIG_HUGETLBFS
298void prep_compound_gigantic_page(struct page *page, unsigned long order)
299{
300	int i;
301	int nr_pages = 1 << order;
302	struct page *p = page + 1;
303
304	set_compound_page_dtor(page, free_compound_page);
305	set_compound_order(page, order);
306	__SetPageHead(page);
307	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
308		__SetPageTail(p);
309		p->first_page = page;
310	}
311}
312#endif
313
314static int destroy_compound_page(struct page *page, unsigned long order)
315{
316	int i;
317	int nr_pages = 1 << order;
318	int bad = 0;
319
320	if (unlikely(compound_order(page) != order) ||
321	    unlikely(!PageHead(page))) {
322		bad_page(page);
323		bad++;
324	}
325
326	__ClearPageHead(page);
327
328	for (i = 1; i < nr_pages; i++) {
329		struct page *p = page + i;
330
331		if (unlikely(!PageTail(p) || (p->first_page != page))) {
332			bad_page(page);
333			bad++;
334		}
335		__ClearPageTail(p);
336	}
337
338	return bad;
339}
340
341static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
342{
343	int i;
344
345	/*
346	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
347	 * and __GFP_HIGHMEM from hard or soft interrupt context.
348	 */
349	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
350	for (i = 0; i < (1 << order); i++)
351		clear_highpage(page + i);
352}
353
354static inline void set_page_order(struct page *page, int order)
355{
356	set_page_private(page, order);
357	__SetPageBuddy(page);
358}
359
360static inline void rmv_page_order(struct page *page)
361{
362	__ClearPageBuddy(page);
363	set_page_private(page, 0);
364}
365
366/*
367 * Locate the struct page for both the matching buddy in our
368 * pair (buddy1) and the combined order O+1 page they form (page).
369 *
370 * 1) Any buddy B1 will have an order O twin B2 which satisfies
371 * the following equation:
372 *     B2 = B1 ^ (1 << O)
373 * For example, if the starting buddy (B1) is #8, its order-1
374 * buddy (B2) is #10:
375 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
376 *
377 * 2) Any buddy B will have an order O+1 parent P which
378 * satisfies the following equation:
379 *     P = B & ~(1 << O)
380 *
381 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
382 */
383static inline struct page *
384__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
385{
386	unsigned long buddy_idx = page_idx ^ (1 << order);
387
388	return page + (buddy_idx - page_idx);
389}
390
391static inline unsigned long
392__find_combined_index(unsigned long page_idx, unsigned int order)
393{
394	return (page_idx & ~(1 << order));
395}
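
/*
 * Worked example of the index arithmetic above (illustrative only):
 * for page_idx = 12 and order = 2, the buddy index is 12 ^ (1 << 2) = 8
 * and the combined order-3 index is 12 & ~(1 << 2) = 8; starting from
 * page_idx = 8 at the same order gives buddy index 12 and the same
 * combined index 8.
 */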
396
397/*
398 * This function checks whether a page is free && is the buddy.
399 * We can coalesce a page and its buddy if
400 * (a) the buddy is not in a hole &&
401 * (b) the buddy is in the buddy system &&
402 * (c) a page and its buddy have the same order &&
403 * (d) a page and its buddy are in the same zone.
404 *
405 * For recording whether a page is in the buddy system, we use PG_buddy.
406 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
407 *
408 * For recording page's order, we use page_private(page).
409 */
410static inline int page_is_buddy(struct page *page, struct page *buddy,
411								int order)
412{
413	if (!pfn_valid_within(page_to_pfn(buddy)))
414		return 0;
415
416	if (page_zone_id(page) != page_zone_id(buddy))
417		return 0;
418
419	if (PageBuddy(buddy) && page_order(buddy) == order) {
420		BUG_ON(page_count(buddy) != 0);
421		return 1;
422	}
423	return 0;
424}
425
426/*
427 * Freeing function for a buddy system allocator.
428 *
429 * The concept of a buddy system is to maintain direct-mapped table
430 * (containing bit values) for memory blocks of various "orders".
431 * The bottom level table contains the map for the smallest allocatable
432 * units of memory (here, pages), and each level above it describes
433 * pairs of units from the levels below, hence, "buddies".
434 * At a high level, all that happens here is marking the table entry
435 * at the bottom level available, and propagating the changes upward
436 * as necessary, plus some accounting needed to play nicely with other
437 * parts of the VM system.
438 * At each level, we keep a list of pages, which are heads of contiguous
439 * runs of free pages of length (1 << order), marked with PG_buddy. The page's
440 * order is recorded in the page_private(page) field.
441 * So when we are allocating or freeing one, we can derive the state of the
442 * other.  That is, if we allocate a small block, and both were
443 * free, the remainder of the region must be split into blocks.
444 * If a block is freed, and its buddy is also free, then this
445 * triggers coalescing into a block of larger size.
446 *
447 * -- wli
448 */
449
450static inline void __free_one_page(struct page *page,
451		struct zone *zone, unsigned int order)
452{
453	unsigned long page_idx;
454	int order_size = 1 << order;
455	int migratetype = get_pageblock_migratetype(page);
456
457	if (unlikely(PageCompound(page)))
458		if (unlikely(destroy_compound_page(page, order)))
459			return;
460
461	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
462
463	VM_BUG_ON(page_idx & (order_size - 1));
464	VM_BUG_ON(bad_range(zone, page));
465
466	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
467	while (order < MAX_ORDER-1) {
468		unsigned long combined_idx;
469		struct page *buddy;
470
471		buddy = __page_find_buddy(page, page_idx, order);
472		if (!page_is_buddy(page, buddy, order))
473			break;
474
475		/* Our buddy is free, merge with it and move up one order. */
476		list_del(&buddy->lru);
477		zone->free_area[order].nr_free--;
478		rmv_page_order(buddy);
479		combined_idx = __find_combined_index(page_idx, order);
480		page = page + (combined_idx - page_idx);
481		page_idx = combined_idx;
482		order++;
483	}
484	set_page_order(page, order);
485	list_add(&page->lru,
486		&zone->free_area[order].free_list[migratetype]);
487	zone->free_area[order].nr_free++;
488}
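
/*
 * Illustrative trace of the loop above (not code from this file): freeing
 * the order-0 page at page_idx 5 when its buddy at idx 4 is already free
 * merges them into an order-1 block at idx 4; if the order-1 buddy at
 * idx 6 is free as well, the result is an order-2 block at idx 4, and so
 * on until a buddy is missing or MAX_ORDER-1 is reached.
 */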
489
490static inline int free_pages_check(struct page *page)
491{
492	free_page_mlock(page);
493	if (unlikely(page_mapcount(page) |
494		(page->mapping != NULL)  |
495		(page_count(page) != 0)  |
496		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
497		bad_page(page);
498		return 1;
499	}
500	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
501		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
502	return 0;
503}
504
505/*
506 * Frees a list of pages.
507 * Assumes all pages on list are in same zone, and of same order.
508 * count is the number of pages to free.
509 *
510 * If the zone was previously in an "all pages pinned" state then look to
511 * see if this freeing clears that state.
512 *
513 * And clear the zone's pages_scanned counter, to hold off the "all pages are
514 * pinned" detection logic.
515 */
516static void free_pages_bulk(struct zone *zone, int count,
517					struct list_head *list, int order)
518{
519	spin_lock(&zone->lock);
520	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
521	zone->pages_scanned = 0;
522	while (count--) {
523		struct page *page;
524
525		VM_BUG_ON(list_empty(list));
526		page = list_entry(list->prev, struct page, lru);
527		/* have to delete it as __free_one_page list manipulates */
528		list_del(&page->lru);
529		__free_one_page(page, zone, order);
530	}
531	spin_unlock(&zone->lock);
532}
533
534static void free_one_page(struct zone *zone, struct page *page, int order)
535{
536	spin_lock(&zone->lock);
537	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
538	zone->pages_scanned = 0;
539	__free_one_page(page, zone, order);
540	spin_unlock(&zone->lock);
541}
542
543static void __free_pages_ok(struct page *page, unsigned int order)
544{
545	unsigned long flags;
546	int i;
547	int bad = 0;
548
549	for (i = 0 ; i < (1 << order) ; ++i)
550		bad += free_pages_check(page + i);
551	if (bad)
552		return;
553
554	if (!PageHighMem(page)) {
555		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
556		debug_check_no_obj_freed(page_address(page),
557					   PAGE_SIZE << order);
558	}
559	arch_free_page(page, order);
560	kernel_map_pages(page, 1 << order, 0);
561
562	local_irq_save(flags);
563	__count_vm_events(PGFREE, 1 << order);
564	free_one_page(page_zone(page), page, order);
565	local_irq_restore(flags);
566}
567
568/*
569 * permit the bootmem allocator to evade page validation on high-order frees
570 */
571void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
572{
573	if (order == 0) {
574		__ClearPageReserved(page);
575		set_page_count(page, 0);
576		set_page_refcounted(page);
577		__free_page(page);
578	} else {
579		int loop;
580
581		prefetchw(page);
582		for (loop = 0; loop < BITS_PER_LONG; loop++) {
583			struct page *p = &page[loop];
584
585			if (loop + 1 < BITS_PER_LONG)
586				prefetchw(p + 1);
587			__ClearPageReserved(p);
588			set_page_count(p, 0);
589		}
590
591		set_page_refcounted(page);
592		__free_pages(page, order);
593	}
594}
595
596
597/*
598 * The order of subdivision here is critical for the IO subsystem.
599 * Please do not alter this order without good reasons and regression
600 * testing. Specifically, as large blocks of memory are subdivided,
601 * the order in which smaller blocks are delivered depends on the order
602 * they're subdivided in this function. This is the primary factor
603 * influencing the order in which pages are delivered to the IO
604 * subsystem according to empirical testing, and this is also justified
605 * by considering the behavior of a buddy system containing a single
606 * large block of memory acted on by a series of small allocations.
607 * This behavior is a critical factor in sglist merging's success.
608 *
609 * -- wli
610 */
611static inline void expand(struct zone *zone, struct page *page,
612	int low, int high, struct free_area *area,
613	int migratetype)
614{
615	unsigned long size = 1 << high;
616
617	while (high > low) {
618		area--;
619		high--;
620		size >>= 1;
621		VM_BUG_ON(bad_range(zone, &page[size]));
622		list_add(&page[size].lru, &area->free_list[migratetype]);
623		area->nr_free++;
624		set_page_order(&page[size], high);
625	}
626}
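
/*
 * Worked example (illustrative only): expand(zone, page, 0, 3, area, mt)
 * on an order-3 block hands back page[4..7] as an order-2 block,
 * page[2..3] as an order-1 block and page[1] as an order-0 block, leaving
 * page[0] for the caller (e.g. __rmqueue_smallest() below).
 */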
627
628/*
629 * This page is about to be returned from the page allocator
630 */
631static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
632{
633	if (unlikely(page_mapcount(page) |
634		(page->mapping != NULL)  |
635		(page_count(page) != 0)  |
636		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
637		bad_page(page);
638		return 1;
639	}
640
641	set_page_private(page, 0);
642	set_page_refcounted(page);
643
644	arch_alloc_page(page, order);
645	kernel_map_pages(page, 1 << order, 1);
646
647	if (gfp_flags & __GFP_ZERO)
648		prep_zero_page(page, order, gfp_flags);
649
650	if (order && (gfp_flags & __GFP_COMP))
651		prep_compound_page(page, order);
652
653	return 0;
654}
655
656/*
657 * Go through the free lists for the given migratetype and remove
658 * the smallest available page from the freelists
659 */
660static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
661						int migratetype)
662{
663	unsigned int current_order;
664	struct free_area * area;
665	struct page *page;
666
667	/* Find a page of the appropriate size in the preferred list */
668	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
669		area = &(zone->free_area[current_order]);
670		if (list_empty(&area->free_list[migratetype]))
671			continue;
672
673		page = list_entry(area->free_list[migratetype].next,
674							struct page, lru);
675		list_del(&page->lru);
676		rmv_page_order(page);
677		area->nr_free--;
678		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
679		expand(zone, page, order, current_order, area, migratetype);
680		return page;
681	}
682
683	return NULL;
684}
685
686
687/*
688 * This array describes the order in which free lists are fallen back to
689 * when the free lists for the desired migrate type are depleted
690 */
691static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
692	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
693	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
694	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
695	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
696};
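
/*
 * Illustrative reading of the table above (not code from this file): an
 * order-0 MIGRATE_UNMOVABLE request whose own free lists are empty is
 * serviced from MIGRATE_RECLAIMABLE first, then MIGRATE_MOVABLE, and only
 * then from MIGRATE_RESERVE via __rmqueue_smallest() at the end of
 * __rmqueue_fallback().
 */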
697
698/*
699 * Move the free pages in a range to the free lists of the requested type.
700 * Note that start_page and end_page are not aligned on a pageblock
701 * boundary. If alignment is required, use move_freepages_block()
702 */
703static int move_freepages(struct zone *zone,
704			  struct page *start_page, struct page *end_page,
705			  int migratetype)
706{
707	struct page *page;
708	unsigned long order;
709	int pages_moved = 0;
710
711#ifndef CONFIG_HOLES_IN_ZONE
712	/*
713	 * page_zone is not safe to call in this context when
714	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
715	 * anyway as we check zone boundaries in move_freepages_block().
716	 * Remove at a later date when no bug reports exist related to
717	 * grouping pages by mobility
718	 */
719	BUG_ON(page_zone(start_page) != page_zone(end_page));
720#endif
721
722	for (page = start_page; page <= end_page;) {
723		/* Make sure we are not inadvertently changing nodes */
724		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
725
726		if (!pfn_valid_within(page_to_pfn(page))) {
727			page++;
728			continue;
729		}
730
731		if (!PageBuddy(page)) {
732			page++;
733			continue;
734		}
735
736		order = page_order(page);
737		list_del(&page->lru);
738		list_add(&page->lru,
739			&zone->free_area[order].free_list[migratetype]);
740		page += 1 << order;
741		pages_moved += 1 << order;
742	}
743
744	return pages_moved;
745}
746
747static int move_freepages_block(struct zone *zone, struct page *page,
748				int migratetype)
749{
750	unsigned long start_pfn, end_pfn;
751	struct page *start_page, *end_page;
752
753	start_pfn = page_to_pfn(page);
754	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
755	start_page = pfn_to_page(start_pfn);
756	end_page = start_page + pageblock_nr_pages - 1;
757	end_pfn = start_pfn + pageblock_nr_pages - 1;
758
759	/* Do not cross zone boundaries */
760	if (start_pfn < zone->zone_start_pfn)
761		start_page = page;
762	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
763		return 0;
764
765	return move_freepages(zone, start_page, end_page, migratetype);
766}
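
/*
 * Worked example (illustrative, assuming pageblock_nr_pages == 512, e.g.
 * 4K pages with 2M pageblocks): a page at pfn 1234 is rounded down to
 * start_pfn 1024, so move_freepages_block() operates on pfns 1024..1535;
 * the caller's page is used instead when the rounded start falls before
 * the zone, and nothing is moved when the block runs past the zone end.
 */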
767
768/* Remove an element from the buddy allocator from the fallback list */
769static struct page *__rmqueue_fallback(struct zone *zone, int order,
770						int start_migratetype)
771{
772	struct free_area * area;
773	int current_order;
774	struct page *page;
775	int migratetype, i;
776
777	/* Find the largest possible block of pages in the other list */
778	for (current_order = MAX_ORDER-1; current_order >= order;
779						--current_order) {
780		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
781			migratetype = fallbacks[start_migratetype][i];
782
783			/* MIGRATE_RESERVE handled later if necessary */
784			if (migratetype == MIGRATE_RESERVE)
785				continue;
786
787			area = &(zone->free_area[current_order]);
788			if (list_empty(&area->free_list[migratetype]))
789				continue;
790
791			page = list_entry(area->free_list[migratetype].next,
792					struct page, lru);
793			area->nr_free--;
794
795			/*
796			 * If breaking a large block of pages, move all free
797			 * pages to the preferred allocation list. If falling
798			 * back for a reclaimable kernel allocation, be more
799			 * aggressive about taking ownership of free pages
800			 */
801			if (unlikely(current_order >= (pageblock_order >> 1)) ||
802					start_migratetype == MIGRATE_RECLAIMABLE) {
803				unsigned long pages;
804				pages = move_freepages_block(zone, page,
805								start_migratetype);
806
807				/* Claim the whole block if over half of it is free */
808				if (pages >= (1 << (pageblock_order-1)))
809					set_pageblock_migratetype(page,
810								start_migratetype);
811
812				migratetype = start_migratetype;
813			}
814
815			/* Remove the page from the freelists */
816			list_del(&page->lru);
817			rmv_page_order(page);
818			__mod_zone_page_state(zone, NR_FREE_PAGES,
819							-(1UL << order));
820
821			if (current_order == pageblock_order)
822				set_pageblock_migratetype(page,
823							start_migratetype);
824
825			expand(zone, page, order, current_order, area, migratetype);
826			return page;
827		}
828	}
829
830	/* Use MIGRATE_RESERVE rather than fail an allocation */
831	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
832}
833
834/*
835 * Do the hard work of removing an element from the buddy allocator.
836 * Call me with the zone->lock already held.
837 */
838static struct page *__rmqueue(struct zone *zone, unsigned int order,
839						int migratetype)
840{
841	struct page *page;
842
843	page = __rmqueue_smallest(zone, order, migratetype);
844
845	if (unlikely(!page))
846		page = __rmqueue_fallback(zone, order, migratetype);
847
848	return page;
849}
850
851/*
852 * Obtain a specified number of elements from the buddy allocator, all under
853 * a single hold of the lock, for efficiency.  Add them to the supplied list.
854 * Returns the number of new pages which were placed at *list.
855 */
856static int rmqueue_bulk(struct zone *zone, unsigned int order,
857			unsigned long count, struct list_head *list,
858			int migratetype)
859{
860	int i;
861
862	spin_lock(&zone->lock);
863	for (i = 0; i < count; ++i) {
864		struct page *page = __rmqueue(zone, order, migratetype);
865		if (unlikely(page == NULL))
866			break;
867
868		/*
869		 * Split buddy pages returned by expand() are received here
870		 * in physical page order. The page is added to the caller's
871		 * list and the list head then moves forward. From the caller's
872		 * perspective, the linked list is ordered by page number under
873		 * some conditions. This is useful for IO devices that can
874		 * merge IO requests if the physical pages are ordered
875		 * properly.
876		 */
877		list_add(&page->lru, list);
878		set_page_private(page, migratetype);
879		list = &page->lru;
880	}
881	spin_unlock(&zone->lock);
882	return i;
883}
884
885#ifdef CONFIG_NUMA
886/*
887 * Called from the vmstat counter updater to drain pagesets of this
888 * currently executing processor on remote nodes after they have
889 * expired.
890 *
891 * Note that this function must be called with the thread pinned to
892 * a single processor.
893 */
894void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
895{
896	unsigned long flags;
897	int to_drain;
898
899	local_irq_save(flags);
900	if (pcp->count >= pcp->batch)
901		to_drain = pcp->batch;
902	else
903		to_drain = pcp->count;
904	free_pages_bulk(zone, to_drain, &pcp->list, 0);
905	pcp->count -= to_drain;
906	local_irq_restore(flags);
907}
908#endif
909
910/*
911 * Drain pages of the indicated processor.
912 *
913 * The processor must either be the current processor and the
914 * thread pinned to the current processor or a processor that
915 * is not online.
916 */
917static void drain_pages(unsigned int cpu)
918{
919	unsigned long flags;
920	struct zone *zone;
921
922	for_each_populated_zone(zone) {
923		struct per_cpu_pageset *pset;
924		struct per_cpu_pages *pcp;
925
926		pset = zone_pcp(zone, cpu);
927
928		pcp = &pset->pcp;
929		local_irq_save(flags);
930		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
931		pcp->count = 0;
932		local_irq_restore(flags);
933	}
934}
935
936/*
937 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
938 */
939void drain_local_pages(void *arg)
940{
941	drain_pages(smp_processor_id());
942}
943
944/*
945 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
946 */
947void drain_all_pages(void)
948{
949	on_each_cpu(drain_local_pages, NULL, 1);
950}
951
952#ifdef CONFIG_HIBERNATION
953
954void mark_free_pages(struct zone *zone)
955{
956	unsigned long pfn, max_zone_pfn;
957	unsigned long flags;
958	int order, t;
959	struct list_head *curr;
960
961	if (!zone->spanned_pages)
962		return;
963
964	spin_lock_irqsave(&zone->lock, flags);
965
966	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
967	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
968		if (pfn_valid(pfn)) {
969			struct page *page = pfn_to_page(pfn);
970
971			if (!swsusp_page_is_forbidden(page))
972				swsusp_unset_page_free(page);
973		}
974
975	for_each_migratetype_order(order, t) {
976		list_for_each(curr, &zone->free_area[order].free_list[t]) {
977			unsigned long i;
978
979			pfn = page_to_pfn(list_entry(curr, struct page, lru));
980			for (i = 0; i < (1UL << order); i++)
981				swsusp_set_page_free(pfn_to_page(pfn + i));
982		}
983	}
984	spin_unlock_irqrestore(&zone->lock, flags);
985}
986#endif /* CONFIG_HIBERNATION */
987
988/*
989 * Free a 0-order page
990 */
991static void free_hot_cold_page(struct page *page, int cold)
992{
993	struct zone *zone = page_zone(page);
994	struct per_cpu_pages *pcp;
995	unsigned long flags;
996
997	if (PageAnon(page))
998		page->mapping = NULL;
999	if (free_pages_check(page))
1000		return;
1001
1002	if (!PageHighMem(page)) {
1003		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
1004		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
1005	}
1006	arch_free_page(page, 0);
1007	kernel_map_pages(page, 1, 0);
1008
1009	pcp = &zone_pcp(zone, get_cpu())->pcp;
1010	local_irq_save(flags);
1011	__count_vm_event(PGFREE);
1012	if (cold)
1013		list_add_tail(&page->lru, &pcp->list);
1014	else
1015		list_add(&page->lru, &pcp->list);
1016	set_page_private(page, get_pageblock_migratetype(page));
1017	pcp->count++;
1018	if (pcp->count >= pcp->high) {
1019		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
1020		pcp->count -= pcp->batch;
1021	}
1022	local_irq_restore(flags);
1023	put_cpu();
1024}
1025
1026void free_hot_page(struct page *page)
1027{
1028	free_hot_cold_page(page, 0);
1029}
1030
1031void free_cold_page(struct page *page)
1032{
1033	free_hot_cold_page(page, 1);
1034}
1035
1036/*
1037 * split_page takes a non-compound higher-order page, and splits it into
1038 * n (1<<order) sub-pages: page[0..n-1]
1039 * Each sub-page must be freed individually.
1040 *
1041 * Note: this is probably too low level an operation for use in drivers.
1042 * Please consult with lkml before using this in your driver.
1043 */
1044void split_page(struct page *page, unsigned int order)
1045{
1046	int i;
1047
1048	VM_BUG_ON(PageCompound(page));
1049	VM_BUG_ON(!page_count(page));
1050	for (i = 1; i < (1 << order); i++)
1051		set_page_refcounted(page + i);
1052}
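
/*
 * Illustrative usage (not from this file): a caller that wants four
 * independently freeable pages that start out physically contiguous
 * might do
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	if (page) {
 *		split_page(page, 2);
 *		...
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);
 *	}
 */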
1053
1054/*
1055 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1056 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1057 * or two.
1058 */
1059static struct page *buffered_rmqueue(struct zone *preferred_zone,
1060			struct zone *zone, int order, gfp_t gfp_flags)
1061{
1062	unsigned long flags;
1063	struct page *page;
1064	int cold = !!(gfp_flags & __GFP_COLD);
1065	int cpu;
1066	int migratetype = allocflags_to_migratetype(gfp_flags);
1067
1068again:
1069	cpu  = get_cpu();
1070	if (likely(order == 0)) {
1071		struct per_cpu_pages *pcp;
1072
1073		pcp = &zone_pcp(zone, cpu)->pcp;
1074		local_irq_save(flags);
1075		if (!pcp->count) {
1076			pcp->count = rmqueue_bulk(zone, 0,
1077					pcp->batch, &pcp->list, migratetype);
1078			if (unlikely(!pcp->count))
1079				goto failed;
1080		}
1081
1082		/* Find a page of the appropriate migrate type */
1083		if (cold) {
1084			list_for_each_entry_reverse(page, &pcp->list, lru)
1085				if (page_private(page) == migratetype)
1086					break;
1087		} else {
1088			list_for_each_entry(page, &pcp->list, lru)
1089				if (page_private(page) == migratetype)
1090					break;
1091		}
1092
1093		/* Allocate more to the pcp list if necessary */
1094		if (unlikely(&page->lru == &pcp->list)) {
1095			pcp->count += rmqueue_bulk(zone, 0,
1096					pcp->batch, &pcp->list, migratetype);
1097			page = list_entry(pcp->list.next, struct page, lru);
1098		}
1099
1100		list_del(&page->lru);
1101		pcp->count--;
1102	} else {
1103		spin_lock_irqsave(&zone->lock, flags);
1104		page = __rmqueue(zone, order, migratetype);
1105		spin_unlock(&zone->lock);
1106		if (!page)
1107			goto failed;
1108	}
1109
1110	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1111	zone_statistics(preferred_zone, zone);
1112	local_irq_restore(flags);
1113	put_cpu();
1114
1115	VM_BUG_ON(bad_range(zone, page));
1116	if (prep_new_page(page, order, gfp_flags))
1117		goto again;
1118	return page;
1119
1120failed:
1121	local_irq_restore(flags);
1122	put_cpu();
1123	return NULL;
1124}
1125
1126#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
1127#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
1128#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
1129#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
1130#define ALLOC_HARDER		0x10 /* try to alloc harder */
1131#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1132#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
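
/*
 * Illustrative mapping (see __alloc_pages_internal() below): a GFP_ATOMIC
 * caller (!__GFP_WAIT, __GFP_HIGH) ends up with
 * ALLOC_WMARK_MIN | ALLOC_HARDER | ALLOC_HIGH and, because it cannot
 * sleep, without ALLOC_CPUSET.
 */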
1133
1134#ifdef CONFIG_FAIL_PAGE_ALLOC
1135
1136static struct fail_page_alloc_attr {
1137	struct fault_attr attr;
1138
1139	u32 ignore_gfp_highmem;
1140	u32 ignore_gfp_wait;
1141	u32 min_order;
1142
1143#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1144
1145	struct dentry *ignore_gfp_highmem_file;
1146	struct dentry *ignore_gfp_wait_file;
1147	struct dentry *min_order_file;
1148
1149#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1150
1151} fail_page_alloc = {
1152	.attr = FAULT_ATTR_INITIALIZER,
1153	.ignore_gfp_wait = 1,
1154	.ignore_gfp_highmem = 1,
1155	.min_order = 1,
1156};
1157
1158static int __init setup_fail_page_alloc(char *str)
1159{
1160	return setup_fault_attr(&fail_page_alloc.attr, str);
1161}
1162__setup("fail_page_alloc=", setup_fail_page_alloc);
1163
1164static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1165{
1166	if (order < fail_page_alloc.min_order)
1167		return 0;
1168	if (gfp_mask & __GFP_NOFAIL)
1169		return 0;
1170	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1171		return 0;
1172	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1173		return 0;
1174
1175	return should_fail(&fail_page_alloc.attr, 1 << order);
1176}
1177
1178#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1179
1180static int __init fail_page_alloc_debugfs(void)
1181{
1182	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1183	struct dentry *dir;
1184	int err;
1185
1186	err = init_fault_attr_dentries(&fail_page_alloc.attr,
1187				       "fail_page_alloc");
1188	if (err)
1189		return err;
1190	dir = fail_page_alloc.attr.dentries.dir;
1191
1192	fail_page_alloc.ignore_gfp_wait_file =
1193		debugfs_create_bool("ignore-gfp-wait", mode, dir,
1194				      &fail_page_alloc.ignore_gfp_wait);
1195
1196	fail_page_alloc.ignore_gfp_highmem_file =
1197		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1198				      &fail_page_alloc.ignore_gfp_highmem);
1199	fail_page_alloc.min_order_file =
1200		debugfs_create_u32("min-order", mode, dir,
1201				   &fail_page_alloc.min_order);
1202
1203	if (!fail_page_alloc.ignore_gfp_wait_file ||
1204            !fail_page_alloc.ignore_gfp_highmem_file ||
1205            !fail_page_alloc.min_order_file) {
1206		err = -ENOMEM;
1207		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1208		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1209		debugfs_remove(fail_page_alloc.min_order_file);
1210		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1211	}
1212
1213	return err;
1214}
1215
1216late_initcall(fail_page_alloc_debugfs);
1217
1218#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1219
1220#else /* CONFIG_FAIL_PAGE_ALLOC */
1221
1222static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1223{
1224	return 0;
1225}
1226
1227#endif /* CONFIG_FAIL_PAGE_ALLOC */
1228
1229/*
1230 * Return 1 if free pages are above 'mark'. This takes into account the order
1231 * of the allocation.
1232 */
1233int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1234		      int classzone_idx, int alloc_flags)
1235{
1236	/* free_pages may go negative - that's OK */
1237	long min = mark;
1238	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1239	int o;
1240
1241	if (alloc_flags & ALLOC_HIGH)
1242		min -= min / 2;
1243	if (alloc_flags & ALLOC_HARDER)
1244		min -= min / 4;
1245
1246	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1247		return 0;
1248	for (o = 0; o < order; o++) {
1249		/* At the next order, this order's pages become unavailable */
1250		free_pages -= z->free_area[o].nr_free << o;
1251
1252		/* Require fewer higher order pages to be free */
1253		min >>= 1;
1254
1255		if (free_pages <= min)
1256			return 0;
1257	}
1258	return 1;
1259}
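
/*
 * Worked example with made-up numbers (illustrative only): for an order-2
 * request against mark = 128 with no ALLOC_HIGH/ALLOC_HARDER and a zero
 * lowmem_reserve, a zone with 200 free pages (80 at order 0 and 20 blocks
 * at order 1) passes: 200 - 4 + 1 = 197 > 128, then 197 - 80 = 117 > 64
 * at order 0 and 117 - 40 = 77 > 32 at order 1, so the function returns 1.
 */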
1260
1261#ifdef CONFIG_NUMA
1262/*
1263 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1264 * skip over zones that are not allowed by the cpuset, or that have
1265 * been recently (in the last second) found to be nearly full.  See further
1266 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1267 * that have to skip over a lot of full or unallowed zones.
1268 *
1269 * If the zonelist cache is present in the passed in zonelist, then
1270 * returns a pointer to the allowed node mask (either the current
1271 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
1272 *
1273 * If the zonelist cache is not available for this zonelist, does
1274 * nothing and returns NULL.
1275 *
1276 * If the fullzones BITMAP in the zonelist cache is stale (more than
1277 * a second since last zap'd) then we zap it out (clear its bits.)
1278 *
1279 * We hold off even calling zlc_setup, until after we've checked the
1280 * first zone in the zonelist, on the theory that most allocations will
1281 * be satisfied from that first zone, so best to examine that zone as
1282 * quickly as we can.
1283 */
1284static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1285{
1286	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1287	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1288
1289	zlc = zonelist->zlcache_ptr;
1290	if (!zlc)
1291		return NULL;
1292
1293	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1294		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1295		zlc->last_full_zap = jiffies;
1296	}
1297
1298	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1299					&cpuset_current_mems_allowed :
1300					&node_states[N_HIGH_MEMORY];
1301	return allowednodes;
1302}
1303
1304/*
1305 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1306 * if it is worth looking at further for free memory:
1307 *  1) Check that the zone isn't thought to be full (doesn't have its
1308 *     bit set in the zonelist_cache fullzones BITMAP).
1309 *  2) Check that the zone's node (obtained from the zonelist_cache
1310 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1311 * Return true (non-zero) if zone is worth looking at further, or
1312 * else return false (zero) if it is not.
1313 *
1314 * This check -ignores- the distinction between various watermarks,
1315 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1316 * found to be full for any variation of these watermarks, it will
1317 * be considered full for up to one second by all requests, unless
1318 * we are so low on memory on all allowed nodes that we are forced
1319 * into the second scan of the zonelist.
1320 *
1321 * In the second scan we ignore this zonelist cache and exactly
1322 * apply the watermarks to all zones, even if it is slower to do so.
1323 * We are low on memory in the second scan, and should leave no stone
1324 * unturned looking for a free page.
1325 */
1326static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1327						nodemask_t *allowednodes)
1328{
1329	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1330	int i;				/* index of *z in zonelist zones */
1331	int n;				/* node that zone *z is on */
1332
1333	zlc = zonelist->zlcache_ptr;
1334	if (!zlc)
1335		return 1;
1336
1337	i = z - zonelist->_zonerefs;
1338	n = zlc->z_to_n[i];
1339
1340	/* This zone is worth trying if it is allowed but not full */
1341	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1342}
1343
1344/*
1345 * Given 'z' scanning a zonelist, set the corresponding bit in
1346 * zlc->fullzones, so that subsequent attempts to allocate a page
1347 * from that zone don't waste time re-examining it.
1348 */
1349static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1350{
1351	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1352	int i;				/* index of *z in zonelist zones */
1353
1354	zlc = zonelist->zlcache_ptr;
1355	if (!zlc)
1356		return;
1357
1358	i = z - zonelist->_zonerefs;
1359
1360	set_bit(i, zlc->fullzones);
1361}
1362
1363#else	/* CONFIG_NUMA */
1364
1365static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1366{
1367	return NULL;
1368}
1369
1370static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1371				nodemask_t *allowednodes)
1372{
1373	return 1;
1374}
1375
1376static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1377{
1378}
1379#endif	/* CONFIG_NUMA */
1380
1381/*
1382 * get_page_from_freelist goes through the zonelist trying to allocate
1383 * a page.
1384 */
1385static struct page *
1386get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1387		struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
1388{
1389	struct zoneref *z;
1390	struct page *page = NULL;
1391	int classzone_idx;
1392	struct zone *zone, *preferred_zone;
1393	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1394	int zlc_active = 0;		/* set if using zonelist_cache */
1395	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1396
1397	(void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
1398							&preferred_zone);
1399	if (!preferred_zone)
1400		return NULL;
1401
1402	classzone_idx = zone_idx(preferred_zone);
1403
1404zonelist_scan:
1405	/*
1406	 * Scan zonelist, looking for a zone with enough free pages.
1407	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1408	 */
1409	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1410						high_zoneidx, nodemask) {
1411		if (NUMA_BUILD && zlc_active &&
1412			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1413				continue;
1414		if ((alloc_flags & ALLOC_CPUSET) &&
1415			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1416				goto try_next_zone;
1417
1418		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1419			unsigned long mark;
1420			if (alloc_flags & ALLOC_WMARK_MIN)
1421				mark = zone->pages_min;
1422			else if (alloc_flags & ALLOC_WMARK_LOW)
1423				mark = zone->pages_low;
1424			else
1425				mark = zone->pages_high;
1426			if (!zone_watermark_ok(zone, order, mark,
1427				    classzone_idx, alloc_flags)) {
1428				if (!zone_reclaim_mode ||
1429				    !zone_reclaim(zone, gfp_mask, order))
1430					goto this_zone_full;
1431			}
1432		}
1433
1434		page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
1435		if (page)
1436			break;
1437this_zone_full:
1438		if (NUMA_BUILD)
1439			zlc_mark_zone_full(zonelist, z);
1440try_next_zone:
1441		if (NUMA_BUILD && !did_zlc_setup) {
1442			/* we do zlc_setup after the first zone is tried */
1443			allowednodes = zlc_setup(zonelist, alloc_flags);
1444			zlc_active = 1;
1445			did_zlc_setup = 1;
1446		}
1447	}
1448
1449	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1450		/* Disable zlc cache for second zonelist scan */
1451		zlc_active = 0;
1452		goto zonelist_scan;
1453	}
1454	return page;
1455}
1456
1457/*
1458 * This is the 'heart' of the zoned buddy allocator.
1459 */
1460struct page *
1461__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
1462			struct zonelist *zonelist, nodemask_t *nodemask)
1463{
1464	const gfp_t wait = gfp_mask & __GFP_WAIT;
1465	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1466	struct zoneref *z;
1467	struct zone *zone;
1468	struct page *page;
1469	struct reclaim_state reclaim_state;
1470	struct task_struct *p = current;
1471	int do_retry;
1472	int alloc_flags;
1473	unsigned long did_some_progress;
1474	unsigned long pages_reclaimed = 0;
1475
1476	lockdep_trace_alloc(gfp_mask);
1477
1478	might_sleep_if(wait);
1479
1480	if (should_fail_alloc_page(gfp_mask, order))
1481		return NULL;
1482
1483restart:
1484	z = zonelist->_zonerefs;  /* the list of zones suitable for gfp_mask */
1485
1486	if (unlikely(!z->zone)) {
1487		/*
1488		 * Happens if we have an empty zonelist as a result of
1489		 * GFP_THISNODE being used on a memoryless node
1490		 */
1491		return NULL;
1492	}
1493
1494	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1495			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
1496	if (page)
1497		goto got_pg;
1498
1499	/*
1500	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1501	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1502	 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
1503	 * using a larger set of nodes after it has established that the
1504	 * allowed per node queues are empty and that nodes are
1505	 * over allocated.
1506	 */
1507	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1508		goto nopage;
1509
1510	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1511		wakeup_kswapd(zone, order);
1512
1513	/*
1514	 * OK, we're below the kswapd watermark and have kicked background
1515	 * reclaim. Now things get more complex, so set up alloc_flags according
1516	 * to how we want to proceed.
1517	 *
1518	 * The caller may dip into page reserves a bit more if the caller
1519	 * cannot run direct reclaim, or if the caller has realtime scheduling
1520	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1521	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1522	 */
1523	alloc_flags = ALLOC_WMARK_MIN;
1524	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
1525		alloc_flags |= ALLOC_HARDER;
1526	if (gfp_mask & __GFP_HIGH)
1527		alloc_flags |= ALLOC_HIGH;
1528	if (wait)
1529		alloc_flags |= ALLOC_CPUSET;
1530
1531	/*
1532	 * Go through the zonelist again. Let __GFP_HIGH and allocations
1533	 * coming from realtime tasks go deeper into reserves.
1534	 *
1535	 * This is the last chance, in general, before the goto nopage.
1536	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1537	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1538	 */
1539	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1540						high_zoneidx, alloc_flags);
1541	if (page)
1542		goto got_pg;
1543
1544	/* This allocation should allow future memory freeing. */
1545
1546rebalance:
1547	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
1548			&& !in_interrupt()) {
1549		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
1550nofail_alloc:
1551			/* go through the zonelist yet again, ignoring mins */
1552			page = get_page_from_freelist(gfp_mask, nodemask, order,
1553				zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
1554			if (page)
1555				goto got_pg;
1556			if (gfp_mask & __GFP_NOFAIL) {
1557				congestion_wait(WRITE, HZ/50);
1558				goto nofail_alloc;
1559			}
1560		}
1561		goto nopage;
1562	}
1563
1564	/* Atomic allocations - we can't balance anything */
1565	if (!wait)
1566		goto nopage;
1567
1568	cond_resched();
1569
1570	/* We now go into synchronous reclaim */
1571	cpuset_memory_pressure_bump();
1572
1573	p->flags |= PF_MEMALLOC;
1574
1575	lockdep_set_current_reclaim_state(gfp_mask);
1576	reclaim_state.reclaimed_slab = 0;
1577	p->reclaim_state = &reclaim_state;
1578
1579	did_some_progress = try_to_free_pages(zonelist, order,
1580						gfp_mask, nodemask);
1581
1582	p->reclaim_state = NULL;
1583	lockdep_clear_current_reclaim_state();
1584	p->flags &= ~PF_MEMALLOC;
1585
1586	cond_resched();
1587
1588	if (order != 0)
1589		drain_all_pages();
1590
1591	if (likely(did_some_progress)) {
1592		page = get_page_from_freelist(gfp_mask, nodemask, order,
1593					zonelist, high_zoneidx, alloc_flags);
1594		if (page)
1595			goto got_pg;
1596	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1597		if (!try_set_zone_oom(zonelist, gfp_mask)) {
1598			schedule_timeout_uninterruptible(1);
1599			goto restart;
1600		}
1601
1602		/*
1603		 * Go through the zonelist yet one more time, keep
1604		 * very high watermark here, this is only to catch
1605		 * a parallel oom killing, we must fail if we're still
1606		 * under heavy pressure.
1607		 */
1608		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1609			order, zonelist, high_zoneidx,
1610			ALLOC_WMARK_HIGH|ALLOC_CPUSET);
1611		if (page) {
1612			clear_zonelist_oom(zonelist, gfp_mask);
1613			goto got_pg;
1614		}
1615
1616		/* The OOM killer will not help higher order allocs so fail */
1617		if (order > PAGE_ALLOC_COSTLY_ORDER) {
1618			clear_zonelist_oom(zonelist, gfp_mask);
1619			goto nopage;
1620		}
1621
1622		out_of_memory(zonelist, gfp_mask, order);
1623		clear_zonelist_oom(zonelist, gfp_mask);
1624		goto restart;
1625	}
1626
1627	/*
1628	 * Don't let big-order allocations loop unless the caller explicitly
1629	 * requests that.  Wait for some write requests to complete then retry.
1630	 *
1631	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1632	 * means __GFP_NOFAIL, but that may not be true in other
1633	 * implementations.
1634	 *
1635	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1636	 * specified, then we retry until we no longer reclaim any pages
1637	 * (above), or we've reclaimed an order of pages at least as
1638	 * large as the allocation's order. In both cases, if the
1639	 * allocation still fails, we stop retrying.
1640	 */
1641	pages_reclaimed += did_some_progress;
1642	do_retry = 0;
1643	if (!(gfp_mask & __GFP_NORETRY)) {
1644		if (order <= PAGE_ALLOC_COSTLY_ORDER) {
1645			do_retry = 1;
1646		} else {
1647			if (gfp_mask & __GFP_REPEAT &&
1648				pages_reclaimed < (1 << order))
1649					do_retry = 1;
1650		}
1651		if (gfp_mask & __GFP_NOFAIL)
1652			do_retry = 1;
1653	}
1654	if (do_retry) {
1655		congestion_wait(WRITE, HZ/50);
1656		goto rebalance;
1657	}
1658
1659nopage:
1660	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1661		printk(KERN_WARNING "%s: page allocation failure."
1662			" order:%d, mode:0x%x\n",
1663			p->comm, order, gfp_mask);
1664		dump_stack();
1665		show_mem();
1666	}
1667got_pg:
1668	return page;
1669}
1670EXPORT_SYMBOL(__alloc_pages_internal);
1671
1672/*
1673 * Common helper functions.
1674 */
1675unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1676{
1677	struct page * page;
1678	page = alloc_pages(gfp_mask, order);
1679	if (!page)
1680		return 0;
1681	return (unsigned long) page_address(page);
1682}
1683
1684EXPORT_SYMBOL(__get_free_pages);
1685
1686unsigned long get_zeroed_page(gfp_t gfp_mask)
1687{
1688	struct page * page;
1689
1690	/*
1691	 * get_zeroed_page() returns a directly mapped kernel address, which
1692	 * cannot represent a highmem page
1693	 */
1694	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1695
1696	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1697	if (page)
1698		return (unsigned long) page_address(page);
1699	return 0;
1700}
1701
1702EXPORT_SYMBOL(get_zeroed_page);
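
/*
 * Illustrative usage of the helpers above (not from this file):
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_pages(addr, 1);
 *
 * The order-1 request returns the kernel virtual address of two
 * physically contiguous pages; free_pages() must be passed the same order.
 */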
1703
1704void __pagevec_free(struct pagevec *pvec)
1705{
1706	int i = pagevec_count(pvec);
1707
1708	while (--i >= 0)
1709		free_hot_cold_page(pvec->pages[i], pvec->cold);
1710}
1711
1712void __free_pages(struct page *page, unsigned int order)
1713{
1714	if (put_page_testzero(page)) {
1715		if (order == 0)
1716			free_hot_page(page);
1717		else
1718			__free_pages_ok(page, order);
1719	}
1720}
1721
1722EXPORT_SYMBOL(__free_pages);
1723
1724void free_pages(unsigned long addr, unsigned int order)
1725{
1726	if (addr != 0) {
1727		VM_BUG_ON(!virt_addr_valid((void *)addr));
1728		__free_pages(virt_to_page((void *)addr), order);
1729	}
1730}
1731
1732EXPORT_SYMBOL(free_pages);
1733
1734/**
1735 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
1736 * @size: the number of bytes to allocate
1737 * @gfp_mask: GFP flags for the allocation
1738 *
1739 * This function is similar to alloc_pages(), except that it allocates the
1740 * minimum number of pages to satisfy the request.  alloc_pages() can only
1741 * allocate memory in power-of-two pages.
1742 *
1743 * This function is also limited by MAX_ORDER.
1744 *
1745 * Memory allocated by this function must be released by free_pages_exact().
1746 */
1747void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
1748{
1749	unsigned int order = get_order(size);
1750	unsigned long addr;
1751
1752	addr = __get_free_pages(gfp_mask, order);
1753	if (addr) {
1754		unsigned long alloc_end = addr + (PAGE_SIZE << order);
1755		unsigned long used = addr + PAGE_ALIGN(size);
1756
1757		split_page(virt_to_page(addr), order);
1758		while (used < alloc_end) {
1759			free_page(used);
1760			used += PAGE_SIZE;
1761		}
1762	}
1763
1764	return (void *)addr;
1765}
1766EXPORT_SYMBOL(alloc_pages_exact);
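
/*
 * Worked example (illustrative only): alloc_pages_exact(5 * PAGE_SIZE,
 * GFP_KERNEL) grabs an order-3 (eight page) block, split_page()s it and
 * immediately frees the trailing three pages, so only five pages stay
 * allocated; they must later be returned with
 * free_pages_exact(addr, 5 * PAGE_SIZE).
 */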
1767
1768/**
1769 * free_pages_exact - release memory allocated via alloc_pages_exact()
1770 * @virt: the value returned by alloc_pages_exact.
1771 * @size: size of allocation, same value as passed to alloc_pages_exact().
1772 *
1773 * Release the memory allocated by a previous call to alloc_pages_exact.
1774 */
1775void free_pages_exact(void *virt, size_t size)
1776{
1777	unsigned long addr = (unsigned long)virt;
1778	unsigned long end = addr + PAGE_ALIGN(size);
1779
1780	while (addr < end) {
1781		free_page(addr);
1782		addr += PAGE_SIZE;
1783	}
1784}
1785EXPORT_SYMBOL(free_pages_exact);
1786
1787static unsigned int nr_free_zone_pages(int offset)
1788{
1789	struct zoneref *z;
1790	struct zone *zone;
1791
1792	/* Just pick one node, since fallback list is circular */
1793	unsigned int sum = 0;
1794
1795	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1796
1797	for_each_zone_zonelist(zone, z, zonelist, offset) {
1798		unsigned long size = zone->present_pages;
1799		unsigned long high = zone->pages_high;
1800		if (size > high)
1801			sum += size - high;
1802	}
1803
1804	return sum;
1805}
1806
1807/*
1808 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1809 */
1810unsigned int nr_free_buffer_pages(void)
1811{
1812	return nr_free_zone_pages(gfp_zone(GFP_USER));
1813}
1814EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1815
1816/*
1817 * Amount of free RAM allocatable within all zones
1818 */
1819unsigned int nr_free_pagecache_pages(void)
1820{
1821	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1822}
1823
1824static inline void show_node(struct zone *zone)
1825{
1826	if (NUMA_BUILD)
1827		printk("Node %d ", zone_to_nid(zone));
1828}
1829
1830void si_meminfo(struct sysinfo *val)
1831{
1832	val->totalram = totalram_pages;
1833	val->sharedram = 0;
1834	val->freeram = global_page_state(NR_FREE_PAGES);
1835	val->bufferram = nr_blockdev_pages();
1836	val->totalhigh = totalhigh_pages;
1837	val->freehigh = nr_free_highpages();
1838	val->mem_unit = PAGE_SIZE;
1839}
1840
1841EXPORT_SYMBOL(si_meminfo);
1842
1843#ifdef CONFIG_NUMA
1844void si_meminfo_node(struct sysinfo *val, int nid)
1845{
1846	pg_data_t *pgdat = NODE_DATA(nid);
1847
1848	val->totalram = pgdat->node_present_pages;
1849	val->freeram = node_page_state(nid, NR_FREE_PAGES);
1850#ifdef CONFIG_HIGHMEM
1851	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
1852	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
1853			NR_FREE_PAGES);
1854#else
1855	val->totalhigh = 0;
1856	val->freehigh = 0;
1857#endif
1858	val->mem_unit = PAGE_SIZE;
1859}
1860#endif
1861
1862#define K(x) ((x) << (PAGE_SHIFT-10))
1863
1864/*
1865 * Show the free area list (used e.g. by the SysRq "show memory" handler).
1866 * For each zone the per-order free page counts are also printed, which gives
1867 * a picture of how fragmented free memory is.
1868 */
1869void show_free_areas(void)
1870{
1871	int cpu;
1872	struct zone *zone;
1873
1874	for_each_populated_zone(zone) {
1875		show_node(zone);
1876		printk("%s per-cpu:\n", zone->name);
1877
1878		for_each_online_cpu(cpu) {
1879			struct per_cpu_pageset *pageset;
1880
1881			pageset = zone_pcp(zone, cpu);
1882
1883			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
1884			       cpu, pageset->pcp.high,
1885			       pageset->pcp.batch, pageset->pcp.count);
1886		}
1887	}
1888
1889	printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
1890		" inactive_file:%lu"
1891//TODO:  check/adjust line lengths
1892#ifdef CONFIG_UNEVICTABLE_LRU
1893		" unevictable:%lu"
1894#endif
1895		" dirty:%lu writeback:%lu unstable:%lu\n"
1896		" free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
1897		global_page_state(NR_ACTIVE_ANON),
1898		global_page_state(NR_ACTIVE_FILE),
1899		global_page_state(NR_INACTIVE_ANON),
1900		global_page_state(NR_INACTIVE_FILE),
1901#ifdef CONFIG_UNEVICTABLE_LRU
1902		global_page_state(NR_UNEVICTABLE),
1903#endif
1904		global_page_state(NR_FILE_DIRTY),
1905		global_page_state(NR_WRITEBACK),
1906		global_page_state(NR_UNSTABLE_NFS),
1907		global_page_state(NR_FREE_PAGES),
1908		global_page_state(NR_SLAB_RECLAIMABLE) +
1909			global_page_state(NR_SLAB_UNRECLAIMABLE),
1910		global_page_state(NR_FILE_MAPPED),
1911		global_page_state(NR_PAGETABLE),
1912		global_page_state(NR_BOUNCE));
1913
1914	for_each_populated_zone(zone) {
1915		int i;
1916
1917		show_node(zone);
1918		printk("%s"
1919			" free:%lukB"
1920			" min:%lukB"
1921			" low:%lukB"
1922			" high:%lukB"
1923			" active_anon:%lukB"
1924			" inactive_anon:%lukB"
1925			" active_file:%lukB"
1926			" inactive_file:%lukB"
1927#ifdef CONFIG_UNEVICTABLE_LRU
1928			" unevictable:%lukB"
1929#endif
1930			" present:%lukB"
1931			" pages_scanned:%lu"
1932			" all_unreclaimable? %s"
1933			"\n",
1934			zone->name,
1935			K(zone_page_state(zone, NR_FREE_PAGES)),
1936			K(zone->pages_min),
1937			K(zone->pages_low),
1938			K(zone->pages_high),
1939			K(zone_page_state(zone, NR_ACTIVE_ANON)),
1940			K(zone_page_state(zone, NR_INACTIVE_ANON)),
1941			K(zone_page_state(zone, NR_ACTIVE_FILE)),
1942			K(zone_page_state(zone, NR_INACTIVE_FILE)),
1943#ifdef CONFIG_UNEVICTABLE_LRU
1944			K(zone_page_state(zone, NR_UNEVICTABLE)),
1945#endif
1946			K(zone->present_pages),
1947			zone->pages_scanned,
1948			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
1949			);
1950		printk("lowmem_reserve[]:");
1951		for (i = 0; i < MAX_NR_ZONES; i++)
1952			printk(" %lu", zone->lowmem_reserve[i]);
1953		printk("\n");
1954	}
1955
1956	for_each_populated_zone(zone) {
1957		unsigned long nr[MAX_ORDER], flags, order, total = 0;
1958
1959		show_node(zone);
1960		printk("%s: ", zone->name);
1961
1962		spin_lock_irqsave(&zone->lock, flags);
1963		for (order = 0; order < MAX_ORDER; order++) {
1964			nr[order] = zone->free_area[order].nr_free;
1965			total += nr[order] << order;
1966		}
1967		spin_unlock_irqrestore(&zone->lock, flags);
1968		for (order = 0; order < MAX_ORDER; order++)
1969			printk("%lu*%lukB ", nr[order], K(1UL) << order);
1970		printk("= %lukB\n", K(total));
1971	}
1972
1973	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
1974
1975	show_swap_cache_info();
1976}
1977
1978static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
1979{
1980	zoneref->zone = zone;
1981	zoneref->zone_idx = zone_idx(zone);
1982}
1983
1984/*
1985 * Builds allocation fallback zone lists.
1986 *
1987 * Add all populated zones of a node to the zonelist.
1988 */
1989static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
1990				int nr_zones, enum zone_type zone_type)
1991{
1992	struct zone *zone;
1993
1994	BUG_ON(zone_type >= MAX_NR_ZONES);
1995	zone_type++;
1996
1997	do {
1998		zone_type--;
1999		zone = pgdat->node_zones + zone_type;
2000		if (populated_zone(zone)) {
2001			zoneref_set_zone(zone,
2002				&zonelist->_zonerefs[nr_zones++]);
2003			check_highest_zone(zone_type);
2004		}
2005
2006	} while (zone_type);
2007	return nr_zones;
2008}
2009
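/*
 * Example (a sketch, assuming a node with populated DMA, Normal and HighMem
 * zones): build_zonelists_node() walks from the highest zone downwards, so
 * starting at nr_zones == 0 it fills the zonerefs as
 *
 *	_zonerefs[0] = HighMem, _zonerefs[1] = Normal, _zonerefs[2] = DMA
 *
 * and returns 3, i.e. allocations falling back through this zonelist try the
 * highest suitable zone first and ZONE_DMA last.
 */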
2010
2011/*
2012 *  zonelist_order:
2013 *  0 = automatic detection of better ordering.
2014 *  1 = order by ([node] distance, -zonetype)
2015 *  2 = order by (-zonetype, [node] distance)
2016 *
2017 *  On non-NUMA configurations, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE
2018 *  produce the same zonelist, so this parameter is only configurable on NUMA.
2019 */
2020#define ZONELIST_ORDER_DEFAULT  0
2021#define ZONELIST_ORDER_NODE     1
2022#define ZONELIST_ORDER_ZONE     2
2023
2024/* zonelist order in the kernel.
2025 * set_zonelist_order() will set this to NODE or ZONE.
2026 */
2027static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2028static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2029
2030
2031#ifdef CONFIG_NUMA
2032/* The value the user specified, possibly overridden by boot/sysctl configuration */
2033static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2034/* string for sysctl */
2035#define NUMA_ZONELIST_ORDER_LEN	16
2036char numa_zonelist_order[16] = "default";
2037
2038/*
2039 * Interface for configuring zonelist ordering.
2040 * Command line option "numa_zonelist_order"
2041 *	= "[dD]efault"	- default, automatic configuration
2042 *	= "[nN]ode"	- order by node locality, then by zone within node
2043 *	= "[zZ]one"	- order by zone, then by locality within zone
2044 */
2045
2046static int __parse_numa_zonelist_order(char *s)
2047{
2048	if (*s == 'd' || *s == 'D') {
2049		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2050	} else if (*s == 'n' || *s == 'N') {
2051		user_zonelist_order = ZONELIST_ORDER_NODE;
2052	} else if (*s == 'z' || *s == 'Z') {
2053		user_zonelist_order = ZONELIST_ORDER_ZONE;
2054	} else {
2055		printk(KERN_WARNING
2056			"Ignoring invalid numa_zonelist_order value:  "
2057			"%s\n", s);
2058		return -EINVAL;
2059	}
2060	return 0;
2061}
2062
2063static __init int setup_numa_zonelist_order(char *s)
2064{
2065	if (s)
2066		return __parse_numa_zonelist_order(s);
2067	return 0;
2068}
2069early_param("numa_zonelist_order", setup_numa_zonelist_order);
2070
2071/*
2072 * sysctl handler for numa_zonelist_order
2073 */
2074int numa_zonelist_order_handler(ctl_table *table, int write,
2075		struct file *file, void __user *buffer, size_t *length,
2076		loff_t *ppos)
2077{
2078	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2079	int ret;
2080
2081	if (write)
2082		strncpy(saved_string, (char*)table->data,
2083			NUMA_ZONELIST_ORDER_LEN);
2084	ret = proc_dostring(table, write, file, buffer, length, ppos);
2085	if (ret)
2086		return ret;
2087	if (write) {
2088		int oldval = user_zonelist_order;
2089		if (__parse_numa_zonelist_order((char*)table->data)) {
2090			/*
2091			 * bogus value.  restore saved string
2092			 */
2093			strncpy((char*)table->data, saved_string,
2094				NUMA_ZONELIST_ORDER_LEN);
2095			user_zonelist_order = oldval;
2096		} else if (oldval != user_zonelist_order)
2097			build_all_zonelists();
2098	}
2099	return 0;
2100}
2101
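/*
 * Configuration sketch: the ordering above can be chosen at boot time or at
 * run time.  The exact sysctl path below assumes the standard vm sysctl table.
 *
 *	numa_zonelist_order=zone			(kernel command line)
 *	echo node > /proc/sys/vm/numa_zonelist_order	(at run time)
 *
 * An invalid value is rejected by __parse_numa_zonelist_order() and the
 * previous setting is restored; a valid change triggers build_all_zonelists().
 */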
2102
2103#define MAX_NODE_LOAD (num_online_nodes())
2104static int node_load[MAX_NUMNODES];
2105
2106/**
2107 * find_next_best_node - find the next node that should appear in a given node's fallback list
2108 * @node: node whose fallback list we're appending
2109 * @used_node_mask: nodemask_t of already used nodes
2110 *
2111 * We use a number of factors to determine which is the next node that should
2112 * appear on a given node's fallback list.  The node should not have appeared
2113 * already in @node's fallback list, and it should be the next closest node
2114 * according to the distance array (which contains arbitrary distance values
2115 * from each node to each node in the system), and should also prefer nodes
2116 * with no CPUs, since presumably they'll have very little allocation pressure
2117 * on them otherwise.
2118 * It returns -1 if no node is found.
2119 */
2120static int find_next_best_node(int node, nodemask_t *used_node_mask)
2121{
2122	int n, val;
2123	int min_val = INT_MAX;
2124	int best_node = -1;
2125	const struct cpumask *tmp = cpumask_of_node(0);
2126
2127	/* Use the local node if we haven't already */
2128	if (!node_isset(node, *used_node_mask)) {
2129		node_set(node, *used_node_mask);
2130		return node;
2131	}
2132
2133	for_each_node_state(n, N_HIGH_MEMORY) {
2134
2135		/* Don't want a node to appear more than once */
2136		if (node_isset(n, *used_node_mask))
2137			continue;
2138
2139		/* Use the distance array to find the distance */
2140		val = node_distance(node, n);
2141
2142		/* Penalize nodes under us ("prefer the next node") */
2143		val += (n < node);
2144
2145		/* Give preference to headless and unused nodes */
2146		tmp = cpumask_of_node(n);
2147		if (!cpumask_empty(tmp))
2148			val += PENALTY_FOR_NODE_WITH_CPUS;
2149
2150		/* Slight preference for less loaded node */
2151		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2152		val += node_load[n];
2153
2154		if (val < min_val) {
2155			min_val = val;
2156			best_node = n;
2157		}
2158	}
2159
2160	if (best_node >= 0)
2161		node_set(best_node, *used_node_mask);
2162
2163	return best_node;
2164}
2165
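/*
 * Worked example (a sketch; assumes PENALTY_FOR_NODE_WITH_CPUS == 1 and no
 * accumulated node_load): picking the next fallback node for node 0 with two
 * candidates, node 1 at distance 20 with CPUs and node 2 at distance 30
 * without CPUs:
 *
 *	node 1: val = (20 + 0 + 1) * MAX_NODE_LOAD * MAX_NUMNODES
 *	node 2: val = (30 + 0 + 0) * MAX_NODE_LOAD * MAX_NUMNODES
 *
 * node 1 has the lower score and is chosen, i.e. distance dominates the CPU
 * penalty; node_load[] only breaks ties within the scaled values.
 */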
2166
2167/*
2168 * Build zonelists ordered by node and zones within node.
2169 * This results in maximum locality--normal zone overflows into local
2170 * DMA zone, if any--but risks exhausting DMA zone.
2171 */
2172static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2173{
2174	int j;
2175	struct zonelist *zonelist;
2176
2177	zonelist = &pgdat->node_zonelists[0];
2178	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2179		;
2180	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2181							MAX_NR_ZONES - 1);
2182	zonelist->_zonerefs[j].zone = NULL;
2183	zonelist->_zonerefs[j].zone_idx = 0;
2184}
2185
2186/*
2187 * Build gfp_thisnode zonelists
2188 */
2189static void build_thisnode_zonelists(pg_data_t *pgdat)
2190{
2191	int j;
2192	struct zonelist *zonelist;
2193
2194	zonelist = &pgdat->node_zonelists[1];
2195	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2196	zonelist->_zonerefs[j].zone = NULL;
2197	zonelist->_zonerefs[j].zone_idx = 0;
2198}
2199
2200/*
2201 * Build zonelists ordered by zone and nodes within zones.
2202 * This results in conserving DMA zone[s] until all Normal memory is
2203 * exhausted, but results in overflowing to remote node while memory
2204 * may still exist in local DMA zone.
2205 */
2206static int node_order[MAX_NUMNODES];
2207
2208static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2209{
2210	int pos, j, node;
2211	int zone_type;		/* needs to be signed */
2212	struct zone *z;
2213	struct zonelist *zonelist;
2214
2215	zonelist = &pgdat->node_zonelists[0];
2216	pos = 0;
2217	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2218		for (j = 0; j < nr_nodes; j++) {
2219			node = node_order[j];
2220			z = &NODE_DATA(node)->node_zones[zone_type];
2221			if (populated_zone(z)) {
2222				zoneref_set_zone(z,
2223					&zonelist->_zonerefs[pos++]);
2224				check_highest_zone(zone_type);
2225			}
2226		}
2227	}
2228	zonelist->_zonerefs[pos].zone = NULL;
2229	zonelist->_zonerefs[pos].zone_idx = 0;
2230}
2231
2232static int default_zonelist_order(void)
2233{
2234	int nid, zone_type;
2235	unsigned long low_kmem_size, total_size;
2236	struct zone *z;
2237	int average_size;
2238	/*
2239	 * ZONE_DMA and ZONE_DMA32 can be a very small area in the system.
2240	 * If they are really small and used heavily, the system can fall
2241	 * into OOM very easily.
2242	 * This function detects the ZONE_DMA/DMA32 size and configures the zone order.
2243	 */
2244	/* Is there a ZONE_NORMAL? (e.g. ppc has only a DMA zone) */
2245	low_kmem_size = 0;
2246	total_size = 0;
2247	for_each_online_node(nid) {
2248		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2249			z = &NODE_DATA(nid)->node_zones[zone_type];
2250			if (populated_zone(z)) {
2251				if (zone_type < ZONE_NORMAL)
2252					low_kmem_size += z->present_pages;
2253				total_size += z->present_pages;
2254			}
2255		}
2256	}
2257	if (!low_kmem_size ||  /* there is no DMA area. */
2258	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2259		return ZONELIST_ORDER_NODE;
2260	/*
2261	 * Look into each node's config.
2262	 * If there is a node whose DMA/DMA32 memory occupies a large fraction
2263	 * of its local memory, NODE_ORDER may be suitable.
2264	 */
2265	average_size = total_size /
2266				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2267	for_each_online_node(nid) {
2268		low_kmem_size = 0;
2269		total_size = 0;
2270		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2271			z = &NODE_DATA(nid)->node_zones[zone_type];
2272			if (populated_zone(z)) {
2273				if (zone_type < ZONE_NORMAL)
2274					low_kmem_size += z->present_pages;
2275				total_size += z->present_pages;
2276			}
2277		}
2278		if (low_kmem_size &&
2279		    total_size > average_size && /* ignore small node */
2280		    low_kmem_size > total_size * 70/100)
2281			return ZONELIST_ORDER_NODE;
2282	}
2283	return ZONELIST_ORDER_ZONE;
2284}
2285
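/*
 * Worked example (a sketch with made-up sizes): a two-node machine with 8GB
 * total, of which DMA+DMA32 hold 3GB.  Globally 3GB < 8GB/2, so the per-node
 * check runs.  If node 0 has 4GB of which the full 3GB is DMA/DMA32, then
 * node 0 is not "small" (4GB > the ~2.7GB average) and 3GB > 70% of 4GB, so
 * ZONELIST_ORDER_NODE is chosen: that node's memory is mostly low memory
 * anyway, so preferring locality beats conserving the low zones.  Otherwise
 * ZONELIST_ORDER_ZONE is returned to conserve the small DMA/DMA32 zones.
 */
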
2286static void set_zonelist_order(void)
2287{
2288	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2289		current_zonelist_order = default_zonelist_order();
2290	else
2291		current_zonelist_order = user_zonelist_order;
2292}
2293
2294static void build_zonelists(pg_data_t *pgdat)
2295{
2296	int j, node, load;
2297	enum zone_type i;
2298	nodemask_t used_mask;
2299	int local_node, prev_node;
2300	struct zonelist *zonelist;
2301	int order = current_zonelist_order;
2302
2303	/* initialize zonelists */
2304	for (i = 0; i < MAX_ZONELISTS; i++) {
2305		zonelist = pgdat->node_zonelists + i;
2306		zonelist->_zonerefs[0].zone = NULL;
2307		zonelist->_zonerefs[0].zone_idx = 0;
2308	}
2309
2310	/* NUMA-aware ordering of nodes */
2311	local_node = pgdat->node_id;
2312	load = num_online_nodes();
2313	prev_node = local_node;
2314	nodes_clear(used_mask);
2315
2316	memset(node_load, 0, sizeof(node_load));
2317	memset(node_order, 0, sizeof(node_order));
2318	j = 0;
2319
2320	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2321		int distance = node_distance(local_node, node);
2322
2323		/*
2324		 * If another node is sufficiently far away then it is better
2325		 * to reclaim pages in a zone before going off node.
2326		 */
2327		if (distance > RECLAIM_DISTANCE)
2328			zone_reclaim_mode = 1;
2329
2330		/*
2331		 * We don't want to put pressure on a particular node,
2332		 * so add a penalty to the first node in the same
2333		 * distance group to make the fallback order round-robin.
2334		 */
2335		if (distance != node_distance(local_node, prev_node))
2336			node_load[node] = load;
2337
2338		prev_node = node;
2339		load--;
2340		if (order == ZONELIST_ORDER_NODE)
2341			build_zonelists_in_node_order(pgdat, node);
2342		else
2343			node_order[j++] = node;	/* remember order */
2344	}
2345
2346	if (order == ZONELIST_ORDER_ZONE) {
2347		/* calculate node order -- i.e., DMA last! */
2348		build_zonelists_in_zone_order(pgdat, j);
2349	}
2350
2351	build_thisnode_zonelists(pgdat);
2352}
2353
2354/* Construct the zonelist performance cache - see mmzone.h for details */
2355static void build_zonelist_cache(pg_data_t *pgdat)
2356{
2357	struct zonelist *zonelist;
2358	struct zonelist_cache *zlc;
2359	struct zoneref *z;
2360
2361	zonelist = &pgdat->node_zonelists[0];
2362	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2363	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2364	for (z = zonelist->_zonerefs; z->zone; z++)
2365		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2366}
2367
2368
2369#else	/* CONFIG_NUMA */
2370
2371static void set_zonelist_order(void)
2372{
2373	current_zonelist_order = ZONELIST_ORDER_ZONE;
2374}
2375
2376static void build_zonelists(pg_data_t *pgdat)
2377{
2378	int node, local_node;
2379	enum zone_type j;
2380	struct zonelist *zonelist;
2381
2382	local_node = pgdat->node_id;
2383
2384	zonelist = &pgdat->node_zonelists[0];
2385	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2386
2387	/*
2388	 * Now we build the zonelist so that it contains the zones
2389	 * of all the other nodes.
2390	 * We don't want to pressure a particular node, so when
2391	 * building the zones for node N, we make sure that the
2392	 * zones coming right after the local ones are those from
2393	 * node N+1 (wrapping around past the last node).
2394	 */
2395	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2396		if (!node_online(node))
2397			continue;
2398		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2399							MAX_NR_ZONES - 1);
2400	}
2401	for (node = 0; node < local_node; node++) {
2402		if (!node_online(node))
2403			continue;
2404		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2405							MAX_NR_ZONES - 1);
2406	}
2407
2408	zonelist->_zonerefs[j].zone = NULL;
2409	zonelist->_zonerefs[j].zone_idx = 0;
2410}
2411
2412/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2413static void build_zonelist_cache(pg_data_t *pgdat)
2414{
2415	pgdat->node_zonelists[0].zlcache_ptr = NULL;
2416}
2417
2418#endif	/* CONFIG_NUMA */
2419
2420/* The return type is int only because stop_machine() requires it */
2421static int __build_all_zonelists(void *dummy)
2422{
2423	int nid;
2424
2425	for_each_online_node(nid) {
2426		pg_data_t *pgdat = NODE_DATA(nid);
2427
2428		build_zonelists(pgdat);
2429		build_zonelist_cache(pgdat);
2430	}
2431	return 0;
2432}
2433
2434void build_all_zonelists(void)
2435{
2436	set_zonelist_order();
2437
2438	if (system_state == SYSTEM_BOOTING) {
2439		__build_all_zonelists(NULL);
2440		mminit_verify_zonelist();
2441		cpuset_init_current_mems_allowed();
2442	} else {
2443		/* we have to stop all cpus to guarantee there is no user
2444		   of zonelist */
2445		stop_machine(__build_all_zonelists, NULL, NULL);
2446		/* cpuset refresh routine should be here */
2447	}
2448	vm_total_pages = nr_free_pagecache_pages();
2449	/*
2450	 * Disable grouping by mobility if the number of pages in the
2451	 * system is too low to allow the mechanism to work. It would be
2452	 * more accurate, but expensive to check per-zone. This check is
2453	 * made on memory-hotadd so a system can start with mobility
2454	 * disabled and enable it later
2455	 */
2456	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2457		page_group_by_mobility_disabled = 1;
2458	else
2459		page_group_by_mobility_disabled = 0;
2460
2461	printk("Built %i zonelists in %s order, mobility grouping %s.  "
2462		"Total pages: %ld\n",
2463			num_online_nodes(),
2464			zonelist_order_name[current_zonelist_order],
2465			page_group_by_mobility_disabled ? "off" : "on",
2466			vm_total_pages);
2467#ifdef CONFIG_NUMA
2468	printk("Policy zone: %s\n", zone_names[policy_zone]);
2469#endif
2470}
2471
2472/*
2473 * Helper functions to size the waitqueue hash table.
2474 * Essentially these want to choose hash table sizes sufficiently
2475 * large so that collisions trying to wait on pages are rare.
2476 * But in fact, the number of active page waitqueues on typical
2477 * systems is ridiculously low, less than 200. So this is even
2478 * conservative, even though it seems large.
2479 *
2480 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2481 * waitqueues, i.e. the size of the waitq table given the number of pages.
2482 */
2483#define PAGES_PER_WAITQUEUE	256
2484
2485#ifndef CONFIG_MEMORY_HOTPLUG
2486static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2487{
2488	unsigned long size = 1;
2489
2490	pages /= PAGES_PER_WAITQUEUE;
2491
2492	while (size < pages)
2493		size <<= 1;
2494
2495	/*
2496	 * Once we have dozens or even hundreds of threads sleeping
2497	 * on IO we've got bigger problems than wait queue collision.
2498	 * Limit the size of the wait table to a reasonable size.
2499	 */
2500	size = min(size, 4096UL);
2501
2502	return max(size, 4UL);
2503}
2504#else
2505/*
2506 * A zone's size might be changed by hot-add, so it is not possible to determine
2507 * a suitable size for its wait_table.  So we use the maximum size now.
2508 *
2509 * The max wait table size is 4096 x sizeof(wait_queue_head_t), i.e.:
2510 *
2511 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2512 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2513 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2514 *
2515 * The maximum number of entries is used once a zone's memory reaches
2516 * (512K + 256) pages or more when sized the traditional way (see above).  It equals:
2517 *
2518 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2519 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2520 *    powerpc (64K page size)             : =  (32G +16M)byte.
2521 */
2522static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2523{
2524	return 4096UL;
2525}
2526#endif
2527
2528/*
2529 * This is an integer logarithm so that shifts can be used later
2530 * to extract the more random high bits from the multiplicative
2531 * hash function before the remainder is taken.
2532 */
2533static inline unsigned long wait_table_bits(unsigned long size)
2534{
2535	return ffz(~size);
2536}
2537
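/*
 * Worked example (a sketch, assuming 4K pages and !CONFIG_MEMORY_HOTPLUG):
 * a 1GB zone has 262144 pages, so
 *
 *	wait_table_hash_nr_entries(262144) = 262144 / 256, rounded up to a
 *	power of two = 1024 entries (within the [4, 4096] clamp), and
 *	wait_table_bits(1024) = ffz(~1024) = 10,
 *
 * i.e. the hash uses the top 10 bits produced by the multiplicative hash.
 */
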
2538#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2539
2540/*
2541 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2542 * of blocks reserved is based on zone->pages_min. The memory within the
2543 * reserve will tend to store contiguous free pages. Setting min_free_kbytes
2544 * higher will lead to a bigger reserve which will get freed as contiguous
2545 * blocks as reclaim kicks in.
2546 */
2547static void setup_zone_migrate_reserve(struct zone *zone)
2548{
2549	unsigned long start_pfn, pfn, end_pfn;
2550	struct page *page;
2551	unsigned long reserve, block_migratetype;
2552
2553	/* Get the start pfn, end pfn and the number of blocks to reserve */
2554	start_pfn = zone->zone_start_pfn;
2555	end_pfn = start_pfn + zone->spanned_pages;
2556	reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
2557							pageblock_order;
2558
2559	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2560		if (!pfn_valid(pfn))
2561			continue;
2562		page = pfn_to_page(pfn);
2563
2564		/* Watch out for overlapping nodes */
2565		if (page_to_nid(page) != zone_to_nid(zone))
2566			continue;
2567
2568		/* Blocks with reserved pages will never be freed, skip them. */
2569		if (PageReserved(page))
2570			continue;
2571
2572		block_migratetype = get_pageblock_migratetype(page);
2573
2574		/* If this block is reserved, account for it */
2575		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2576			reserve--;
2577			continue;
2578		}
2579
2580		/* Suitable for reserving if this block is movable */
2581		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2582			set_pageblock_migratetype(page, MIGRATE_RESERVE);
2583			move_freepages_block(zone, page, MIGRATE_RESERVE);
2584			reserve--;
2585			continue;
2586		}
2587
2588		/*
2589		 * If the reserve is met and this is a previously reserved block,
2590		 * take it back.
2591		 */
2592		if (block_migratetype == MIGRATE_RESERVE) {
2593			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2594			move_freepages_block(zone, page, MIGRATE_MOVABLE);
2595		}
2596	}
2597}
2598
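/*
 * Worked example (a sketch; assumes pageblock_order == 9, i.e.
 * pageblock_nr_pages == 512): with zone->pages_min == 1027,
 *
 *	reserve = roundup(1027, 512) >> 9 = 1536 >> 9 = 3
 *
 * so the first three suitable MIGRATE_MOVABLE pageblocks in the zone are
 * converted to MIGRATE_RESERVE, and any surplus MIGRATE_RESERVE blocks
 * beyond that are handed back to MIGRATE_MOVABLE.
 */
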
2599/*
2600 * Initially all pages are reserved - free ones are freed
2601 * up by free_all_bootmem() once the early boot process is
2602 * done. Non-atomic initialization, single-pass.
2603 */
2604void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2605		unsigned long start_pfn, enum memmap_context context)
2606{
2607	struct page *page;
2608	unsigned long end_pfn = start_pfn + size;
2609	unsigned long pfn;
2610	struct zone *z;
2611
2612	if (highest_memmap_pfn < end_pfn - 1)
2613		highest_memmap_pfn = end_pfn - 1;
2614
2615	z = &NODE_DATA(nid)->node_zones[zone];
2616	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2617		/*
2618		 * There can be holes in boot-time mem_map[]s
2619		 * handed to this function.  They do not
2620		 * exist on hotplugged memory.
2621		 */
2622		if (context == MEMMAP_EARLY) {
2623			if (!early_pfn_valid(pfn))
2624				continue;
2625			if (!early_pfn_in_nid(pfn, nid))
2626				continue;
2627		}
2628		page = pfn_to_page(pfn);
2629		set_page_links(page, zone, nid, pfn);
2630		mminit_verify_page_links(page, zone, nid, pfn);
2631		init_page_count(page);
2632		reset_page_mapcount(page);
2633		SetPageReserved(page);
2634		/*
2635		 * Mark the block movable so that blocks are reserved for
2636		 * movable at startup. This will force kernel allocations
2637		 * to reserve their blocks rather than leaking throughout
2638		 * the address space during boot when many long-lived
2639		 * kernel allocations are made. Later some blocks near
2640		 * the start are marked MIGRATE_RESERVE by
2641		 * setup_zone_migrate_reserve()
2642		 *
2643		 * The pageblock bitmap is created for the zone's valid pfn range,
2644		 * but the memmap can be created for invalid pages (for alignment),
2645		 * so check here that set_pageblock_migratetype() is not called
2646		 * for a pfn outside the zone.
2647		 */
2648		if ((z->zone_start_pfn <= pfn)
2649		    && (pfn < z->zone_start_pfn + z->spanned_pages)
2650		    && !(pfn & (pageblock_nr_pages - 1)))
2651			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2652
2653		INIT_LIST_HEAD(&page->lru);
2654#ifdef WANT_PAGE_VIRTUAL
2655		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
2656		if (!is_highmem_idx(zone))
2657			set_page_address(page, __va(pfn << PAGE_SHIFT));
2658#endif
2659	}
2660}
2661
2662static void __meminit zone_init_free_lists(struct zone *zone)
2663{
2664	int order, t;
2665	for_each_migratetype_order(order, t) {
2666		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2667		zone->free_area[order].nr_free = 0;
2668	}
2669}
2670
2671#ifndef __HAVE_ARCH_MEMMAP_INIT
2672#define memmap_init(size, nid, zone, start_pfn) \
2673	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
2674#endif
2675
2676static int zone_batchsize(struct zone *zone)
2677{
2678#ifdef CONFIG_MMU
2679	int batch;
2680
2681	/*
2682	 * The per-cpu-pages pools are set to around 1/1000th of the
2683	 * size of the zone, but no more than half a megabyte.
2684	 *
2685	 * We don't know how big the CPU cache is, so this is a guess.
2686	 */
2687	batch = zone->present_pages / 1024;
2688	if (batch * PAGE_SIZE > 512 * 1024)
2689		batch = (512 * 1024) / PAGE_SIZE;
2690	batch /= 4;		/* We effectively *= 4 below */
2691	if (batch < 1)
2692		batch = 1;
2693
2694	/*
2695	 * Clamp the batch to a 2^n - 1 value. Having a power
2696	 * of 2 value was found to be more likely to have
2697	 * suboptimal cache aliasing properties in some cases.
2698	 *
2699	 * For example if 2 tasks are alternately allocating
2700	 * batches of pages, one task can end up with a lot
2701	 * of pages of one half of the possible page colors
2702	 * and the other with pages of the other colors.
2703	 */
2704	batch = rounddown_pow_of_two(batch + batch/2) - 1;
2705
2706	return batch;
2707
2708#else
2709	/* The deferral and batching of frees should be suppressed under NOMMU
2710	 * conditions.
2711	 *
2712	 * The problem is that NOMMU needs to be able to allocate large chunks
2713	 * of contiguous memory as there's no hardware page translation to
2714	 * assemble apparent contiguous memory from discontiguous pages.
2715	 *
2716	 * Queueing large contiguous runs of pages for batching, however,
2717	 * causes the pages to actually be freed in smaller chunks.  As there
2718	 * can be a significant delay between the individual batches being
2719	 * recycled, this leads to the once large chunks of space being
2720	 * fragmented and becoming unavailable for high-order allocations.
2721	 */
2722	return 0;
2723#endif
2724}
2725
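/*
 * Worked example (a sketch, assuming CONFIG_MMU and 4K pages): for a 256MB
 * zone, present_pages == 65536, so
 *
 *	batch = 65536 / 1024 = 64		(well under the 512K cap)
 *	batch /= 4				-> 16
 *	rounddown_pow_of_two(16 + 8) - 1	-> 15
 *
 * giving a per-cpu batch of 15 pages, and via setup_pageset() below a
 * pcp->high watermark of 6 * 15 = 90 pages.
 */
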
2726static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2727{
2728	struct per_cpu_pages *pcp;
2729
2730	memset(p, 0, sizeof(*p));
2731
2732	pcp = &p->pcp;
2733	pcp->count = 0;
2734	pcp->high = 6 * batch;
2735	pcp->batch = max(1UL, 1 * batch);
2736	INIT_LIST_HEAD(&pcp->list);
2737}
2738
2739/*
2740 * setup_pagelist_highmark() sets the high water mark of the hot per-cpu
2741 * pagelist of pageset p to the value high.
2742 */
2743
2744static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2745				unsigned long high)
2746{
2747	struct per_cpu_pages *pcp;
2748
2749	pcp = &p->pcp;
2750	pcp->high = high;
2751	pcp->batch = max(1UL, high/4);
2752	if ((high/4) > (PAGE_SHIFT * 8))
2753		pcp->batch = PAGE_SHIFT * 8;
2754}
2755
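/*
 * Worked example (a sketch, assuming PAGE_SHIFT == 12): with
 * percpu_pagelist_fraction == 8 on a zone of 65536 present pages,
 * process_zones() calls this with high = 65536 / 8 = 8192, so
 *
 *	pcp->high  = 8192
 *	pcp->batch = min(8192 / 4, PAGE_SHIFT * 8) = 96
 *
 * i.e. the batch is clamped to 96 pages even though high/4 would be 2048.
 */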
2756
2757#ifdef CONFIG_NUMA
2758/*
2759 * Boot pageset table. One per cpu which is going to be used for all
2760 * zones and all nodes. The parameters will be set in such a way
2761 * that an item put on a list will immediately be handed over to
2762 * the buddy list. This is safe since pageset manipulation is done
2763 * with interrupts disabled.
2764 *
2765 * Some NUMA counter updates may also be caught by the boot pagesets.
2766 *
2767 * The boot_pagesets must be kept even after bootup is complete for
2768 * unused processors and/or zones. They do play a role for bootstrapping
2769 * hotplugged processors.
2770 *
2771 * zoneinfo_show() and maybe other functions do
2772 * not check if the processor is online before following the pageset pointer.
2773 * Other parts of the kernel may not check if the zone is available.
2774 */
2775static struct per_cpu_pageset boot_pageset[NR_CPUS];
2776
2777/*
2778 * Dynamically allocate memory for the
2779 * per cpu pageset array in struct zone.
2780 */
2781static int __cpuinit process_zones(int cpu)
2782{
2783	struct zone *zone, *dzone;
2784	int node = cpu_to_node(cpu);
2785
2786	node_set_state(node, N_CPU);	/* this node has a cpu */
2787
2788	for_each_populated_zone(zone) {
2789		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
2790					 GFP_KERNEL, node);
2791		if (!zone_pcp(zone, cpu))
2792			goto bad;
2793
2794		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
2795
2796		if (percpu_pagelist_fraction)
2797			setup_pagelist_highmark(zone_pcp(zone, cpu),
2798			 	(zone->present_pages / percpu_pagelist_fraction));
2799	}
2800
2801	return 0;
2802bad:
2803	for_each_zone(dzone) {
2804		if (!populated_zone(dzone))
2805			continue;
2806		if (dzone == zone)
2807			break;
2808		kfree(zone_pcp(dzone, cpu));
2809		zone_pcp(dzone, cpu) = NULL;
2810	}
2811	return -ENOMEM;
2812}
2813
2814static inline void free_zone_pagesets(int cpu)
2815{
2816	struct zone *zone;
2817
2818	for_each_zone(zone) {
2819		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2820
2821		/* Free per_cpu_pageset if it is slab allocated */
2822		if (pset != &boot_pageset[cpu])
2823			kfree(pset);
2824		zone_pcp(zone, cpu) = NULL;
2825	}
2826}
2827
2828static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
2829		unsigned long action,
2830		void *hcpu)
2831{
2832	int cpu = (long)hcpu;
2833	int ret = NOTIFY_OK;
2834
2835	switch (action) {
2836	case CPU_UP_PREPARE:
2837	case CPU_UP_PREPARE_FROZEN:
2838		if (process_zones(cpu))
2839			ret = NOTIFY_BAD;
2840		break;
2841	case CPU_UP_CANCELED:
2842	case CPU_UP_CANCELED_FROZEN:
2843	case CPU_DEAD:
2844	case CPU_DEAD_FROZEN:
2845		free_zone_pagesets(cpu);
2846		break;
2847	default:
2848		break;
2849	}
2850	return ret;
2851}
2852
2853static struct notifier_block __cpuinitdata pageset_notifier =
2854	{ &pageset_cpuup_callback, NULL, 0 };
2855
2856void __init setup_per_cpu_pageset(void)
2857{
2858	int err;
2859
2860	/* Initialize per_cpu_pageset for cpu 0.
2861	 * A cpuup callback will do this for every cpu
2862	 * as it comes online
2863	 */
2864	err = process_zones(smp_processor_id());
2865	BUG_ON(err);
2866	register_cpu_notifier(&pageset_notifier);
2867}
2868
2869#endif
2870
2871static noinline __init_refok
2872int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
2873{
2874	int i;
2875	struct pglist_data *pgdat = zone->zone_pgdat;
2876	size_t alloc_size;
2877
2878	/*
2879	 * The per-page waitqueue mechanism uses hashed waitqueues
2880	 * per zone.
2881	 */
2882	zone->wait_table_hash_nr_entries =
2883		 wait_table_hash_nr_entries(zone_size_pages);
2884	zone->wait_table_bits =
2885		wait_table_bits(zone->wait_table_hash_nr_entries);
2886	alloc_size = zone->wait_table_hash_nr_entries
2887					* sizeof(wait_queue_head_t);
2888
2889	if (!slab_is_available()) {
2890		zone->wait_table = (wait_queue_head_t *)
2891			alloc_bootmem_node(pgdat, alloc_size);
2892	} else {
2893		/*
2894		 * This case means that a zone whose size was 0 gets new memory
2895		 * via memory hot-add.
2896		 * If an entirely new node was hot-added, vmalloc() cannot yet
2897		 * use that node's memory, so the wait_table ends up being
2898		 * allocated from another node; making it use the new node's
2899		 * own memory would need further work.
2902		 */
2903		zone->wait_table = vmalloc(alloc_size);
2904	}
2905	if (!zone->wait_table)
2906		return -ENOMEM;
2907
2908	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
2909		init_waitqueue_head(zone->wait_table + i);
2910
2911	return 0;
2912}
2913
2914static __meminit void zone_pcp_init(struct zone *zone)
2915{
2916	int cpu;
2917	unsigned long batch = zone_batchsize(zone);
2918
2919	for (cpu = 0; cpu < NR_CPUS; cpu++) {
2920#ifdef CONFIG_NUMA
2921		/* Early boot. Slab allocator not functional yet */
2922		zone_pcp(zone, cpu) = &boot_pageset[cpu];
2923		setup_pageset(&boot_pageset[cpu], 0);
2924#else
2925		setup_pageset(zone_pcp(zone, cpu), batch);
2926#endif
2927	}
2928	if (zone->present_pages)
2929		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
2930			zone->name, zone->present_pages, batch);
2931}
2932
2933__meminit int init_currently_empty_zone(struct zone *zone,
2934					unsigned long zone_start_pfn,
2935					unsigned long size,
2936					enum memmap_context context)
2937{
2938	struct pglist_data *pgdat = zone->zone_pgdat;
2939	int ret;
2940	ret = zone_wait_table_init(zone, size);
2941	if (ret)
2942		return ret;
2943	pgdat->nr_zones = zone_idx(zone) + 1;
2944
2945	zone->zone_start_pfn = zone_start_pfn;
2946
2947	mminit_dprintk(MMINIT_TRACE, "memmap_init",
2948			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
2949			pgdat->node_id,
2950			(unsigned long)zone_idx(zone),
2951			zone_start_pfn, (zone_start_pfn + size));
2952
2953	zone_init_free_lists(zone);
2954
2955	return 0;
2956}
2957
2958#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
2959/*
2960 * Basic iterator support. Return the first range of PFNs for a node
2961 * Note: nid == MAX_NUMNODES returns first region regardless of node
2962 */
2963static int __meminit first_active_region_index_in_nid(int nid)
2964{
2965	int i;
2966
2967	for (i = 0; i < nr_nodemap_entries; i++)
2968		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
2969			return i;
2970
2971	return -1;
2972}
2973
2974/*
2975 * Basic iterator support. Return the next active range of PFNs for a node
2976 * Note: nid == MAX_NUMNODES returns next region regardless of node
2977 */
2978static int __meminit next_active_region_index_in_nid(int index, int nid)
2979{
2980	for (index = index + 1; index < nr_nodemap_entries; index++)
2981		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
2982			return index;
2983
2984	return -1;
2985}
2986
2987#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
2988/*
2989 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
2990 * Architectures may implement their own version but if add_active_range()
2991 * was used and there are no special requirements, this is a convenient
2992 * alternative
2993 */
2994int __meminit __early_pfn_to_nid(unsigned long pfn)
2995{
2996	int i;
2997
2998	for (i = 0; i < nr_nodemap_entries; i++) {
2999		unsigned long start_pfn = early_node_map[i].start_pfn;
3000		unsigned long end_pfn = early_node_map[i].end_pfn;
3001
3002		if (start_pfn <= pfn && pfn < end_pfn)
3003			return early_node_map[i].nid;
3004	}
3005	/* This is a memory hole */
3006	return -1;
3007}
3008#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3009
3010int __meminit early_pfn_to_nid(unsigned long pfn)
3011{
3012	int nid;
3013
3014	nid = __early_pfn_to_nid(pfn);
3015	if (nid >= 0)
3016		return nid;
3017	/* just returns 0 */
3018	return 0;
3019}
3020
3021#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3022bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3023{
3024	int nid;
3025
3026	nid = __early_pfn_to_nid(pfn);
3027	if (nid >= 0 && nid != node)
3028		return false;
3029	return true;
3030}
3031#endif
3032
3033/* Basic iterator support to walk early_node_map[] */
3034#define for_each_active_range_index_in_nid(i, nid) \
3035	for (i = first_active_region_index_in_nid(nid); i != -1; \
3036				i = next_active_region_index_in_nid(i, nid))
3037
3038/**
3039 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3040 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3041 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3042 *
3043 * If an architecture guarantees that all ranges registered with
3044 * add_active_ranges() contain no holes and may be freed, this
3045 * function may be used instead of calling free_bootmem() manually.
3046 */
3047void __init free_bootmem_with_active_regions(int nid,
3048						unsigned long max_low_pfn)
3049{
3050	int i;
3051
3052	for_each_active_range_index_in_nid(i, nid) {
3053		unsigned long size_pages = 0;
3054		unsigned long end_pfn = early_node_map[i].end_pfn;
3055
3056		if (early_node_map[i].start_pfn >= max_low_pfn)
3057			continue;
3058
3059		if (end_pfn > max_low_pfn)
3060			end_pfn = max_low_pfn;
3061
3062		size_pages = end_pfn - early_node_map[i].start_pfn;
3063		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3064				PFN_PHYS(early_node_map[i].start_pfn),
3065				size_pages << PAGE_SHIFT);
3066	}
3067}
3068
3069void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3070{
3071	int i;
3072	int ret;
3073
3074	for_each_active_range_index_in_nid(i, nid) {
3075		ret = work_fn(early_node_map[i].start_pfn,
3076			      early_node_map[i].end_pfn, data);
3077		if (ret)
3078			break;
3079	}
3080}
3081/**
3082 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3083 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3084 *
3085 * If an architecture guarantees that all ranges registered with
3086 * add_active_ranges() contain no holes and may be freed, this
3087 * function may be used instead of calling memory_present() manually.
3088 */
3089void __init sparse_memory_present_with_active_regions(int nid)
3090{
3091	int i;
3092
3093	for_each_active_range_index_in_nid(i, nid)
3094		memory_present(early_node_map[i].nid,
3095				early_node_map[i].start_pfn,
3096				early_node_map[i].end_pfn);
3097}
3098
3099/**
3100 * get_pfn_range_for_nid - Return the start and end page frames for a node
3101 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3102 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3103 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3104 *
3105 * It returns the start and end page frame of a node based on information
3106 * provided by an arch calling add_active_range(). If called for a node
3107 * with no available memory, a warning is printed and the start and end
3108 * PFNs will be 0.
3109 */
3110void __meminit get_pfn_range_for_nid(unsigned int nid,
3111			unsigned long *start_pfn, unsigned long *end_pfn)
3112{
3113	int i;
3114	*start_pfn = -1UL;
3115	*end_pfn = 0;
3116
3117	for_each_active_range_index_in_nid(i, nid) {
3118		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3119		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3120	}
3121
3122	if (*start_pfn == -1UL)
3123		*start_pfn = 0;
3124}
3125
3126/*
3127 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3128 * assumption is made that zones within a node are ordered in monotonically
3129 * increasing memory addresses so that the "highest" populated zone is used.
3130 */
3131static void __init find_usable_zone_for_movable(void)
3132{
3133	int zone_index;
3134	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3135		if (zone_index == ZONE_MOVABLE)
3136			continue;
3137
3138		if (arch_zone_highest_possible_pfn[zone_index] >
3139				arch_zone_lowest_possible_pfn[zone_index])
3140			break;
3141	}
3142
3143	VM_BUG_ON(zone_index == -1);
3144	movable_zone = zone_index;
3145}
3146
3147/*
3148 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3149 * because it is sized independently of the architecture. Unlike the other zones,
3150 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3151 * in each node depending on the size of each node and how evenly kernelcore
3152 * is distributed. This helper function adjusts the zone ranges
3153 * provided by the architecture for a given node by using the end of the
3154 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3155 * zones within a node are in order of monotonically increasing memory addresses.
3156 */
3157static void __meminit adjust_zone_range_for_zone_movable(int nid,
3158					unsigned long zone_type,
3159					unsigned long node_start_pfn,
3160					unsigned long node_end_pfn,
3161					unsigned long *zone_start_pfn,
3162					unsigned long *zone_end_pfn)
3163{
3164	/* Only adjust if ZONE_MOVABLE is on this node */
3165	if (zone_movable_pfn[nid]) {
3166		/* Size ZONE_MOVABLE */
3167		if (zone_type == ZONE_MOVABLE) {
3168			*zone_start_pfn = zone_movable_pfn[nid];
3169			*zone_end_pfn = min(node_end_pfn,
3170				arch_zone_highest_possible_pfn[movable_zone]);
3171
3172		/* Adjust for ZONE_MOVABLE starting within this range */
3173		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3174				*zone_end_pfn > zone_movable_pfn[nid]) {
3175			*zone_end_pfn = zone_movable_pfn[nid];
3176
3177		/* Check if this whole range is within ZONE_MOVABLE */
3178		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3179			*zone_start_pfn = *zone_end_pfn;
3180	}
3181}
3182
3183/*
3184 * Return the number of pages a zone spans in a node, including holes
3185 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3186 */
3187static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3188					unsigned long zone_type,
3189					unsigned long *ignored)
3190{
3191	unsigned long node_start_pfn, node_end_pfn;
3192	unsigned long zone_start_pfn, zone_end_pfn;
3193
3194	/* Get the start and end of the node and zone */
3195	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3196	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3197	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3198	adjust_zone_range_for_zone_movable(nid, zone_type,
3199				node_start_pfn, node_end_pfn,
3200				&zone_start_pfn, &zone_end_pfn);
3201
3202	/* Check that this node has pages within the zone's required range */
3203	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3204		return 0;
3205
3206	/* Move the zone boundaries inside the node if necessary */
3207	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3208	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3209
3210	/* Return the spanned pages */
3211	return zone_end_pfn - zone_start_pfn;
3212}
3213
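/*
 * Worked example (a sketch with made-up PFNs, ignoring any ZONE_MOVABLE
 * adjustment): if the architecture says a zone covers PFNs [4096, 1048576)
 * but this node only spans [262144, 524288), then after clamping
 *
 *	zone_start_pfn = 262144, zone_end_pfn = 524288
 *
 * and the zone spans 262144 pages on this node; a node entirely outside the
 * zone's range contributes 0.
 */
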
3214/*
3215 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3216 * then all holes in the requested range will be accounted for.
3217 */
3218static unsigned long __meminit __absent_pages_in_range(int nid,
3219				unsigned long range_start_pfn,
3220				unsigned long range_end_pfn)
3221{
3222	int i = 0;
3223	unsigned long prev_end_pfn = 0, hole_pages = 0;
3224	unsigned long start_pfn;
3225
3226	/* Find the end_pfn of the first active range of pfns in the node */
3227	i = first_active_region_index_in_nid(nid);
3228	if (i == -1)
3229		return 0;
3230
3231	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3232
3233	/* Account for ranges before physical memory on this node */
3234	if (early_node_map[i].start_pfn > range_start_pfn)
3235		hole_pages = prev_end_pfn - range_start_pfn;
3236
3237	/* Find all holes for the zone within the node */
3238	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3239
3240		/* No need to continue if prev_end_pfn is outside the zone */
3241		if (prev_end_pfn >= range_end_pfn)
3242			break;
3243
3244		/* Make sure the end of the zone is not within the hole */
3245		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3246		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3247
3248		/* Update the hole size count and move on */
3249		if (start_pfn > range_start_pfn) {
3250			BUG_ON(prev_end_pfn > start_pfn);
3251			hole_pages += start_pfn - prev_end_pfn;
3252		}
3253		prev_end_pfn = early_node_map[i].end_pfn;
3254	}
3255
3256	/* Account for ranges past physical memory on this node */
3257	if (range_end_pfn > prev_end_pfn)
3258		hole_pages += range_end_pfn -
3259				max(range_start_pfn, prev_end_pfn);
3260
3261	return hole_pages;
3262}
3263
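/*
 * Worked example (a sketch with made-up ranges): if a node registered the
 * active ranges [0, 100) and [200, 300), then
 *
 *	__absent_pages_in_range(nid, 0, 300) = 100
 *
 * i.e. only the [100, 200) hole between the two active ranges is counted.
 */
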
3264/**
3265 * absent_pages_in_range - Return number of page frames in holes within a range
3266 * @start_pfn: The start PFN to start searching for holes
3267 * @end_pfn: The end PFN to stop searching for holes
3268 *
3269 * It returns the number of page frames in memory holes within a range.
3270 */
3271unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3272							unsigned long end_pfn)
3273{
3274	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3275}
3276
3277/* Return the number of page frames in holes in a zone on a node */
3278static unsigned long __meminit zone_absent_pages_in_node(int nid,
3279					unsigned long zone_type,
3280					unsigned long *ignored)
3281{
3282	unsigned long node_start_pfn, node_end_pfn;
3283	unsigned long zone_start_pfn, zone_end_pfn;
3284
3285	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3286	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3287							node_start_pfn);
3288	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3289							node_end_pfn);
3290
3291	adjust_zone_range_for_zone_movable(nid, zone_type,
3292			node_start_pfn, node_end_pfn,
3293			&zone_start_pfn, &zone_end_pfn);
3294	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3295}
3296
3297#else
3298static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3299					unsigned long zone_type,
3300					unsigned long *zones_size)
3301{
3302	return zones_size[zone_type];
3303}
3304
3305static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3306						unsigned long zone_type,
3307						unsigned long *zholes_size)
3308{
3309	if (!zholes_size)
3310		return 0;
3311
3312	return zholes_size[zone_type];
3313}
3314
3315#endif
3316
3317static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3318		unsigned long *zones_size, unsigned long *zholes_size)
3319{
3320	unsigned long realtotalpages, totalpages = 0;
3321	enum zone_type i;
3322
3323	for (i = 0; i < MAX_NR_ZONES; i++)
3324		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3325								zones_size);
3326	pgdat->node_spanned_pages = totalpages;
3327
3328	realtotalpages = totalpages;
3329	for (i = 0; i < MAX_NR_ZONES; i++)
3330		realtotalpages -=
3331			zone_absent_pages_in_node(pgdat->node_id, i,
3332								zholes_size);
3333	pgdat->node_present_pages = realtotalpages;
3334	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3335							realtotalpages);
3336}
3337
3338#ifndef CONFIG_SPARSEMEM
3339/*
3340 * Calculate the size of zone->pageblock_flags rounded up to an unsigned long.
3341 * Start by making sure zonesize is a multiple of pageblock_nr_pages by
3342 * rounding up. Then use NR_PAGEBLOCK_BITS bits per pageblock, round what is
3343 * now in bits up to the nearest long in bits, and return the result in
3344 * bytes.
3345 */
3346static unsigned long __init usemap_size(unsigned long zonesize)
3347{
3348	unsigned long usemapsize;
3349
3350	usemapsize = roundup(zonesize, pageblock_nr_pages);
3351	usemapsize = usemapsize >> pageblock_order;
3352	usemapsize *= NR_PAGEBLOCK_BITS;
3353	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3354
3355	return usemapsize / 8;
3356}
3357
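/*
 * Worked example (a sketch; assumes pageblock_order == 9, NR_PAGEBLOCK_BITS
 * == 4 and 64-bit longs): for a zone spanning 262144 pages,
 *
 *	roundup(262144, 512) >> 9	= 512 pageblocks
 *	512 * 4				= 2048 bits
 *	roundup(2048, 64) / 8		= 256 bytes
 *
 * so setup_usemap() below allocates a 256-byte pageblock_flags bitmap.
 */
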
3358static void __init setup_usemap(struct pglist_data *pgdat,
3359				struct zone *zone, unsigned long zonesize)
3360{
3361	unsigned long usemapsize = usemap_size(zonesize);
3362	zone->pageblock_flags = NULL;
3363	if (usemapsize)
3364		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3365}
3366#else
3367static inline void setup_usemap(struct pglist_data *pgdat,
3368				struct zone *zone, unsigned long zonesize) {}
3369#endif /* CONFIG_SPARSEMEM */
3370
3371#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3372
3373/* Return a sensible default order for the pageblock size. */
3374static inline int pageblock_default_order(void)
3375{
3376	if (HPAGE_SHIFT > PAGE_SHIFT)
3377		return HUGETLB_PAGE_ORDER;
3378
3379	return MAX_ORDER-1;
3380}
3381
3382/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3383static inline void __init set_pageblock_order(unsigned int order)
3384{
3385	/* Check that pageblock_nr_pages has not already been setup */
3386	if (pageblock_order)
3387		return;
3388
3389	/*
3390	 * Assume the largest contiguous order of interest is a huge page.
3391	 * This value may be variable depending on boot parameters on IA64
3392	 */
3393	pageblock_order = order;
3394}
3395#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3396
3397/*
3398 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3399 * and pageblock_default_order() are unused as pageblock_order is set
3400 * at compile-time. See include/linux/pageblock-flags.h for the values of
3401 * pageblock_order based on the kernel config
3402 */
3403static inline int pageblock_default_order(unsigned int order)
3404{
3405	return MAX_ORDER-1;
3406}
3407#define set_pageblock_order(x)	do {} while (0)
3408
3409#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3410
3411/*
3412 * Set up the zone data structures:
3413 *   - mark all pages reserved
3414 *   - mark all memory queues empty
3415 *   - clear the memory bitmaps
3416 */
3417static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3418		unsigned long *zones_size, unsigned long *zholes_size)
3419{
3420	enum zone_type j;
3421	int nid = pgdat->node_id;
3422	unsigned long zone_start_pfn = pgdat->node_start_pfn;
3423	int ret;
3424
3425	pgdat_resize_init(pgdat);
3426	pgdat->nr_zones = 0;
3427	init_waitqueue_head(&pgdat->kswapd_wait);
3428	pgdat->kswapd_max_order = 0;
3429	pgdat_page_cgroup_init(pgdat);
3430
3431	for (j = 0; j < MAX_NR_ZONES; j++) {
3432		struct zone *zone = pgdat->node_zones + j;
3433		unsigned long size, realsize, memmap_pages;
3434		enum lru_list l;
3435
3436		size = zone_spanned_pages_in_node(nid, j, zones_size);
3437		realsize = size - zone_absent_pages_in_node(nid, j,
3438								zholes_size);
3439
3440		/*
3441		 * Adjust realsize so that it accounts for how much memory
3442		 * is used by this zone for memmap. This affects the watermark
3443		 * and per-cpu initialisations
3444		 */
3445		memmap_pages =
3446			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3447		if (realsize >= memmap_pages) {
3448			realsize -= memmap_pages;
3449			if (memmap_pages)
3450				printk(KERN_DEBUG
3451				       "  %s zone: %lu pages used for memmap\n",
3452				       zone_names[j], memmap_pages);
3453		} else
3454			printk(KERN_WARNING
3455				"  %s zone: %lu pages exceeds realsize %lu\n",
3456				zone_names[j], memmap_pages, realsize);
3457
3458		/* Account for reserved pages */
3459		if (j == 0 && realsize > dma_reserve) {
3460			realsize -= dma_reserve;
3461			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
3462					zone_names[0], dma_reserve);
3463		}
3464
3465		if (!is_highmem_idx(j))
3466			nr_kernel_pages += realsize;
3467		nr_all_pages += realsize;
3468
3469		zone->spanned_pages = size;
3470		zone->present_pages = realsize;
3471#ifdef CONFIG_NUMA
3472		zone->node = nid;
3473		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3474						/ 100;
3475		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3476#endif
3477		zone->name = zone_names[j];
3478		spin_lock_init(&zone->lock);
3479		spin_lock_init(&zone->lru_lock);
3480		zone_seqlock_init(zone);
3481		zone->zone_pgdat = pgdat;
3482
3483		zone->prev_priority = DEF_PRIORITY;
3484
3485		zone_pcp_init(zone);
3486		for_each_lru(l) {
3487			INIT_LIST_HEAD(&zone->lru[l].list);
3488			zone->lru[l].nr_scan = 0;
3489		}
3490		zone->reclaim_stat.recent_rotated[0] = 0;
3491		zone->reclaim_stat.recent_rotated[1] = 0;
3492		zone->reclaim_stat.recent_scanned[0] = 0;
3493		zone->reclaim_stat.recent_scanned[1] = 0;
3494		zap_zone_vm_stats(zone);
3495		zone->flags = 0;
3496		if (!size)
3497			continue;
3498
3499		set_pageblock_order(pageblock_default_order());
3500		setup_usemap(pgdat, zone, size);
3501		ret = init_currently_empty_zone(zone, zone_start_pfn,
3502						size, MEMMAP_EARLY);
3503		BUG_ON(ret);
3504		memmap_init(size, nid, j, zone_start_pfn);
3505		zone_start_pfn += size;
3506	}
3507}
3508
3509static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3510{
3511	/* Skip empty nodes */
3512	if (!pgdat->node_spanned_pages)
3513		return;
3514
3515#ifdef CONFIG_FLAT_NODE_MEM_MAP
3516	/* ia64 gets its own node_mem_map, before this, without bootmem */
3517	if (!pgdat->node_mem_map) {
3518		unsigned long size, start, end;
3519		struct page *map;
3520
3521		/*
3522		 * The zone's endpoints aren't required to be MAX_ORDER
3523		 * aligned, but the node_mem_map endpoints must be, in order
3524		 * for the buddy allocator to function correctly.
3525		 */
3526		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3527		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3528		end = ALIGN(end, MAX_ORDER_NR_PAGES);
3529		size =  (end - start) * sizeof(struct page);
3530		map = alloc_remap(pgdat->node_id, size);
3531		if (!map)
3532			map = alloc_bootmem_node(pgdat, size);
3533		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3534	}
3535#ifndef CONFIG_NEED_MULTIPLE_NODES
3536	/*
3537	 * With no DISCONTIG, the global mem_map is just set as node 0's
3538	 */
3539	if (pgdat == NODE_DATA(0)) {
3540		mem_map = NODE_DATA(0)->node_mem_map;
3541#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3542		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3543			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3544#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3545	}
3546#endif
3547#endif /* CONFIG_FLAT_NODE_MEM_MAP */
3548}
3549
3550void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3551		unsigned long node_start_pfn, unsigned long *zholes_size)
3552{
3553	pg_data_t *pgdat = NODE_DATA(nid);
3554
3555	pgdat->node_id = nid;
3556	pgdat->node_start_pfn = node_start_pfn;
3557	calculate_node_totalpages(pgdat, zones_size, zholes_size);
3558
3559	alloc_node_mem_map(pgdat);
3560#ifdef CONFIG_FLAT_NODE_MEM_MAP
3561	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3562		nid, (unsigned long)pgdat,
3563		(unsigned long)pgdat->node_mem_map);
3564#endif
3565
3566	free_area_init_core(pgdat, zones_size, zholes_size);
3567}
3568
3569#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3570
3571#if MAX_NUMNODES > 1
3572/*
3573 * Figure out the number of possible node ids.
3574 */
3575static void __init setup_nr_node_ids(void)
3576{
3577	unsigned int node;
3578	unsigned int highest = 0;
3579
3580	for_each_node_mask(node, node_possible_map)
3581		highest = node;
3582	nr_node_ids = highest + 1;
3583}
3584#else
3585static inline void setup_nr_node_ids(void)
3586{
3587}
3588#endif
3589
3590/**
3591 * add_active_range - Register a range of PFNs backed by physical memory
3592 * @nid: The node ID the range resides on
3593 * @start_pfn: The start PFN of the available physical memory
3594 * @end_pfn: The end PFN of the available physical memory
3595 *
3596 * These ranges are stored in an early_node_map[] and later used by
3597 * free_area_init_nodes() to calculate zone sizes and holes. If the
3598 * range spans a memory hole, it is up to the architecture to ensure
3599 * the memory is not freed by the bootmem allocator. If possible
3600 * the range being registered will be merged with existing ranges.
3601 */
3602void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3603						unsigned long end_pfn)
3604{
3605	int i;
3606
3607	mminit_dprintk(MMINIT_TRACE, "memory_register",
3608			"Entering add_active_range(%d, %#lx, %#lx) "
3609			"%d entries of %d used\n",
3610			nid, start_pfn, end_pfn,
3611			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3612
3613	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3614
3615	/* Merge with existing active regions if possible */
3616	for (i = 0; i < nr_nodemap_entries; i++) {
3617		if (early_node_map[i].nid != nid)
3618			continue;
3619
3620		/* Skip if an existing region covers this new one */
3621		if (start_pfn >= early_node_map[i].start_pfn &&
3622				end_pfn <= early_node_map[i].end_pfn)
3623			return;
3624
3625		/* Merge forward if suitable */
3626		if (start_pfn <= early_node_map[i].end_pfn &&
3627				end_pfn > early_node_map[i].end_pfn) {
3628			early_node_map[i].end_pfn = end_pfn;
3629			return;
3630		}
3631
3632		/* Merge backward if suitable */
3633		if (start_pfn < early_node_map[i].end_pfn &&
3634				end_pfn >= early_node_map[i].start_pfn) {
3635			early_node_map[i].start_pfn = start_pfn;
3636			return;
3637		}
3638	}
3639
3640	/* Check that early_node_map is large enough */
3641	if (i >= MAX_ACTIVE_REGIONS) {
3642		printk(KERN_CRIT "More than %d memory regions, truncating\n",
3643							MAX_ACTIVE_REGIONS);
3644		return;
3645	}
3646
3647	early_node_map[i].nid = nid;
3648	early_node_map[i].start_pfn = start_pfn;
3649	early_node_map[i].end_pfn = end_pfn;
3650	nr_nodemap_entries = i + 1;
3651}
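/*
 * Illustrative example of the merge behaviour above (PFN values are
 * hypothetical): starting from an empty map, add_active_range(0, 0x100, 0x200)
 * creates a new entry [0x100, 0x200).  A later call
 * add_active_range(0, 0x200, 0x300) hits the "merge forward" case, because
 * start_pfn (0x200) is not beyond the existing end_pfn while end_pfn (0x300)
 * is, so the single entry simply grows to [0x100, 0x300) instead of consuming
 * another early_node_map[] slot.
 */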
3652
3653/**
3654 * remove_active_range - Shrink an existing registered range of PFNs
3655 * @nid: The node id of the range that should be shrunk
3656 * @start_pfn: The start PFN of the range to remove
3657 * @end_pfn: The end PFN of the range to remove
3658 *
3659 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
3660 * The map is kept near the end of the physical page range that has already
3661 * been registered. This function allows an arch to shrink an existing
3662 * registered range.
3663 */
3664void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
3665				unsigned long end_pfn)
3666{
3667	int i, j;
3668	int removed = 0;
3669
3670	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
3671			  nid, start_pfn, end_pfn);
3672
3673	/* Find the old active region end and shrink */
3674	for_each_active_range_index_in_nid(i, nid) {
3675		if (early_node_map[i].start_pfn >= start_pfn &&
3676		    early_node_map[i].end_pfn <= end_pfn) {
3677			/* clear it */
3678			early_node_map[i].start_pfn = 0;
3679			early_node_map[i].end_pfn = 0;
3680			removed = 1;
3681			continue;
3682		}
3683		if (early_node_map[i].start_pfn < start_pfn &&
3684		    early_node_map[i].end_pfn > start_pfn) {
3685			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
3686			early_node_map[i].end_pfn = start_pfn;
3687			if (temp_end_pfn > end_pfn)
3688				add_active_range(nid, end_pfn, temp_end_pfn);
3689			continue;
3690		}
3691		if (early_node_map[i].start_pfn >= start_pfn &&
3692		    early_node_map[i].end_pfn > end_pfn &&
3693		    early_node_map[i].start_pfn < end_pfn) {
3694			early_node_map[i].start_pfn = end_pfn;
3695			continue;
3696		}
3697	}
3698
3699	if (!removed)
3700		return;
3701
3702	/* remove the blank ones */
3703	for (i = nr_nodemap_entries - 1; i > 0; i--) {
3704		if (early_node_map[i].nid != nid)
3705			continue;
3706		if (early_node_map[i].end_pfn)
3707			continue;
3708		/* we found it, get rid of it */
3709		for (j = i; j < nr_nodemap_entries - 1; j++)
3710			memcpy(&early_node_map[j], &early_node_map[j+1],
3711				sizeof(early_node_map[j]));
3712		j = nr_nodemap_entries - 1;
3713		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
3714		nr_nodemap_entries--;
3715	}
3716}
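/*
 * Illustrative example of the shrink/split behaviour above (hypothetical
 * PFNs): with node 0 owning [0x100, 0x300), a call
 * remove_active_range(0, 0x180, 0x200) takes the second branch in the loop:
 * the existing entry is trimmed to [0x100, 0x180) and, because the old end
 * (0x300) lies beyond end_pfn, the tail is re-registered via
 * add_active_range(0, 0x200, 0x300).  No entry was fully cleared, so the
 * compaction pass at the end is skipped.
 */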
3717
3718/**
3719 * remove_all_active_ranges - Remove all currently registered regions
3720 *
3721 * During discovery, it may be found that a table like SRAT is invalid
3722 * and an alternative discovery method must be used. This function removes
3723 * all currently registered regions.
3724 */
3725void __init remove_all_active_ranges(void)
3726{
3727	memset(early_node_map, 0, sizeof(early_node_map));
3728	nr_nodemap_entries = 0;
3729}
3730
3731/* Compare two active node_active_regions */
3732static int __init cmp_node_active_region(const void *a, const void *b)
3733{
3734	struct node_active_region *arange = (struct node_active_region *)a;
3735	struct node_active_region *brange = (struct node_active_region *)b;
3736
3737	/* Done this way to avoid overflows */
3738	if (arange->start_pfn > brange->start_pfn)
3739		return 1;
3740	if (arange->start_pfn < brange->start_pfn)
3741		return -1;
3742
3743	return 0;
3744}
3745
3746/* sort the node_map by start_pfn */
3747static void __init sort_node_map(void)
3748{
3749	sort(early_node_map, (size_t)nr_nodemap_entries,
3750			sizeof(struct node_active_region),
3751			cmp_node_active_region, NULL);
3752}
3753
3754/* Find the lowest pfn for a node */
3755static unsigned long __init find_min_pfn_for_node(int nid)
3756{
3757	int i;
3758	unsigned long min_pfn = ULONG_MAX;
3759
3760	/* Assuming a sorted map, the first range found has the starting pfn */
3761	for_each_active_range_index_in_nid(i, nid)
3762		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
3763
3764	if (min_pfn == ULONG_MAX) {
3765		printk(KERN_WARNING
3766			"Could not find start_pfn for node %d\n", nid);
3767		return 0;
3768	}
3769
3770	return min_pfn;
3771}
3772
3773/**
3774 * find_min_pfn_with_active_regions - Find the minimum PFN registered
3775 *
3776 * It returns the minimum PFN based on information provided via
3777 * add_active_range().
3778 */
3779unsigned long __init find_min_pfn_with_active_regions(void)
3780{
3781	return find_min_pfn_for_node(MAX_NUMNODES);
3782}
3783
3784/*
3785 * early_calculate_totalpages()
3786 * Sum pages in active regions for movable zone.
3787 * Populate N_HIGH_MEMORY for calculating usable_nodes.
3788 */
3789static unsigned long __init early_calculate_totalpages(void)
3790{
3791	int i;
3792	unsigned long totalpages = 0;
3793
3794	for (i = 0; i < nr_nodemap_entries; i++) {
3795		unsigned long pages = early_node_map[i].end_pfn -
3796						early_node_map[i].start_pfn;
3797		totalpages += pages;
3798		if (pages)
3799			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3800	}
3801	return totalpages;
3802}
3803
3804/*
3805 * Find the PFN at which the Movable zone begins in each node. Kernel
3806 * memory is spread evenly between nodes as long as the nodes have enough
3807 * memory. When they don't, some nodes will have more kernelcore than
3808 * others.
3809 */
3810static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3811{
3812	int i, nid;
3813	unsigned long usable_startpfn;
3814	unsigned long kernelcore_node, kernelcore_remaining;
3815	unsigned long totalpages = early_calculate_totalpages();
3816	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
3817
3818	/*
3819	 * If movablecore was specified, calculate the size of
3820	 * kernelcore that corresponds to it so that memory usable for
3821	 * any allocation type is evenly spread. If both kernelcore
3822	 * and movablecore are specified, then the value of kernelcore
3823	 * will be used for required_kernelcore if it's greater than
3824	 * what movablecore would have allowed.
3825	 */
3826	if (required_movablecore) {
3827		unsigned long corepages;
3828
3829		/*
3830		 * Round-up so that ZONE_MOVABLE is at least as large as what
3831		 * was requested by the user
3832		 */
3833		required_movablecore =
3834			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3835		corepages = totalpages - required_movablecore;
3836
3837		required_kernelcore = max(required_kernelcore, corepages);
3838	}
3839
3840	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
3841	if (!required_kernelcore)
3842		return;
3843
3844	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3845	find_usable_zone_for_movable();
3846	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3847
3848restart:
3849	/* Spread kernelcore memory as evenly as possible throughout nodes */
3850	kernelcore_node = required_kernelcore / usable_nodes;
3851	for_each_node_state(nid, N_HIGH_MEMORY) {
3852		/*
3853		 * Recalculate kernelcore_node if the division per node
3854		 * now exceeds what is necessary to satisfy the requested
3855		 * amount of memory for the kernel
3856		 */
3857		if (required_kernelcore < kernelcore_node)
3858			kernelcore_node = required_kernelcore / usable_nodes;
3859
3860		/*
3861		 * As the map is walked, we track how much memory is usable
3862		 * by the kernel using kernelcore_remaining. When it is
3863		 * 0, the rest of the node is usable by ZONE_MOVABLE
3864		 */
3865		kernelcore_remaining = kernelcore_node;
3866
3867		/* Go through each range of PFNs within this node */
3868		for_each_active_range_index_in_nid(i, nid) {
3869			unsigned long start_pfn, end_pfn;
3870			unsigned long size_pages;
3871
3872			start_pfn = max(early_node_map[i].start_pfn,
3873						zone_movable_pfn[nid]);
3874			end_pfn = early_node_map[i].end_pfn;
3875			if (start_pfn >= end_pfn)
3876				continue;
3877
3878			/* Account for what is only usable for kernelcore */
3879			if (start_pfn < usable_startpfn) {
3880				unsigned long kernel_pages;
3881				kernel_pages = min(end_pfn, usable_startpfn)
3882								- start_pfn;
3883
3884				kernelcore_remaining -= min(kernel_pages,
3885							kernelcore_remaining);
3886				required_kernelcore -= min(kernel_pages,
3887							required_kernelcore);
3888
3889				/* Continue if range is now fully accounted */
3890				if (end_pfn <= usable_startpfn) {
3891
3892					/*
3893					 * Push zone_movable_pfn to the end so
3894					 * that if we have to rebalance
3895					 * kernelcore across nodes, we will
3896					 * not double account here
3897					 */
3898					zone_movable_pfn[nid] = end_pfn;
3899					continue;
3900				}
3901				start_pfn = usable_startpfn;
3902			}
3903
3904			/*
3905			 * The usable PFN range for ZONE_MOVABLE is from
3906			 * start_pfn->end_pfn. Calculate size_pages as the
3907			 * number of pages used as kernelcore
3908			 */
3909			size_pages = end_pfn - start_pfn;
3910			if (size_pages > kernelcore_remaining)
3911				size_pages = kernelcore_remaining;
3912			zone_movable_pfn[nid] = start_pfn + size_pages;
3913
3914			/*
3915			 * Some kernelcore has been accounted for; update the
3916			 * counts and break if the kernelcore for this node has
3917			 * been satisfied
3918			 */
3919			required_kernelcore -= min(required_kernelcore,
3920								size_pages);
3921			kernelcore_remaining -= size_pages;
3922			if (!kernelcore_remaining)
3923				break;
3924		}
3925	}
3926
3927	/*
3928	 * If there is still required_kernelcore, we do another pass with one
3929	 * less node in the count. This will push zone_movable_pfn[nid] further
3930	 * along on the nodes that still have memory until kernelcore is
3931	 * satisfied.
3932	 */
3933	usable_nodes--;
3934	if (usable_nodes && required_kernelcore > usable_nodes)
3935		goto restart;
3936
3937	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
3938	for (nid = 0; nid < MAX_NUMNODES; nid++)
3939		zone_movable_pfn[nid] =
3940			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
3941}
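/*
 * Illustrative example of the spreading logic above (hypothetical numbers,
 * 4KB pages): kernelcore=2G makes required_kernelcore 524288 pages.  If two
 * nodes have usable memory, each is asked to supply
 * kernelcore_node = 262144 pages, so zone_movable_pfn[nid] ends up roughly
 * 262144 pages into each node's usable range before the final round-up to
 * MAX_ORDER_NR_PAGES.  Should one node be too small to supply its share, the
 * restart pass divides the remainder among the nodes that still have room.
 */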
3942
3943/* Any regular memory on that node? */
3944static void check_for_regular_memory(pg_data_t *pgdat)
3945{
3946#ifdef CONFIG_HIGHMEM
3947	enum zone_type zone_type;
3948
3949	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
3950		struct zone *zone = &pgdat->node_zones[zone_type];
3951		if (zone->present_pages)
3952			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
3953	}
3954#endif
3955}
3956
3957/**
3958 * free_area_init_nodes - Initialise all pg_data_t and zone data
3959 * @max_zone_pfn: an array of max PFNs for each zone
3960 *
3961 * This will call free_area_init_node() for each active node in the system.
3962 * Using the page ranges provided by add_active_range(), the size of each
3963 * zone in each node and their holes is calculated. If the maximum PFNs
3964 * of two adjacent zones match, it is assumed that the higher zone is
3965 * empty. For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is
3966 * assumed that ZONE_DMA32 has no pages. It is also assumed that a zone
3967 * starts where the previous one ended. For example, ZONE_DMA32 starts
3968 * at arch_max_dma_pfn.
3969 */
3970void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3971{
3972	unsigned long nid;
3973	int i;
3974
3975	/* Sort early_node_map as initialisation assumes it is sorted */
3976	sort_node_map();
3977
3978	/* Record where the zone boundaries are */
3979	memset(arch_zone_lowest_possible_pfn, 0,
3980				sizeof(arch_zone_lowest_possible_pfn));
3981	memset(arch_zone_highest_possible_pfn, 0,
3982				sizeof(arch_zone_highest_possible_pfn));
3983	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
3984	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
3985	for (i = 1; i < MAX_NR_ZONES; i++) {
3986		if (i == ZONE_MOVABLE)
3987			continue;
3988		arch_zone_lowest_possible_pfn[i] =
3989			arch_zone_highest_possible_pfn[i-1];
3990		arch_zone_highest_possible_pfn[i] =
3991			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
3992	}
3993	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
3994	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
3995
3996	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
3997	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
3998	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
3999
4000	/* Print out the zone ranges */
4001	printk("Zone PFN ranges:\n");
4002	for (i = 0; i < MAX_NR_ZONES; i++) {
4003		if (i == ZONE_MOVABLE)
4004			continue;
4005		printk("  %-8s %0#10lx -> %0#10lx\n",
4006				zone_names[i],
4007				arch_zone_lowest_possible_pfn[i],
4008				arch_zone_highest_possible_pfn[i]);
4009	}
4010
4011	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4012	printk("Movable zone start PFN for each node\n");
4013	for (i = 0; i < MAX_NUMNODES; i++) {
4014		if (zone_movable_pfn[i])
4015			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4016	}
4017
4018	/* Print out the early_node_map[] */
4019	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4020	for (i = 0; i < nr_nodemap_entries; i++)
4021		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4022						early_node_map[i].start_pfn,
4023						early_node_map[i].end_pfn);
4024
4025	/* Initialise every node */
4026	mminit_verify_pageflags_layout();
4027	setup_nr_node_ids();
4028	for_each_online_node(nid) {
4029		pg_data_t *pgdat = NODE_DATA(nid);
4030		free_area_init_node(nid, NULL,
4031				find_min_pfn_for_node(nid), NULL);
4032
4033		/* Any memory on that node */
4034		if (pgdat->node_present_pages)
4035			node_set_state(nid, N_HIGH_MEMORY);
4036		check_for_regular_memory(pgdat);
4037	}
4038}
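/*
 * A minimal sketch of how an architecture might drive the function above at
 * boot (the names below are assumptions for illustration, not taken from this
 * file): after registering its memory with add_active_range(), it fills in
 * the highest PFN of each populated zone and hands the array over:
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES];
 *
 *	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 *	max_zone_pfns[ZONE_DMA]    = dma_limit_pfn;	(arch specific)
 *	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;	(arch specific)
 *	free_area_init_nodes(max_zone_pfns);
 *
 * ZONE_MOVABLE is intentionally left at 0; its per-node start PFNs are
 * derived above from the kernelcore/movablecore settings.
 */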
4039
4040static int __init cmdline_parse_core(char *p, unsigned long *core)
4041{
4042	unsigned long long coremem;
4043	if (!p)
4044		return -EINVAL;
4045
4046	coremem = memparse(p, &p);
4047	*core = coremem >> PAGE_SHIFT;
4048
4049	/* Paranoid check that UL is enough for the coremem value */
4050	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4051
4052	return 0;
4053}
4054
4055/*
4056 * kernelcore=size sets the amount of memory used for allocations that
4057 * cannot be reclaimed or migrated.
4058 */
4059static int __init cmdline_parse_kernelcore(char *p)
4060{
4061	return cmdline_parse_core(p, &required_kernelcore);
4062}
4063
4064/*
4065 * movablecore=size sets the amount of memory used for allocations that
4066 * can be reclaimed or migrated.
4067 */
4068static int __init cmdline_parse_movablecore(char *p)
4069{
4070	return cmdline_parse_core(p, &required_movablecore);
4071}
4072
4073early_param("kernelcore", cmdline_parse_kernelcore);
4074early_param("movablecore", cmdline_parse_movablecore);
4075
4076#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4077
4078/**
4079 * set_dma_reserve - set the specified number of pages reserved in the first zone
4080 * @new_dma_reserve: The number of pages to mark reserved
4081 *
4082 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4083 * In the DMA zone, a significant percentage may be consumed by the kernel
4084 * image and other unfreeable allocations, which can skew the watermarks
4085 * badly. This function may optionally be used to account for unfreeable
4086 * pages in the first zone (e.g., ZONE_DMA). The effect will be lower
4087 * watermarks and a smaller per-cpu batchsize.
4088 */
4089void __init set_dma_reserve(unsigned long new_dma_reserve)
4090{
4091	dma_reserve = new_dma_reserve;
4092}
4093
4094#ifndef CONFIG_NEED_MULTIPLE_NODES
4095struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4096EXPORT_SYMBOL(contig_page_data);
4097#endif
4098
4099void __init free_area_init(unsigned long *zones_size)
4100{
4101	free_area_init_node(0, zones_size,
4102			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4103}
4104
4105static int page_alloc_cpu_notify(struct notifier_block *self,
4106				 unsigned long action, void *hcpu)
4107{
4108	int cpu = (unsigned long)hcpu;
4109
4110	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4111		drain_pages(cpu);
4112
4113		/*
4114		 * Spill the event counters of the dead processor
4115		 * into the current processors event counters.
4116		 * This artificially elevates the count of the current
4117		 * processor.
4118		 */
4119		vm_events_fold_cpu(cpu);
4120
4121		/*
4122		 * Zero the differential counters of the dead processor
4123		 * so that the vm statistics are consistent.
4124		 *
4125		 * This is only okay since the processor is dead and cannot
4126		 * race with what we are doing.
4127		 */
4128		refresh_cpu_vm_stats(cpu);
4129	}
4130	return NOTIFY_OK;
4131}
4132
4133void __init page_alloc_init(void)
4134{
4135	hotcpu_notifier(page_alloc_cpu_notify, 0);
4136}
4137
4138/*
4139 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4140 *	or min_free_kbytes changes.
4141 */
4142static void calculate_totalreserve_pages(void)
4143{
4144	struct pglist_data *pgdat;
4145	unsigned long reserve_pages = 0;
4146	enum zone_type i, j;
4147
4148	for_each_online_pgdat(pgdat) {
4149		for (i = 0; i < MAX_NR_ZONES; i++) {
4150			struct zone *zone = pgdat->node_zones + i;
4151			unsigned long max = 0;
4152
4153			/* Find valid and maximum lowmem_reserve in the zone */
4154			for (j = i; j < MAX_NR_ZONES; j++) {
4155				if (zone->lowmem_reserve[j] > max)
4156					max = zone->lowmem_reserve[j];
4157			}
4158
4159			/* we treat pages_high as reserved pages. */
4160			max += zone->pages_high;
4161
4162			if (max > zone->present_pages)
4163				max = zone->present_pages;
4164			reserve_pages += max;
4165		}
4166	}
4167	totalreserve_pages = reserve_pages;
4168}
4169
4170/*
4171 * setup_per_zone_lowmem_reserve - called whenever
4172 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
4173 *	has a correct pages reserved value, so an adequate number of
4174 *	pages are left in the zone after a successful __alloc_pages().
4175 */
4176static void setup_per_zone_lowmem_reserve(void)
4177{
4178	struct pglist_data *pgdat;
4179	enum zone_type j, idx;
4180
4181	for_each_online_pgdat(pgdat) {
4182		for (j = 0; j < MAX_NR_ZONES; j++) {
4183			struct zone *zone = pgdat->node_zones + j;
4184			unsigned long present_pages = zone->present_pages;
4185
4186			zone->lowmem_reserve[j] = 0;
4187
4188			idx = j;
4189			while (idx) {
4190				struct zone *lower_zone;
4191
4192				idx--;
4193
4194				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4195					sysctl_lowmem_reserve_ratio[idx] = 1;
4196
4197				lower_zone = pgdat->node_zones + idx;
4198				lower_zone->lowmem_reserve[j] = present_pages /
4199					sysctl_lowmem_reserve_ratio[idx];
4200				present_pages += lower_zone->present_pages;
4201			}
4202		}
4203	}
4204
4205	/* update totalreserve_pages */
4206	calculate_totalreserve_pages();
4207}
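/*
 * Sketch of what the inner loop above computes, in symbols rather than code:
 * for a node with zones Z0 < Z1 < Z2 (lower index = lower zone), handling
 * j = Z2 walks idx downwards and sets
 *
 *	Z1->lowmem_reserve[Z2] = present(Z2) / ratio[Z1]
 *	Z0->lowmem_reserve[Z2] = (present(Z2) + present(Z1)) / ratio[Z0]
 *
 * i.e. every lower zone reserves a slice proportional to all the memory that
 * sits above it, using that lower zone's sysctl_lowmem_reserve_ratio entry.
 */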
4208
4209/**
4210 * setup_per_zone_pages_min - called when min_free_kbytes changes.
4211 *
4212 * Ensures that the pages_{min,low,high} values for each zone are set correctly
4213 * with respect to min_free_kbytes.
4214 */
4215void setup_per_zone_pages_min(void)
4216{
4217	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4218	unsigned long lowmem_pages = 0;
4219	struct zone *zone;
4220	unsigned long flags;
4221
4222	/* Calculate total number of !ZONE_HIGHMEM pages */
4223	for_each_zone(zone) {
4224		if (!is_highmem(zone))
4225			lowmem_pages += zone->present_pages;
4226	}
4227
4228	for_each_zone(zone) {
4229		u64 tmp;
4230
4231		spin_lock_irqsave(&zone->lock, flags);
4232		tmp = (u64)pages_min * zone->present_pages;
4233		do_div(tmp, lowmem_pages);
4234		if (is_highmem(zone)) {
4235			/*
4236			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4237			 * need highmem pages, so cap pages_min to a small
4238			 * value here.
4239			 *
4240			 * The (pages_high-pages_low) and (pages_low-pages_min)
4241			 * deltas control async page reclaim, and so should
4242			 * not be capped for highmem.
4243			 */
4244			int min_pages;
4245
4246			min_pages = zone->present_pages / 1024;
4247			if (min_pages < SWAP_CLUSTER_MAX)
4248				min_pages = SWAP_CLUSTER_MAX;
4249			if (min_pages > 128)
4250				min_pages = 128;
4251			zone->pages_min = min_pages;
4252		} else {
4253			/*
4254			 * If it's a lowmem zone, reserve a number of pages
4255			 * proportionate to the zone's size.
4256			 */
4257			zone->pages_min = tmp;
4258		}
4259
4260		zone->pages_low   = zone->pages_min + (tmp >> 2);
4261		zone->pages_high  = zone->pages_min + (tmp >> 1);
4262		setup_zone_migrate_reserve(zone);
4263		spin_unlock_irqrestore(&zone->lock, flags);
4264	}
4265
4266	/* update totalreserve_pages */
4267	calculate_totalreserve_pages();
4268}
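/*
 * Worked example for the watermark arithmetic above (hypothetical sizes,
 * 4KB pages): min_free_kbytes = 4096 gives
 * pages_min = 4096 >> (12 - 10) = 1024 pages in total.  A lowmem zone holding
 * a quarter of all lowmem pages therefore gets tmp = 256, so for that zone
 * pages_min = 256, pages_low = 256 + 64 = 320 and pages_high = 256 + 128 = 384.
 */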
4269
4270/**
4271 * setup_per_zone_inactive_ratio - called when min_free_kbytes changes.
4272 *
4273 * The inactive anon list should be small enough that the VM never has to
4274 * do too much work, but large enough that each inactive page has a chance
4275 * to be referenced again before it is swapped out.
4276 *
4277 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4278 * INACTIVE_ANON pages on this zone's LRU, maintained by the
4279 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4280 * the anonymous pages are kept on the inactive list.
4281 *
4282 * total     target    max
4283 * memory    ratio     inactive anon
4284 * -------------------------------------
4285 *   10MB       1         5MB
4286 *  100MB       1        50MB
4287 *    1GB       3       250MB
4288 *   10GB      10       0.9GB
4289 *  100GB      31         3GB
4290 *    1TB     101        10GB
4291 *   10TB     320        32GB
4292 */
4293static void setup_per_zone_inactive_ratio(void)
4294{
4295	struct zone *zone;
4296
4297	for_each_zone(zone) {
4298		unsigned int gb, ratio;
4299
4300		/* Zone size in gigabytes */
4301		gb = zone->present_pages >> (30 - PAGE_SHIFT);
4302		ratio = int_sqrt(10 * gb);
4303		if (!ratio)
4304			ratio = 1;
4305
4306		zone->inactive_ratio = ratio;
4307	}
4308}
4309
4310/*
4311 * Initialise min_free_kbytes.
4312 *
4313 * For small machines we want it small (128k min).  For large machines
4314 * we want it large (64MB max).  But it is not linear, because network
4315 * bandwidth does not increase linearly with machine size.  We use
4316 *
4317 * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4318 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
4319 *
4320 * which yields
4321 *
4322 * 16MB:	512k
4323 * 32MB:	724k
4324 * 64MB:	1024k
4325 * 128MB:	1448k
4326 * 256MB:	2048k
4327 * 512MB:	2896k
4328 * 1024MB:	4096k
4329 * 2048MB:	5792k
4330 * 4096MB:	8192k
4331 * 8192MB:	11584k
4332 * 16384MB:	16384k
4333 */
4334static int __init init_per_zone_pages_min(void)
4335{
4336	unsigned long lowmem_kbytes;
4337
4338	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4339
4340	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4341	if (min_free_kbytes < 128)
4342		min_free_kbytes = 128;
4343	if (min_free_kbytes > 65536)
4344		min_free_kbytes = 65536;
4345	setup_per_zone_pages_min();
4346	setup_per_zone_lowmem_reserve();
4347	setup_per_zone_inactive_ratio();
4348	return 0;
4349}
4350module_init(init_per_zone_pages_min)
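/*
 * One row of the table above, worked through the formula (assuming 4KB
 * pages): with 2048MB of lowmem, lowmem_kbytes = 2097152, so
 * min_free_kbytes = int_sqrt(2097152 * 16) = int_sqrt(33554432) = 5792,
 * matching the "2048MB: 5792k" entry before clamping to [128, 65536].
 */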
4351
4352/*
4353 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4354 *	that we can call two helper functions whenever min_free_kbytes
4355 *	changes.
4356 */
4357int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4358	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4359{
4360	proc_dointvec(table, write, file, buffer, length, ppos);
4361	if (write)
4362		setup_per_zone_pages_min();
4363	return 0;
4364}
4365
4366#ifdef CONFIG_NUMA
4367int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4368	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4369{
4370	struct zone *zone;
4371	int rc;
4372
4373	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4374	if (rc)
4375		return rc;
4376
4377	for_each_zone(zone)
4378		zone->min_unmapped_pages = (zone->present_pages *
4379				sysctl_min_unmapped_ratio) / 100;
4380	return 0;
4381}
4382
4383int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4384	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4385{
4386	struct zone *zone;
4387	int rc;
4388
4389	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4390	if (rc)
4391		return rc;
4392
4393	for_each_zone(zone)
4394		zone->min_slab_pages = (zone->present_pages *
4395				sysctl_min_slab_ratio) / 100;
4396	return 0;
4397}
4398#endif
4399
4400/*
4401 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4402 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4403 *	whenever sysctl_lowmem_reserve_ratio changes.
4404 *
4405 * The reserve ratio has no relation to the pages_min watermarks. The
4406 * lowmem reserve ratio only makes sense as a function of the boot-time
4407 * zone sizes.
4408 */
4409int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4410	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4411{
4412	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4413	setup_per_zone_lowmem_reserve();
4414	return 0;
4415}
4416
4417/*
4418 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4419 * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
4420 * pagelist can have before it gets flushed back to the buddy allocator.
4421 */
4422
4423int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4424	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4425{
4426	struct zone *zone;
4427	unsigned int cpu;
4428	int ret;
4429
4430	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4431	if (!write || (ret == -EINVAL))
4432		return ret;
4433	for_each_zone(zone) {
4434		for_each_online_cpu(cpu) {
4435			unsigned long  high;
4436			high = zone->present_pages / percpu_pagelist_fraction;
4437			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4438		}
4439	}
4440	return 0;
4441}
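/*
 * Illustrative arithmetic for the handler above (hypothetical zone size): a
 * zone with 262144 present pages and percpu_pagelist_fraction = 8 gets
 * high = 262144 / 8 = 32768 pages as the per-cpu pagelist cap handed to
 * setup_pagelist_highmark() for every online cpu.
 */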
4442
4443int hashdist = HASHDIST_DEFAULT;
4444
4445#ifdef CONFIG_NUMA
4446static int __init set_hashdist(char *str)
4447{
4448	if (!str)
4449		return 0;
4450	hashdist = simple_strtoul(str, &str, 0);
4451	return 1;
4452}
4453__setup("hashdist=", set_hashdist);
4454#endif
4455
4456/*
4457 * allocate a large system hash table from bootmem
4458 * - it is assumed that the hash table must contain an exact power-of-2
4459 *   quantity of entries
4460 * - limit is the number of hash buckets, not the total allocation size
4461 */
4462void *__init alloc_large_system_hash(const char *tablename,
4463				     unsigned long bucketsize,
4464				     unsigned long numentries,
4465				     int scale,
4466				     int flags,
4467				     unsigned int *_hash_shift,
4468				     unsigned int *_hash_mask,
4469				     unsigned long limit)
4470{
4471	unsigned long long max = limit;
4472	unsigned long log2qty, size;
4473	void *table = NULL;
4474
4475	/* allow the kernel cmdline to have a say */
4476	if (!numentries) {
4477		/* round applicable memory size up to nearest megabyte */
4478		numentries = nr_kernel_pages;
4479		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4480		numentries >>= 20 - PAGE_SHIFT;
4481		numentries <<= 20 - PAGE_SHIFT;
4482
4483		/* limit to 1 bucket per 2^scale bytes of low memory */
4484		if (scale > PAGE_SHIFT)
4485			numentries >>= (scale - PAGE_SHIFT);
4486		else
4487			numentries <<= (PAGE_SHIFT - scale);
4488
4489		/* Make sure we've got at least a 0-order allocation. */
4490		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4491			numentries = PAGE_SIZE / bucketsize;
4492	}
4493	numentries = roundup_pow_of_two(numentries);
4494
4495	/* limit allocation size to 1/16 total memory by default */
4496	if (max == 0) {
4497		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4498		do_div(max, bucketsize);
4499	}
4500
4501	if (numentries > max)
4502		numentries = max;
4503
4504	log2qty = ilog2(numentries);
4505
4506	do {
4507		size = bucketsize << log2qty;
4508		if (flags & HASH_EARLY)
4509			table = alloc_bootmem_nopanic(size);
4510		else if (hashdist)
4511			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4512		else {
4513			unsigned long order = get_order(size);
4514
4515			if (order < MAX_ORDER)
4516				table = (void *)__get_free_pages(GFP_ATOMIC,
4517								order);
4518			/*
4519			 * If bucketsize is not a power-of-two, we may free
4520			 * some pages at the end of hash table.
4521			 */
4522			if (table) {
4523				unsigned long alloc_end = (unsigned long)table +
4524						(PAGE_SIZE << order);
4525				unsigned long used = (unsigned long)table +
4526						PAGE_ALIGN(size);
4527				split_page(virt_to_page(table), order);
4528				while (used < alloc_end) {
4529					free_page(used);
4530					used += PAGE_SIZE;
4531				}
4532			}
4533		}
4534	} while (!table && size > PAGE_SIZE && --log2qty);
4535
4536	if (!table)
4537		panic("Failed to allocate %s hash table\n", tablename);
4538
4539	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4540	       tablename,
4541	       (1U << log2qty),
4542	       ilog2(size) - PAGE_SHIFT,
4543	       size);
4544
4545	if (_hash_shift)
4546		*_hash_shift = log2qty;
4547	if (_hash_mask)
4548		*_hash_mask = (1 << log2qty) - 1;
4549
4550	/*
4551	 * If hashdist is set, the table allocation is done with __vmalloc()
4552	 * which invokes the kmemleak_alloc() callback. This function may also
4553	 * be called before slab and kmemleak are initialised, in which case
4554	 * kmemleak simply buffers the request to be executed later
4555	 * (the GFP_ATOMIC flag is ignored in this case).
4556	 */
4557	if (!hashdist)
4558		kmemleak_alloc(table, size, 1, GFP_ATOMIC);
4559
4560	return table;
4561}
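/*
 * A hypothetical caller (illustrative only; the names are not from this file)
 * sizing a boot-time hash table might look like:
 *
 *	static struct hlist_head *example_hashtable;
 *	static unsigned int example_hash_shift;
 *	static unsigned int example_hash_mask;
 *
 *	example_hashtable = alloc_large_system_hash("Example-cache",
 *				sizeof(struct hlist_head),
 *				0,	      size from memory, see above
 *				14,	      one bucket per 16KB of lowmem
 *				HASH_EARLY,   allocate from bootmem
 *				&example_hash_shift,
 *				&example_hash_mask,
 *				0);	      no explicit bucket limit
 */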
4562
4563/* Return a pointer to the bitmap storing bits affecting a block of pages */
4564static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4565							unsigned long pfn)
4566{
4567#ifdef CONFIG_SPARSEMEM
4568	return __pfn_to_section(pfn)->pageblock_flags;
4569#else
4570	return zone->pageblock_flags;
4571#endif /* CONFIG_SPARSEMEM */
4572}
4573
4574static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4575{
4576#ifdef CONFIG_SPARSEMEM
4577	pfn &= (PAGES_PER_SECTION-1);
4578	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4579#else
4580	pfn = pfn - zone->zone_start_pfn;
4581	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4582#endif /* CONFIG_SPARSEMEM */
4583}
4584
4585/**
4586 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4587 * @page: The page within the block of interest
4588 * @start_bitidx: The first bit of interest to retrieve
4589 * @end_bitidx: The last bit of interest
4590 * returns pageblock_bits flags
4591 */
4592unsigned long get_pageblock_flags_group(struct page *page,
4593					int start_bitidx, int end_bitidx)
4594{
4595	struct zone *zone;
4596	unsigned long *bitmap;
4597	unsigned long pfn, bitidx;
4598	unsigned long flags = 0;
4599	unsigned long value = 1;
4600
4601	zone = page_zone(page);
4602	pfn = page_to_pfn(page);
4603	bitmap = get_pageblock_bitmap(zone, pfn);
4604	bitidx = pfn_to_bitidx(zone, pfn);
4605
4606	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4607		if (test_bit(bitidx + start_bitidx, bitmap))
4608			flags |= value;
4609
4610	return flags;
4611}
4612
4613/**
4614 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4615 * @page: The page within the block of interest
4616 * @start_bitidx: The first bit of interest
4617 * @end_bitidx: The last bit of interest
4618 * @flags: The flags to set
4619 */
4620void set_pageblock_flags_group(struct page *page, unsigned long flags,
4621					int start_bitidx, int end_bitidx)
4622{
4623	struct zone *zone;
4624	unsigned long *bitmap;
4625	unsigned long pfn, bitidx;
4626	unsigned long value = 1;
4627
4628	zone = page_zone(page);
4629	pfn = page_to_pfn(page);
4630	bitmap = get_pageblock_bitmap(zone, pfn);
4631	bitidx = pfn_to_bitidx(zone, pfn);
4632	VM_BUG_ON(pfn < zone->zone_start_pfn);
4633	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4634
4635	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4636		if (flags & value)
4637			__set_bit(bitidx + start_bitidx, bitmap);
4638		else
4639			__clear_bit(bitidx + start_bitidx, bitmap);
4640}
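/*
 * The usual consumers of this pair of helpers are thin wrappers that read or
 * write a small per-pageblock bitfield.  A sketch of the pattern (the real
 * wrappers and bit names live in the pageblock-flags header, so treat this as
 * illustrative):
 *
 *	type = get_pageblock_flags_group(page, first_bit, last_bit);
 *	set_pageblock_flags_group(page, MIGRATE_MOVABLE, first_bit, last_bit);
 *
 * with first_bit/last_bit spanning the bits reserved for the migrate type of
 * the pageblock containing @page.
 */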
4641
4642/*
4643 * These are designed as helper functions; please see page_isolation.c too.
4644 * They set/clear a pageblock's migrate type to/from ISOLATE.
4645 * The page allocator never allocates memory from an ISOLATE block.
4646 */
4647
4648int set_migratetype_isolate(struct page *page)
4649{
4650	struct zone *zone;
4651	unsigned long flags;
4652	int ret = -EBUSY;
4653
4654	zone = page_zone(page);
4655	spin_lock_irqsave(&zone->lock, flags);
4656	/*
4657	 * In the future, more migrate types may become isolation targets.
4658	 */
4659	if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
4660		goto out;
4661	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4662	move_freepages_block(zone, page, MIGRATE_ISOLATE);
4663	ret = 0;
4664out:
4665	spin_unlock_irqrestore(&zone->lock, flags);
4666	if (!ret)
4667		drain_all_pages();
4668	return ret;
4669}
4670
4671void unset_migratetype_isolate(struct page *page)
4672{
4673	struct zone *zone;
4674	unsigned long flags;
4675	zone = page_zone(page);
4676	spin_lock_irqsave(&zone->lock, flags);
4677	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
4678		goto out;
4679	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4680	move_freepages_block(zone, page, MIGRATE_MOVABLE);
4681out:
4682	spin_unlock_irqrestore(&zone->lock, flags);
4683}
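/*
 * A rough sketch of how these helpers are meant to be used by a memory
 * off-lining path (simplified; error handling and the actual migration step
 * live elsewhere):
 *
 *	if (set_migratetype_isolate(page))
 *		return -EBUSY;			no new allocations from this block
 *	...migrate or free every used page in the block...
 *	if (failed)
 *		unset_migratetype_isolate(page);	give the block back
 *	else
 *		__offline_isolated_pages(start_pfn, end_pfn);
 */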
4684
4685#ifdef CONFIG_MEMORY_HOTREMOVE
4686/*
4687 * All pages in the range must be isolated before calling this.
4688 */
4689void
4690__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
4691{
4692	struct page *page;
4693	struct zone *zone;
4694	int order, i;
4695	unsigned long pfn;
4696	unsigned long flags;
4697	/* find the first valid pfn */
4698	for (pfn = start_pfn; pfn < end_pfn; pfn++)
4699		if (pfn_valid(pfn))
4700			break;
4701	if (pfn == end_pfn)
4702		return;
4703	zone = page_zone(pfn_to_page(pfn));
4704	spin_lock_irqsave(&zone->lock, flags);
4705	pfn = start_pfn;
4706	while (pfn < end_pfn) {
4707		if (!pfn_valid(pfn)) {
4708			pfn++;
4709			continue;
4710		}
4711		page = pfn_to_page(pfn);
4712		BUG_ON(page_count(page));
4713		BUG_ON(!PageBuddy(page));
4714		order = page_order(page);
4715#ifdef CONFIG_DEBUG_VM
4716		printk(KERN_INFO "remove from free list %lx %d %lx\n",
4717		       pfn, 1 << order, end_pfn);
4718#endif
4719		list_del(&page->lru);
4720		rmv_page_order(page);
4721		zone->free_area[order].nr_free--;
4722		__mod_zone_page_state(zone, NR_FREE_PAGES,
4723				      - (1UL << order));
4724		for (i = 0; i < (1 << order); i++)
4725			SetPageReserved((page+i));
4726		pfn += (1 << order);
4727	}
4728	spin_unlock_irqrestore(&zone->lock, flags);
4729}
4730#endif
4731