page_alloc.c revision 985737cf2ea096ea946aed82c7484d40defc71a8
1/*
2 *  linux/mm/page_alloc.c
3 *
4 *  Manages the free list, the system allocates free pages here.
5 *  Note that kmalloc() lives in slab.c
6 *
7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8 *  Swap reorganised 29.12.95, Stephen Tweedie
9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
22#include <linux/jiffies.h>
23#include <linux/bootmem.h>
24#include <linux/compiler.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/suspend.h>
28#include <linux/pagevec.h>
29#include <linux/blkdev.h>
30#include <linux/slab.h>
31#include <linux/oom.h>
32#include <linux/notifier.h>
33#include <linux/topology.h>
34#include <linux/sysctl.h>
35#include <linux/cpu.h>
36#include <linux/cpuset.h>
37#include <linux/memory_hotplug.h>
38#include <linux/nodemask.h>
39#include <linux/vmalloc.h>
40#include <linux/mempolicy.h>
41#include <linux/stop_machine.h>
42#include <linux/sort.h>
43#include <linux/pfn.h>
44#include <linux/backing-dev.h>
45#include <linux/fault-inject.h>
46#include <linux/page-isolation.h>
47#include <linux/memcontrol.h>
48#include <linux/debugobjects.h>
49
50#include <asm/tlbflush.h>
51#include <asm/div64.h>
52#include "internal.h"
53
54/*
55 * Array of node states.
56 */
57nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
58	[N_POSSIBLE] = NODE_MASK_ALL,
59	[N_ONLINE] = { { [0] = 1UL } },
60#ifndef CONFIG_NUMA
61	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
62#ifdef CONFIG_HIGHMEM
63	[N_HIGH_MEMORY] = { { [0] = 1UL } },
64#endif
65	[N_CPU] = { { [0] = 1UL } },
66#endif	/* NUMA */
67};
68EXPORT_SYMBOL(node_states);
69
70unsigned long totalram_pages __read_mostly;
71unsigned long totalreserve_pages __read_mostly;
72long nr_swap_pages;
73int percpu_pagelist_fraction;
74
75#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
76int pageblock_order __read_mostly;
77#endif
78
79static void __free_pages_ok(struct page *page, unsigned int order);
80
81/*
82 * results with 256, 32 in the lowmem_reserve sysctl:
83 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
84 *	1G machine -> (16M dma, 784M normal, 224M high)
85 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
86 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
87 *	HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
88 *
89 * TBD: should special case ZONE_DMA32 machines here - in those we normally
90 * don't need any ZONE_NORMAL reservation
91 */
92int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
93#ifdef CONFIG_ZONE_DMA
94	 256,
95#endif
96#ifdef CONFIG_ZONE_DMA32
97	 256,
98#endif
99#ifdef CONFIG_HIGHMEM
100	 32,
101#endif
102	 32,
103};
104
105EXPORT_SYMBOL(totalram_pages);
106
107static char * const zone_names[MAX_NR_ZONES] = {
108#ifdef CONFIG_ZONE_DMA
109	 "DMA",
110#endif
111#ifdef CONFIG_ZONE_DMA32
112	 "DMA32",
113#endif
114	 "Normal",
115#ifdef CONFIG_HIGHMEM
116	 "HighMem",
117#endif
118	 "Movable",
119};
120
121int min_free_kbytes = 1024;
122
123unsigned long __meminitdata nr_kernel_pages;
124unsigned long __meminitdata nr_all_pages;
125static unsigned long __meminitdata dma_reserve;
126
127#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
128  /*
129   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
130   * ranges of memory (RAM) that may be registered with add_active_range().
131   * Ranges passed to add_active_range() will be merged if possible
132   * so the number of times add_active_range() can be called is
133   * related to the number of nodes and the number of holes
134   */
135  #ifdef CONFIG_MAX_ACTIVE_REGIONS
136    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
137    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
138  #else
139    #if MAX_NUMNODES >= 32
140      /* If there can be many nodes, allow up to 50 holes per node */
141      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
142    #else
143      /* By default, allow up to 256 distinct regions */
144      #define MAX_ACTIVE_REGIONS 256
145    #endif
146  #endif
147
148  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
149  static int __meminitdata nr_nodemap_entries;
150  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
151  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
152#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
153  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
154  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
155#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
156  static unsigned long __initdata required_kernelcore;
157  static unsigned long __initdata required_movablecore;
158  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
159
160  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
161  int movable_zone;
162  EXPORT_SYMBOL(movable_zone);
163#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
164
165#if MAX_NUMNODES > 1
166int nr_node_ids __read_mostly = MAX_NUMNODES;
167EXPORT_SYMBOL(nr_node_ids);
168#endif
169
170int page_group_by_mobility_disabled __read_mostly;
171
172static void set_pageblock_migratetype(struct page *page, int migratetype)
173{
174	set_pageblock_flags_group(page, (unsigned long)migratetype,
175					PB_migrate, PB_migrate_end);
176}
177
178#ifdef CONFIG_DEBUG_VM
179static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
180{
181	int ret = 0;
182	unsigned seq;
183	unsigned long pfn = page_to_pfn(page);
184
185	do {
186		seq = zone_span_seqbegin(zone);
187		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
188			ret = 1;
189		else if (pfn < zone->zone_start_pfn)
190			ret = 1;
191	} while (zone_span_seqretry(zone, seq));
192
193	return ret;
194}
195
196static int page_is_consistent(struct zone *zone, struct page *page)
197{
198	if (!pfn_valid_within(page_to_pfn(page)))
199		return 0;
200	if (zone != page_zone(page))
201		return 0;
202
203	return 1;
204}
205/*
206 * Temporary debugging check for pages not lying within a given zone.
207 */
208static int bad_range(struct zone *zone, struct page *page)
209{
210	if (page_outside_zone_boundaries(zone, page))
211		return 1;
212	if (!page_is_consistent(zone, page))
213		return 1;
214
215	return 0;
216}
217#else
218static inline int bad_range(struct zone *zone, struct page *page)
219{
220	return 0;
221}
222#endif
223
224static void bad_page(struct page *page)
225{
226	void *pc = page_get_page_cgroup(page);
227
228	printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG
229		"page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
230		current->comm, page, (int)(2*sizeof(unsigned long)),
231		(unsigned long)page->flags, page->mapping,
232		page_mapcount(page), page_count(page));
233	if (pc) {
234		printk(KERN_EMERG "cgroup:%p\n", pc);
235		page_reset_bad_cgroup(page);
236	}
237	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
238		KERN_EMERG "Backtrace:\n");
239	dump_stack();
240	page->flags &= ~PAGE_FLAGS_CLEAR_WHEN_BAD;
241	set_page_count(page, 0);
242	reset_page_mapcount(page);
243	page->mapping = NULL;
244	add_taint(TAINT_BAD_PAGE);
245}
246
247/*
248 * Higher-order pages are called "compound pages".  They are structured thusly:
249 *
250 * The first PAGE_SIZE page is called the "head page".
251 *
252 * The remaining PAGE_SIZE pages are called "tail pages".
253 *
254 * All pages have PG_compound set.  All pages have their ->private pointing at
255 * the head page (even the head page has this).
256 *
257 * The first tail page's ->lru.next holds the address of the compound page's
258 * put_page() function.  Its ->lru.prev holds the order of allocation.
259 * This usage means that zero-order pages may not be compound.
260 */
261
262static void free_compound_page(struct page *page)
263{
264	__free_pages_ok(page, compound_order(page));
265}
266
267void prep_compound_page(struct page *page, unsigned long order)
268{
269	int i;
270	int nr_pages = 1 << order;
271	struct page *p = page + 1;
272
273	set_compound_page_dtor(page, free_compound_page);
274	set_compound_order(page, order);
275	__SetPageHead(page);
276	for (i = 1; i < nr_pages; i++, p++) {
277		if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
278			p = pfn_to_page(page_to_pfn(page) + i);
279		__SetPageTail(p);
280		p->first_page = page;
281	}
282}
283
284static void destroy_compound_page(struct page *page, unsigned long order)
285{
286	int i;
287	int nr_pages = 1 << order;
288	struct page *p = page + 1;
289
290	if (unlikely(compound_order(page) != order))
291		bad_page(page);
292
293	if (unlikely(!PageHead(page)))
294			bad_page(page);
295	__ClearPageHead(page);
296	for (i = 1; i < nr_pages; i++, p++) {
297		if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
298			p = pfn_to_page(page_to_pfn(page) + i);
299
300		if (unlikely(!PageTail(p) |
301				(p->first_page != page)))
302			bad_page(page);
303		__ClearPageTail(p);
304	}
305}
306
307static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
308{
309	int i;
310
311	/*
312	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
313	 * and __GFP_HIGHMEM from hard or soft interrupt context.
314	 */
315	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
316	for (i = 0; i < (1 << order); i++)
317		clear_highpage(page + i);
318}
319
320static inline void set_page_order(struct page *page, int order)
321{
322	set_page_private(page, order);
323	__SetPageBuddy(page);
324}
325
326static inline void rmv_page_order(struct page *page)
327{
328	__ClearPageBuddy(page);
329	set_page_private(page, 0);
330}
331
332/*
333 * Locate the struct page for both the matching buddy in our
334 * pair (buddy1) and the combined O(n+1) page they form (page).
335 *
336 * 1) Any buddy B1 will have an order O twin B2 which satisfies
337 * the following equation:
338 *     B2 = B1 ^ (1 << O)
339 * For example, if the starting buddy (buddy2) is #8 its order
340 * 1 buddy is #10:
341 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
342 *
343 * 2) Any buddy B will have an order O+1 parent P which
344 * satisfies the following equation:
345 *     P = B & ~(1 << O)
346 *
347 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
348 */
349static inline struct page *
350__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
351{
352	unsigned long buddy_idx = page_idx ^ (1 << order);
353
354	return page + (buddy_idx - page_idx);
355}
356
357static inline unsigned long
358__find_combined_index(unsigned long page_idx, unsigned int order)
359{
360	return (page_idx & ~(1 << order));
361}
362
363/*
364 * This function checks whether a page is free && is the buddy
365 * we can do coalesce a page and its buddy if
366 * (a) the buddy is not in a hole &&
367 * (b) the buddy is in the buddy system &&
368 * (c) a page and its buddy have the same order &&
369 * (d) a page and its buddy are in the same zone.
370 *
371 * For recording whether a page is in the buddy system, we use PG_buddy.
372 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
373 *
374 * For recording page's order, we use page_private(page).
375 */
376static inline int page_is_buddy(struct page *page, struct page *buddy,
377								int order)
378{
379	if (!pfn_valid_within(page_to_pfn(buddy)))
380		return 0;
381
382	if (page_zone_id(page) != page_zone_id(buddy))
383		return 0;
384
385	if (PageBuddy(buddy) && page_order(buddy) == order) {
386		BUG_ON(page_count(buddy) != 0);
387		return 1;
388	}
389	return 0;
390}
391
392/*
393 * Freeing function for a buddy system allocator.
394 *
395 * The concept of a buddy system is to maintain direct-mapped table
396 * (containing bit values) for memory blocks of various "orders".
397 * The bottom level table contains the map for the smallest allocatable
398 * units of memory (here, pages), and each level above it describes
399 * pairs of units from the levels below, hence, "buddies".
400 * At a high level, all that happens here is marking the table entry
401 * at the bottom level available, and propagating the changes upward
402 * as necessary, plus some accounting needed to play nicely with other
403 * parts of the VM system.
404 * At each level, we keep a list of pages, which are heads of continuous
405 * free pages of length of (1 << order) and marked with PG_buddy. Page's
406 * order is recorded in page_private(page) field.
407 * So when we are allocating or freeing one, we can derive the state of the
408 * other.  That is, if we allocate a small block, and both were
409 * free, the remainder of the region must be split into blocks.
410 * If a block is freed, and its buddy is also free, then this
411 * triggers coalescing into a block of larger size.
412 *
413 * -- wli
414 */
415
416static inline void __free_one_page(struct page *page,
417		struct zone *zone, unsigned int order)
418{
419	unsigned long page_idx;
420	int order_size = 1 << order;
421	int migratetype = get_pageblock_migratetype(page);
422
423	if (unlikely(PageCompound(page)))
424		destroy_compound_page(page, order);
425
426	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
427
428	VM_BUG_ON(page_idx & (order_size - 1));
429	VM_BUG_ON(bad_range(zone, page));
430
431	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
432	while (order < MAX_ORDER-1) {
433		unsigned long combined_idx;
434		struct page *buddy;
435
436		buddy = __page_find_buddy(page, page_idx, order);
437		if (!page_is_buddy(page, buddy, order))
438			break;
439
440		/* Our buddy is free, merge with it and move up one order. */
441		list_del(&buddy->lru);
442		zone->free_area[order].nr_free--;
443		rmv_page_order(buddy);
444		combined_idx = __find_combined_index(page_idx, order);
445		page = page + (combined_idx - page_idx);
446		page_idx = combined_idx;
447		order++;
448	}
449	set_page_order(page, order);
450	list_add(&page->lru,
451		&zone->free_area[order].free_list[migratetype]);
452	zone->free_area[order].nr_free++;
453}
454
455static inline int free_pages_check(struct page *page)
456{
457	free_page_mlock(page);
458	if (unlikely(page_mapcount(page) |
459		(page->mapping != NULL)  |
460		(page_get_page_cgroup(page) != NULL) |
461		(page_count(page) != 0)  |
462		(page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
463		bad_page(page);
464	if (PageDirty(page))
465		__ClearPageDirty(page);
466	if (PageSwapBacked(page))
467		__ClearPageSwapBacked(page);
468	/*
469	 * For now, we report if PG_reserved was found set, but do not
470	 * clear it, and do not free the page.  But we shall soon need
471	 * to do more, for when the ZERO_PAGE count wraps negative.
472	 */
473	return PageReserved(page);
474}
475
476/*
477 * Frees a list of pages.
478 * Assumes all pages on list are in same zone, and of same order.
479 * count is the number of pages to free.
480 *
481 * If the zone was previously in an "all pages pinned" state then look to
482 * see if this freeing clears that state.
483 *
484 * And clear the zone's pages_scanned counter, to hold off the "all pages are
485 * pinned" detection logic.
486 */
487static void free_pages_bulk(struct zone *zone, int count,
488					struct list_head *list, int order)
489{
490	spin_lock(&zone->lock);
491	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
492	zone->pages_scanned = 0;
493	while (count--) {
494		struct page *page;
495
496		VM_BUG_ON(list_empty(list));
497		page = list_entry(list->prev, struct page, lru);
498		/* have to delete it as __free_one_page list manipulates */
499		list_del(&page->lru);
500		__free_one_page(page, zone, order);
501	}
502	spin_unlock(&zone->lock);
503}
504
505static void free_one_page(struct zone *zone, struct page *page, int order)
506{
507	spin_lock(&zone->lock);
508	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
509	zone->pages_scanned = 0;
510	__free_one_page(page, zone, order);
511	spin_unlock(&zone->lock);
512}
513
514static void __free_pages_ok(struct page *page, unsigned int order)
515{
516	unsigned long flags;
517	int i;
518	int reserved = 0;
519
520	for (i = 0 ; i < (1 << order) ; ++i)
521		reserved += free_pages_check(page + i);
522	if (reserved)
523		return;
524
525	if (!PageHighMem(page)) {
526		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
527		debug_check_no_obj_freed(page_address(page),
528					   PAGE_SIZE << order);
529	}
530	arch_free_page(page, order);
531	kernel_map_pages(page, 1 << order, 0);
532
533	local_irq_save(flags);
534	__count_vm_events(PGFREE, 1 << order);
535	free_one_page(page_zone(page), page, order);
536	local_irq_restore(flags);
537}
538
539/*
540 * permit the bootmem allocator to evade page validation on high-order frees
541 */
542void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
543{
544	if (order == 0) {
545		__ClearPageReserved(page);
546		set_page_count(page, 0);
547		set_page_refcounted(page);
548		__free_page(page);
549	} else {
550		int loop;
551
552		prefetchw(page);
553		for (loop = 0; loop < BITS_PER_LONG; loop++) {
554			struct page *p = &page[loop];
555
556			if (loop + 1 < BITS_PER_LONG)
557				prefetchw(p + 1);
558			__ClearPageReserved(p);
559			set_page_count(p, 0);
560		}
561
562		set_page_refcounted(page);
563		__free_pages(page, order);
564	}
565}
566
567
568/*
569 * The order of subdivision here is critical for the IO subsystem.
570 * Please do not alter this order without good reasons and regression
571 * testing. Specifically, as large blocks of memory are subdivided,
572 * the order in which smaller blocks are delivered depends on the order
573 * they're subdivided in this function. This is the primary factor
574 * influencing the order in which pages are delivered to the IO
575 * subsystem according to empirical testing, and this is also justified
576 * by considering the behavior of a buddy system containing a single
577 * large block of memory acted on by a series of small allocations.
578 * This behavior is a critical factor in sglist merging's success.
579 *
580 * -- wli
581 */
582static inline void expand(struct zone *zone, struct page *page,
583	int low, int high, struct free_area *area,
584	int migratetype)
585{
586	unsigned long size = 1 << high;
587
588	while (high > low) {
589		area--;
590		high--;
591		size >>= 1;
592		VM_BUG_ON(bad_range(zone, &page[size]));
593		list_add(&page[size].lru, &area->free_list[migratetype]);
594		area->nr_free++;
595		set_page_order(&page[size], high);
596	}
597}
598
599/*
600 * This page is about to be returned from the page allocator
601 */
602static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
603{
604	if (unlikely(page_mapcount(page) |
605		(page->mapping != NULL)  |
606		(page_get_page_cgroup(page) != NULL) |
607		(page_count(page) != 0)  |
608		(page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
609		bad_page(page);
610
611	/*
612	 * For now, we report if PG_reserved was found set, but do not
613	 * clear it, and do not allocate the page: as a safety net.
614	 */
615	if (PageReserved(page))
616		return 1;
617
618	page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim |
619			1 << PG_referenced | 1 << PG_arch_1 |
620			1 << PG_owner_priv_1 | 1 << PG_mappedtodisk
621#ifdef CONFIG_UNEVICTABLE_LRU
622			| 1 << PG_mlocked
623#endif
624			);
625	set_page_private(page, 0);
626	set_page_refcounted(page);
627
628	arch_alloc_page(page, order);
629	kernel_map_pages(page, 1 << order, 1);
630
631	if (gfp_flags & __GFP_ZERO)
632		prep_zero_page(page, order, gfp_flags);
633
634	if (order && (gfp_flags & __GFP_COMP))
635		prep_compound_page(page, order);
636
637	return 0;
638}
639
640/*
641 * Go through the free lists for the given migratetype and remove
642 * the smallest available page from the freelists
643 */
644static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
645						int migratetype)
646{
647	unsigned int current_order;
648	struct free_area * area;
649	struct page *page;
650
651	/* Find a page of the appropriate size in the preferred list */
652	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
653		area = &(zone->free_area[current_order]);
654		if (list_empty(&area->free_list[migratetype]))
655			continue;
656
657		page = list_entry(area->free_list[migratetype].next,
658							struct page, lru);
659		list_del(&page->lru);
660		rmv_page_order(page);
661		area->nr_free--;
662		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
663		expand(zone, page, order, current_order, area, migratetype);
664		return page;
665	}
666
667	return NULL;
668}
669
670
671/*
672 * This array describes the order lists are fallen back to when
673 * the free lists for the desirable migrate type are depleted
674 */
675static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
676	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
677	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
678	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
679	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
680};
681
682/*
683 * Move the free pages in a range to the free lists of the requested type.
684 * Note that start_page and end_pages are not aligned on a pageblock
685 * boundary. If alignment is required, use move_freepages_block()
686 */
687static int move_freepages(struct zone *zone,
688			  struct page *start_page, struct page *end_page,
689			  int migratetype)
690{
691	struct page *page;
692	unsigned long order;
693	int pages_moved = 0;
694
695#ifndef CONFIG_HOLES_IN_ZONE
696	/*
697	 * page_zone is not safe to call in this context when
698	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
699	 * anyway as we check zone boundaries in move_freepages_block().
700	 * Remove at a later date when no bug reports exist related to
701	 * grouping pages by mobility
702	 */
703	BUG_ON(page_zone(start_page) != page_zone(end_page));
704#endif
705
706	for (page = start_page; page <= end_page;) {
707		/* Make sure we are not inadvertently changing nodes */
708		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
709
710		if (!pfn_valid_within(page_to_pfn(page))) {
711			page++;
712			continue;
713		}
714
715		if (!PageBuddy(page)) {
716			page++;
717			continue;
718		}
719
720		order = page_order(page);
721		list_del(&page->lru);
722		list_add(&page->lru,
723			&zone->free_area[order].free_list[migratetype]);
724		page += 1 << order;
725		pages_moved += 1 << order;
726	}
727
728	return pages_moved;
729}
730
731static int move_freepages_block(struct zone *zone, struct page *page,
732				int migratetype)
733{
734	unsigned long start_pfn, end_pfn;
735	struct page *start_page, *end_page;
736
737	start_pfn = page_to_pfn(page);
738	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
739	start_page = pfn_to_page(start_pfn);
740	end_page = start_page + pageblock_nr_pages - 1;
741	end_pfn = start_pfn + pageblock_nr_pages - 1;
742
743	/* Do not cross zone boundaries */
744	if (start_pfn < zone->zone_start_pfn)
745		start_page = page;
746	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
747		return 0;
748
749	return move_freepages(zone, start_page, end_page, migratetype);
750}
751
752/* Remove an element from the buddy allocator from the fallback list */
753static struct page *__rmqueue_fallback(struct zone *zone, int order,
754						int start_migratetype)
755{
756	struct free_area * area;
757	int current_order;
758	struct page *page;
759	int migratetype, i;
760
761	/* Find the largest possible block of pages in the other list */
762	for (current_order = MAX_ORDER-1; current_order >= order;
763						--current_order) {
764		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
765			migratetype = fallbacks[start_migratetype][i];
766
767			/* MIGRATE_RESERVE handled later if necessary */
768			if (migratetype == MIGRATE_RESERVE)
769				continue;
770
771			area = &(zone->free_area[current_order]);
772			if (list_empty(&area->free_list[migratetype]))
773				continue;
774
775			page = list_entry(area->free_list[migratetype].next,
776					struct page, lru);
777			area->nr_free--;
778
779			/*
780			 * If breaking a large block of pages, move all free
781			 * pages to the preferred allocation list. If falling
782			 * back for a reclaimable kernel allocation, be more
783			 * agressive about taking ownership of free pages
784			 */
785			if (unlikely(current_order >= (pageblock_order >> 1)) ||
786					start_migratetype == MIGRATE_RECLAIMABLE) {
787				unsigned long pages;
788				pages = move_freepages_block(zone, page,
789								start_migratetype);
790
791				/* Claim the whole block if over half of it is free */
792				if (pages >= (1 << (pageblock_order-1)))
793					set_pageblock_migratetype(page,
794								start_migratetype);
795
796				migratetype = start_migratetype;
797			}
798
799			/* Remove the page from the freelists */
800			list_del(&page->lru);
801			rmv_page_order(page);
802			__mod_zone_page_state(zone, NR_FREE_PAGES,
803							-(1UL << order));
804
805			if (current_order == pageblock_order)
806				set_pageblock_migratetype(page,
807							start_migratetype);
808
809			expand(zone, page, order, current_order, area, migratetype);
810			return page;
811		}
812	}
813
814	/* Use MIGRATE_RESERVE rather than fail an allocation */
815	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
816}
817
818/*
819 * Do the hard work of removing an element from the buddy allocator.
820 * Call me with the zone->lock already held.
821 */
822static struct page *__rmqueue(struct zone *zone, unsigned int order,
823						int migratetype)
824{
825	struct page *page;
826
827	page = __rmqueue_smallest(zone, order, migratetype);
828
829	if (unlikely(!page))
830		page = __rmqueue_fallback(zone, order, migratetype);
831
832	return page;
833}
834
835/*
836 * Obtain a specified number of elements from the buddy allocator, all under
837 * a single hold of the lock, for efficiency.  Add them to the supplied list.
838 * Returns the number of new pages which were placed at *list.
839 */
840static int rmqueue_bulk(struct zone *zone, unsigned int order,
841			unsigned long count, struct list_head *list,
842			int migratetype)
843{
844	int i;
845
846	spin_lock(&zone->lock);
847	for (i = 0; i < count; ++i) {
848		struct page *page = __rmqueue(zone, order, migratetype);
849		if (unlikely(page == NULL))
850			break;
851
852		/*
853		 * Split buddy pages returned by expand() are received here
854		 * in physical page order. The page is added to the callers and
855		 * list and the list head then moves forward. From the callers
856		 * perspective, the linked list is ordered by page number in
857		 * some conditions. This is useful for IO devices that can
858		 * merge IO requests if the physical pages are ordered
859		 * properly.
860		 */
861		list_add(&page->lru, list);
862		set_page_private(page, migratetype);
863		list = &page->lru;
864	}
865	spin_unlock(&zone->lock);
866	return i;
867}
868
869#ifdef CONFIG_NUMA
870/*
871 * Called from the vmstat counter updater to drain pagesets of this
872 * currently executing processor on remote nodes after they have
873 * expired.
874 *
875 * Note that this function must be called with the thread pinned to
876 * a single processor.
877 */
878void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
879{
880	unsigned long flags;
881	int to_drain;
882
883	local_irq_save(flags);
884	if (pcp->count >= pcp->batch)
885		to_drain = pcp->batch;
886	else
887		to_drain = pcp->count;
888	free_pages_bulk(zone, to_drain, &pcp->list, 0);
889	pcp->count -= to_drain;
890	local_irq_restore(flags);
891}
892#endif
893
894/*
895 * Drain pages of the indicated processor.
896 *
897 * The processor must either be the current processor and the
898 * thread pinned to the current processor or a processor that
899 * is not online.
900 */
901static void drain_pages(unsigned int cpu)
902{
903	unsigned long flags;
904	struct zone *zone;
905
906	for_each_zone(zone) {
907		struct per_cpu_pageset *pset;
908		struct per_cpu_pages *pcp;
909
910		if (!populated_zone(zone))
911			continue;
912
913		pset = zone_pcp(zone, cpu);
914
915		pcp = &pset->pcp;
916		local_irq_save(flags);
917		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
918		pcp->count = 0;
919		local_irq_restore(flags);
920	}
921}
922
923/*
924 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
925 */
926void drain_local_pages(void *arg)
927{
928	drain_pages(smp_processor_id());
929}
930
931/*
932 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
933 */
934void drain_all_pages(void)
935{
936	on_each_cpu(drain_local_pages, NULL, 1);
937}
938
939#ifdef CONFIG_HIBERNATION
940
941void mark_free_pages(struct zone *zone)
942{
943	unsigned long pfn, max_zone_pfn;
944	unsigned long flags;
945	int order, t;
946	struct list_head *curr;
947
948	if (!zone->spanned_pages)
949		return;
950
951	spin_lock_irqsave(&zone->lock, flags);
952
953	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
954	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
955		if (pfn_valid(pfn)) {
956			struct page *page = pfn_to_page(pfn);
957
958			if (!swsusp_page_is_forbidden(page))
959				swsusp_unset_page_free(page);
960		}
961
962	for_each_migratetype_order(order, t) {
963		list_for_each(curr, &zone->free_area[order].free_list[t]) {
964			unsigned long i;
965
966			pfn = page_to_pfn(list_entry(curr, struct page, lru));
967			for (i = 0; i < (1UL << order); i++)
968				swsusp_set_page_free(pfn_to_page(pfn + i));
969		}
970	}
971	spin_unlock_irqrestore(&zone->lock, flags);
972}
973#endif /* CONFIG_PM */
974
975/*
976 * Free a 0-order page
977 */
978static void free_hot_cold_page(struct page *page, int cold)
979{
980	struct zone *zone = page_zone(page);
981	struct per_cpu_pages *pcp;
982	unsigned long flags;
983
984	if (PageAnon(page))
985		page->mapping = NULL;
986	if (free_pages_check(page))
987		return;
988
989	if (!PageHighMem(page)) {
990		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
991		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
992	}
993	arch_free_page(page, 0);
994	kernel_map_pages(page, 1, 0);
995
996	pcp = &zone_pcp(zone, get_cpu())->pcp;
997	local_irq_save(flags);
998	__count_vm_event(PGFREE);
999	if (cold)
1000		list_add_tail(&page->lru, &pcp->list);
1001	else
1002		list_add(&page->lru, &pcp->list);
1003	set_page_private(page, get_pageblock_migratetype(page));
1004	pcp->count++;
1005	if (pcp->count >= pcp->high) {
1006		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
1007		pcp->count -= pcp->batch;
1008	}
1009	local_irq_restore(flags);
1010	put_cpu();
1011}
1012
1013void free_hot_page(struct page *page)
1014{
1015	free_hot_cold_page(page, 0);
1016}
1017
1018void free_cold_page(struct page *page)
1019{
1020	free_hot_cold_page(page, 1);
1021}
1022
1023/*
1024 * split_page takes a non-compound higher-order page, and splits it into
1025 * n (1<<order) sub-pages: page[0..n]
1026 * Each sub-page must be freed individually.
1027 *
1028 * Note: this is probably too low level an operation for use in drivers.
1029 * Please consult with lkml before using this in your driver.
1030 */
1031void split_page(struct page *page, unsigned int order)
1032{
1033	int i;
1034
1035	VM_BUG_ON(PageCompound(page));
1036	VM_BUG_ON(!page_count(page));
1037	for (i = 1; i < (1 << order); i++)
1038		set_page_refcounted(page + i);
1039}
1040
1041/*
1042 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1043 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1044 * or two.
1045 */
1046static struct page *buffered_rmqueue(struct zone *preferred_zone,
1047			struct zone *zone, int order, gfp_t gfp_flags)
1048{
1049	unsigned long flags;
1050	struct page *page;
1051	int cold = !!(gfp_flags & __GFP_COLD);
1052	int cpu;
1053	int migratetype = allocflags_to_migratetype(gfp_flags);
1054
1055again:
1056	cpu  = get_cpu();
1057	if (likely(order == 0)) {
1058		struct per_cpu_pages *pcp;
1059
1060		pcp = &zone_pcp(zone, cpu)->pcp;
1061		local_irq_save(flags);
1062		if (!pcp->count) {
1063			pcp->count = rmqueue_bulk(zone, 0,
1064					pcp->batch, &pcp->list, migratetype);
1065			if (unlikely(!pcp->count))
1066				goto failed;
1067		}
1068
1069		/* Find a page of the appropriate migrate type */
1070		if (cold) {
1071			list_for_each_entry_reverse(page, &pcp->list, lru)
1072				if (page_private(page) == migratetype)
1073					break;
1074		} else {
1075			list_for_each_entry(page, &pcp->list, lru)
1076				if (page_private(page) == migratetype)
1077					break;
1078		}
1079
1080		/* Allocate more to the pcp list if necessary */
1081		if (unlikely(&page->lru == &pcp->list)) {
1082			pcp->count += rmqueue_bulk(zone, 0,
1083					pcp->batch, &pcp->list, migratetype);
1084			page = list_entry(pcp->list.next, struct page, lru);
1085		}
1086
1087		list_del(&page->lru);
1088		pcp->count--;
1089	} else {
1090		spin_lock_irqsave(&zone->lock, flags);
1091		page = __rmqueue(zone, order, migratetype);
1092		spin_unlock(&zone->lock);
1093		if (!page)
1094			goto failed;
1095	}
1096
1097	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1098	zone_statistics(preferred_zone, zone);
1099	local_irq_restore(flags);
1100	put_cpu();
1101
1102	VM_BUG_ON(bad_range(zone, page));
1103	if (prep_new_page(page, order, gfp_flags))
1104		goto again;
1105	return page;
1106
1107failed:
1108	local_irq_restore(flags);
1109	put_cpu();
1110	return NULL;
1111}
1112
1113#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
1114#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
1115#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
1116#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
1117#define ALLOC_HARDER		0x10 /* try to alloc harder */
1118#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1119#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1120
1121#ifdef CONFIG_FAIL_PAGE_ALLOC
1122
1123static struct fail_page_alloc_attr {
1124	struct fault_attr attr;
1125
1126	u32 ignore_gfp_highmem;
1127	u32 ignore_gfp_wait;
1128	u32 min_order;
1129
1130#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1131
1132	struct dentry *ignore_gfp_highmem_file;
1133	struct dentry *ignore_gfp_wait_file;
1134	struct dentry *min_order_file;
1135
1136#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1137
1138} fail_page_alloc = {
1139	.attr = FAULT_ATTR_INITIALIZER,
1140	.ignore_gfp_wait = 1,
1141	.ignore_gfp_highmem = 1,
1142	.min_order = 1,
1143};
1144
1145static int __init setup_fail_page_alloc(char *str)
1146{
1147	return setup_fault_attr(&fail_page_alloc.attr, str);
1148}
1149__setup("fail_page_alloc=", setup_fail_page_alloc);
1150
1151static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1152{
1153	if (order < fail_page_alloc.min_order)
1154		return 0;
1155	if (gfp_mask & __GFP_NOFAIL)
1156		return 0;
1157	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1158		return 0;
1159	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1160		return 0;
1161
1162	return should_fail(&fail_page_alloc.attr, 1 << order);
1163}
1164
1165#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1166
1167static int __init fail_page_alloc_debugfs(void)
1168{
1169	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1170	struct dentry *dir;
1171	int err;
1172
1173	err = init_fault_attr_dentries(&fail_page_alloc.attr,
1174				       "fail_page_alloc");
1175	if (err)
1176		return err;
1177	dir = fail_page_alloc.attr.dentries.dir;
1178
1179	fail_page_alloc.ignore_gfp_wait_file =
1180		debugfs_create_bool("ignore-gfp-wait", mode, dir,
1181				      &fail_page_alloc.ignore_gfp_wait);
1182
1183	fail_page_alloc.ignore_gfp_highmem_file =
1184		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1185				      &fail_page_alloc.ignore_gfp_highmem);
1186	fail_page_alloc.min_order_file =
1187		debugfs_create_u32("min-order", mode, dir,
1188				   &fail_page_alloc.min_order);
1189
1190	if (!fail_page_alloc.ignore_gfp_wait_file ||
1191            !fail_page_alloc.ignore_gfp_highmem_file ||
1192            !fail_page_alloc.min_order_file) {
1193		err = -ENOMEM;
1194		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1195		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1196		debugfs_remove(fail_page_alloc.min_order_file);
1197		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1198	}
1199
1200	return err;
1201}
1202
1203late_initcall(fail_page_alloc_debugfs);
1204
1205#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1206
1207#else /* CONFIG_FAIL_PAGE_ALLOC */
1208
1209static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1210{
1211	return 0;
1212}
1213
1214#endif /* CONFIG_FAIL_PAGE_ALLOC */
1215
1216/*
1217 * Return 1 if free pages are above 'mark'. This takes into account the order
1218 * of the allocation.
1219 */
1220int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1221		      int classzone_idx, int alloc_flags)
1222{
1223	/* free_pages my go negative - that's OK */
1224	long min = mark;
1225	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1226	int o;
1227
1228	if (alloc_flags & ALLOC_HIGH)
1229		min -= min / 2;
1230	if (alloc_flags & ALLOC_HARDER)
1231		min -= min / 4;
1232
1233	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1234		return 0;
1235	for (o = 0; o < order; o++) {
1236		/* At the next order, this order's pages become unavailable */
1237		free_pages -= z->free_area[o].nr_free << o;
1238
1239		/* Require fewer higher order pages to be free */
1240		min >>= 1;
1241
1242		if (free_pages <= min)
1243			return 0;
1244	}
1245	return 1;
1246}
1247
1248#ifdef CONFIG_NUMA
1249/*
1250 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1251 * skip over zones that are not allowed by the cpuset, or that have
1252 * been recently (in last second) found to be nearly full.  See further
1253 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1254 * that have to skip over a lot of full or unallowed zones.
1255 *
1256 * If the zonelist cache is present in the passed in zonelist, then
1257 * returns a pointer to the allowed node mask (either the current
1258 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
1259 *
1260 * If the zonelist cache is not available for this zonelist, does
1261 * nothing and returns NULL.
1262 *
1263 * If the fullzones BITMAP in the zonelist cache is stale (more than
1264 * a second since last zap'd) then we zap it out (clear its bits.)
1265 *
1266 * We hold off even calling zlc_setup, until after we've checked the
1267 * first zone in the zonelist, on the theory that most allocations will
1268 * be satisfied from that first zone, so best to examine that zone as
1269 * quickly as we can.
1270 */
1271static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1272{
1273	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1274	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1275
1276	zlc = zonelist->zlcache_ptr;
1277	if (!zlc)
1278		return NULL;
1279
1280	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1281		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1282		zlc->last_full_zap = jiffies;
1283	}
1284
1285	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1286					&cpuset_current_mems_allowed :
1287					&node_states[N_HIGH_MEMORY];
1288	return allowednodes;
1289}
1290
1291/*
1292 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1293 * if it is worth looking at further for free memory:
1294 *  1) Check that the zone isn't thought to be full (doesn't have its
1295 *     bit set in the zonelist_cache fullzones BITMAP).
1296 *  2) Check that the zones node (obtained from the zonelist_cache
1297 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1298 * Return true (non-zero) if zone is worth looking at further, or
1299 * else return false (zero) if it is not.
1300 *
1301 * This check -ignores- the distinction between various watermarks,
1302 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1303 * found to be full for any variation of these watermarks, it will
1304 * be considered full for up to one second by all requests, unless
1305 * we are so low on memory on all allowed nodes that we are forced
1306 * into the second scan of the zonelist.
1307 *
1308 * In the second scan we ignore this zonelist cache and exactly
1309 * apply the watermarks to all zones, even it is slower to do so.
1310 * We are low on memory in the second scan, and should leave no stone
1311 * unturned looking for a free page.
1312 */
1313static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1314						nodemask_t *allowednodes)
1315{
1316	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1317	int i;				/* index of *z in zonelist zones */
1318	int n;				/* node that zone *z is on */
1319
1320	zlc = zonelist->zlcache_ptr;
1321	if (!zlc)
1322		return 1;
1323
1324	i = z - zonelist->_zonerefs;
1325	n = zlc->z_to_n[i];
1326
1327	/* This zone is worth trying if it is allowed but not full */
1328	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1329}
1330
1331/*
1332 * Given 'z' scanning a zonelist, set the corresponding bit in
1333 * zlc->fullzones, so that subsequent attempts to allocate a page
1334 * from that zone don't waste time re-examining it.
1335 */
1336static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1337{
1338	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1339	int i;				/* index of *z in zonelist zones */
1340
1341	zlc = zonelist->zlcache_ptr;
1342	if (!zlc)
1343		return;
1344
1345	i = z - zonelist->_zonerefs;
1346
1347	set_bit(i, zlc->fullzones);
1348}
1349
1350#else	/* CONFIG_NUMA */
1351
1352static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1353{
1354	return NULL;
1355}
1356
1357static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1358				nodemask_t *allowednodes)
1359{
1360	return 1;
1361}
1362
1363static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1364{
1365}
1366#endif	/* CONFIG_NUMA */
1367
1368/*
1369 * get_page_from_freelist goes through the zonelist trying to allocate
1370 * a page.
1371 */
1372static struct page *
1373get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1374		struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
1375{
1376	struct zoneref *z;
1377	struct page *page = NULL;
1378	int classzone_idx;
1379	struct zone *zone, *preferred_zone;
1380	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1381	int zlc_active = 0;		/* set if using zonelist_cache */
1382	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1383
1384	(void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
1385							&preferred_zone);
1386	if (!preferred_zone)
1387		return NULL;
1388
1389	classzone_idx = zone_idx(preferred_zone);
1390
1391zonelist_scan:
1392	/*
1393	 * Scan zonelist, looking for a zone with enough free.
1394	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1395	 */
1396	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1397						high_zoneidx, nodemask) {
1398		if (NUMA_BUILD && zlc_active &&
1399			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1400				continue;
1401		if ((alloc_flags & ALLOC_CPUSET) &&
1402			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1403				goto try_next_zone;
1404
1405		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1406			unsigned long mark;
1407			if (alloc_flags & ALLOC_WMARK_MIN)
1408				mark = zone->pages_min;
1409			else if (alloc_flags & ALLOC_WMARK_LOW)
1410				mark = zone->pages_low;
1411			else
1412				mark = zone->pages_high;
1413			if (!zone_watermark_ok(zone, order, mark,
1414				    classzone_idx, alloc_flags)) {
1415				if (!zone_reclaim_mode ||
1416				    !zone_reclaim(zone, gfp_mask, order))
1417					goto this_zone_full;
1418			}
1419		}
1420
1421		page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
1422		if (page)
1423			break;
1424this_zone_full:
1425		if (NUMA_BUILD)
1426			zlc_mark_zone_full(zonelist, z);
1427try_next_zone:
1428		if (NUMA_BUILD && !did_zlc_setup) {
1429			/* we do zlc_setup after the first zone is tried */
1430			allowednodes = zlc_setup(zonelist, alloc_flags);
1431			zlc_active = 1;
1432			did_zlc_setup = 1;
1433		}
1434	}
1435
1436	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1437		/* Disable zlc cache for second zonelist scan */
1438		zlc_active = 0;
1439		goto zonelist_scan;
1440	}
1441	return page;
1442}
1443
1444/*
1445 * This is the 'heart' of the zoned buddy allocator.
1446 */
1447struct page *
1448__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
1449			struct zonelist *zonelist, nodemask_t *nodemask)
1450{
1451	const gfp_t wait = gfp_mask & __GFP_WAIT;
1452	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1453	struct zoneref *z;
1454	struct zone *zone;
1455	struct page *page;
1456	struct reclaim_state reclaim_state;
1457	struct task_struct *p = current;
1458	int do_retry;
1459	int alloc_flags;
1460	unsigned long did_some_progress;
1461	unsigned long pages_reclaimed = 0;
1462
1463	might_sleep_if(wait);
1464
1465	if (should_fail_alloc_page(gfp_mask, order))
1466		return NULL;
1467
1468restart:
1469	z = zonelist->_zonerefs;  /* the list of zones suitable for gfp_mask */
1470
1471	if (unlikely(!z->zone)) {
1472		/*
1473		 * Happens if we have an empty zonelist as a result of
1474		 * GFP_THISNODE being used on a memoryless node
1475		 */
1476		return NULL;
1477	}
1478
1479	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1480			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
1481	if (page)
1482		goto got_pg;
1483
1484	/*
1485	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1486	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1487	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
1488	 * using a larger set of nodes after it has established that the
1489	 * allowed per node queues are empty and that nodes are
1490	 * over allocated.
1491	 */
1492	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1493		goto nopage;
1494
1495	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1496		wakeup_kswapd(zone, order);
1497
1498	/*
1499	 * OK, we're below the kswapd watermark and have kicked background
1500	 * reclaim. Now things get more complex, so set up alloc_flags according
1501	 * to how we want to proceed.
1502	 *
1503	 * The caller may dip into page reserves a bit more if the caller
1504	 * cannot run direct reclaim, or if the caller has realtime scheduling
1505	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1506	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1507	 */
1508	alloc_flags = ALLOC_WMARK_MIN;
1509	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
1510		alloc_flags |= ALLOC_HARDER;
1511	if (gfp_mask & __GFP_HIGH)
1512		alloc_flags |= ALLOC_HIGH;
1513	if (wait)
1514		alloc_flags |= ALLOC_CPUSET;
1515
1516	/*
1517	 * Go through the zonelist again. Let __GFP_HIGH and allocations
1518	 * coming from realtime tasks go deeper into reserves.
1519	 *
1520	 * This is the last chance, in general, before the goto nopage.
1521	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1522	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1523	 */
1524	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1525						high_zoneidx, alloc_flags);
1526	if (page)
1527		goto got_pg;
1528
1529	/* This allocation should allow future memory freeing. */
1530
1531rebalance:
1532	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
1533			&& !in_interrupt()) {
1534		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
1535nofail_alloc:
1536			/* go through the zonelist yet again, ignoring mins */
1537			page = get_page_from_freelist(gfp_mask, nodemask, order,
1538				zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
1539			if (page)
1540				goto got_pg;
1541			if (gfp_mask & __GFP_NOFAIL) {
1542				congestion_wait(WRITE, HZ/50);
1543				goto nofail_alloc;
1544			}
1545		}
1546		goto nopage;
1547	}
1548
1549	/* Atomic allocations - we can't balance anything */
1550	if (!wait)
1551		goto nopage;
1552
1553	cond_resched();
1554
1555	/* We now go into synchronous reclaim */
1556	cpuset_memory_pressure_bump();
1557	p->flags |= PF_MEMALLOC;
1558	reclaim_state.reclaimed_slab = 0;
1559	p->reclaim_state = &reclaim_state;
1560
1561	did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
1562
1563	p->reclaim_state = NULL;
1564	p->flags &= ~PF_MEMALLOC;
1565
1566	cond_resched();
1567
1568	if (order != 0)
1569		drain_all_pages();
1570
1571	if (likely(did_some_progress)) {
1572		page = get_page_from_freelist(gfp_mask, nodemask, order,
1573					zonelist, high_zoneidx, alloc_flags);
1574		if (page)
1575			goto got_pg;
1576	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1577		if (!try_set_zone_oom(zonelist, gfp_mask)) {
1578			schedule_timeout_uninterruptible(1);
1579			goto restart;
1580		}
1581
1582		/*
1583		 * Go through the zonelist yet one more time, keep
1584		 * very high watermark here, this is only to catch
1585		 * a parallel oom killing, we must fail if we're still
1586		 * under heavy pressure.
1587		 */
1588		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1589			order, zonelist, high_zoneidx,
1590			ALLOC_WMARK_HIGH|ALLOC_CPUSET);
1591		if (page) {
1592			clear_zonelist_oom(zonelist, gfp_mask);
1593			goto got_pg;
1594		}
1595
1596		/* The OOM killer will not help higher order allocs so fail */
1597		if (order > PAGE_ALLOC_COSTLY_ORDER) {
1598			clear_zonelist_oom(zonelist, gfp_mask);
1599			goto nopage;
1600		}
1601
1602		out_of_memory(zonelist, gfp_mask, order);
1603		clear_zonelist_oom(zonelist, gfp_mask);
1604		goto restart;
1605	}
1606
1607	/*
1608	 * Don't let big-order allocations loop unless the caller explicitly
1609	 * requests that.  Wait for some write requests to complete then retry.
1610	 *
1611	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1612	 * means __GFP_NOFAIL, but that may not be true in other
1613	 * implementations.
1614	 *
1615	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1616	 * specified, then we retry until we no longer reclaim any pages
1617	 * (above), or we've reclaimed an order of pages at least as
1618	 * large as the allocation's order. In both cases, if the
1619	 * allocation still fails, we stop retrying.
1620	 */
1621	pages_reclaimed += did_some_progress;
1622	do_retry = 0;
1623	if (!(gfp_mask & __GFP_NORETRY)) {
1624		if (order <= PAGE_ALLOC_COSTLY_ORDER) {
1625			do_retry = 1;
1626		} else {
1627			if (gfp_mask & __GFP_REPEAT &&
1628				pages_reclaimed < (1 << order))
1629					do_retry = 1;
1630		}
1631		if (gfp_mask & __GFP_NOFAIL)
1632			do_retry = 1;
1633	}
1634	if (do_retry) {
1635		congestion_wait(WRITE, HZ/50);
1636		goto rebalance;
1637	}
1638
1639nopage:
1640	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1641		printk(KERN_WARNING "%s: page allocation failure."
1642			" order:%d, mode:0x%x\n",
1643			p->comm, order, gfp_mask);
1644		dump_stack();
1645		show_mem();
1646	}
1647got_pg:
1648	return page;
1649}
1650EXPORT_SYMBOL(__alloc_pages_internal);
1651
1652/*
1653 * Common helper functions.
1654 */
1655unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1656{
1657	struct page * page;
1658	page = alloc_pages(gfp_mask, order);
1659	if (!page)
1660		return 0;
1661	return (unsigned long) page_address(page);
1662}
1663
1664EXPORT_SYMBOL(__get_free_pages);
1665
1666unsigned long get_zeroed_page(gfp_t gfp_mask)
1667{
1668	struct page * page;
1669
1670	/*
1671	 * get_zeroed_page() returns a 32-bit address, which cannot represent
1672	 * a highmem page
1673	 */
1674	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1675
1676	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1677	if (page)
1678		return (unsigned long) page_address(page);
1679	return 0;
1680}
1681
1682EXPORT_SYMBOL(get_zeroed_page);
1683
1684void __pagevec_free(struct pagevec *pvec)
1685{
1686	int i = pagevec_count(pvec);
1687
1688	while (--i >= 0)
1689		free_hot_cold_page(pvec->pages[i], pvec->cold);
1690}
1691
1692void __free_pages(struct page *page, unsigned int order)
1693{
1694	if (put_page_testzero(page)) {
1695		if (order == 0)
1696			free_hot_page(page);
1697		else
1698			__free_pages_ok(page, order);
1699	}
1700}
1701
1702EXPORT_SYMBOL(__free_pages);
1703
1704void free_pages(unsigned long addr, unsigned int order)
1705{
1706	if (addr != 0) {
1707		VM_BUG_ON(!virt_addr_valid((void *)addr));
1708		__free_pages(virt_to_page((void *)addr), order);
1709	}
1710}
1711
1712EXPORT_SYMBOL(free_pages);
1713
1714/**
1715 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
1716 * @size: the number of bytes to allocate
1717 * @gfp_mask: GFP flags for the allocation
1718 *
1719 * This function is similar to alloc_pages(), except that it allocates the
1720 * minimum number of pages to satisfy the request.  alloc_pages() can only
1721 * allocate memory in power-of-two pages.
1722 *
1723 * This function is also limited by MAX_ORDER.
1724 *
1725 * Memory allocated by this function must be released by free_pages_exact().
1726 */
1727void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
1728{
1729	unsigned int order = get_order(size);
1730	unsigned long addr;
1731
1732	addr = __get_free_pages(gfp_mask, order);
1733	if (addr) {
1734		unsigned long alloc_end = addr + (PAGE_SIZE << order);
1735		unsigned long used = addr + PAGE_ALIGN(size);
1736
1737		split_page(virt_to_page(addr), order);
1738		while (used < alloc_end) {
1739			free_page(used);
1740			used += PAGE_SIZE;
1741		}
1742	}
1743
1744	return (void *)addr;
1745}
1746EXPORT_SYMBOL(alloc_pages_exact);
1747
1748/**
1749 * free_pages_exact - release memory allocated via alloc_pages_exact()
1750 * @virt: the value returned by alloc_pages_exact.
1751 * @size: size of allocation, same value as passed to alloc_pages_exact().
1752 *
1753 * Release the memory allocated by a previous call to alloc_pages_exact.
1754 */
1755void free_pages_exact(void *virt, size_t size)
1756{
1757	unsigned long addr = (unsigned long)virt;
1758	unsigned long end = addr + PAGE_ALIGN(size);
1759
1760	while (addr < end) {
1761		free_page(addr);
1762		addr += PAGE_SIZE;
1763	}
1764}
1765EXPORT_SYMBOL(free_pages_exact);
1766
1767static unsigned int nr_free_zone_pages(int offset)
1768{
1769	struct zoneref *z;
1770	struct zone *zone;
1771
1772	/* Just pick one node, since fallback list is circular */
1773	unsigned int sum = 0;
1774
1775	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1776
1777	for_each_zone_zonelist(zone, z, zonelist, offset) {
1778		unsigned long size = zone->present_pages;
1779		unsigned long high = zone->pages_high;
1780		if (size > high)
1781			sum += size - high;
1782	}
1783
1784	return sum;
1785}
1786
1787/*
1788 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1789 */
1790unsigned int nr_free_buffer_pages(void)
1791{
1792	return nr_free_zone_pages(gfp_zone(GFP_USER));
1793}
1794EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1795
1796/*
1797 * Amount of free RAM allocatable within all zones
1798 */
1799unsigned int nr_free_pagecache_pages(void)
1800{
1801	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1802}
1803
1804static inline void show_node(struct zone *zone)
1805{
1806	if (NUMA_BUILD)
1807		printk("Node %d ", zone_to_nid(zone));
1808}
1809
1810void si_meminfo(struct sysinfo *val)
1811{
1812	val->totalram = totalram_pages;
1813	val->sharedram = 0;
1814	val->freeram = global_page_state(NR_FREE_PAGES);
1815	val->bufferram = nr_blockdev_pages();
1816	val->totalhigh = totalhigh_pages;
1817	val->freehigh = nr_free_highpages();
1818	val->mem_unit = PAGE_SIZE;
1819}
1820
1821EXPORT_SYMBOL(si_meminfo);
1822
1823#ifdef CONFIG_NUMA
1824void si_meminfo_node(struct sysinfo *val, int nid)
1825{
1826	pg_data_t *pgdat = NODE_DATA(nid);
1827
1828	val->totalram = pgdat->node_present_pages;
1829	val->freeram = node_page_state(nid, NR_FREE_PAGES);
1830#ifdef CONFIG_HIGHMEM
1831	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
1832	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
1833			NR_FREE_PAGES);
1834#else
1835	val->totalhigh = 0;
1836	val->freehigh = 0;
1837#endif
1838	val->mem_unit = PAGE_SIZE;
1839}
1840#endif
1841
1842#define K(x) ((x) << (PAGE_SHIFT-10))
1843
1844/*
1845 * Show free area list (used inside shift_scroll-lock stuff)
1846 * We also calculate the percentage fragmentation. We do this by counting the
1847 * memory on each free list with the exception of the first item on the list.
1848 */
1849void show_free_areas(void)
1850{
1851	int cpu;
1852	struct zone *zone;
1853
1854	for_each_zone(zone) {
1855		if (!populated_zone(zone))
1856			continue;
1857
1858		show_node(zone);
1859		printk("%s per-cpu:\n", zone->name);
1860
1861		for_each_online_cpu(cpu) {
1862			struct per_cpu_pageset *pageset;
1863
1864			pageset = zone_pcp(zone, cpu);
1865
1866			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
1867			       cpu, pageset->pcp.high,
1868			       pageset->pcp.batch, pageset->pcp.count);
1869		}
1870	}
1871
1872	printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
1873		" inactive_file:%lu"
1874//TODO:  check/adjust line lengths
1875#ifdef CONFIG_UNEVICTABLE_LRU
1876		" unevictable:%lu"
1877#endif
1878		" dirty:%lu writeback:%lu unstable:%lu\n"
1879		" free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
1880		global_page_state(NR_ACTIVE_ANON),
1881		global_page_state(NR_ACTIVE_FILE),
1882		global_page_state(NR_INACTIVE_ANON),
1883		global_page_state(NR_INACTIVE_FILE),
1884#ifdef CONFIG_UNEVICTABLE_LRU
1885		global_page_state(NR_UNEVICTABLE),
1886#endif
1887		global_page_state(NR_FILE_DIRTY),
1888		global_page_state(NR_WRITEBACK),
1889		global_page_state(NR_UNSTABLE_NFS),
1890		global_page_state(NR_FREE_PAGES),
1891		global_page_state(NR_SLAB_RECLAIMABLE) +
1892			global_page_state(NR_SLAB_UNRECLAIMABLE),
1893		global_page_state(NR_FILE_MAPPED),
1894		global_page_state(NR_PAGETABLE),
1895		global_page_state(NR_BOUNCE));
1896
1897	for_each_zone(zone) {
1898		int i;
1899
1900		if (!populated_zone(zone))
1901			continue;
1902
1903		show_node(zone);
1904		printk("%s"
1905			" free:%lukB"
1906			" min:%lukB"
1907			" low:%lukB"
1908			" high:%lukB"
1909			" active_anon:%lukB"
1910			" inactive_anon:%lukB"
1911			" active_file:%lukB"
1912			" inactive_file:%lukB"
1913#ifdef CONFIG_UNEVICTABLE_LRU
1914			" unevictable:%lukB"
1915#endif
1916			" present:%lukB"
1917			" pages_scanned:%lu"
1918			" all_unreclaimable? %s"
1919			"\n",
1920			zone->name,
1921			K(zone_page_state(zone, NR_FREE_PAGES)),
1922			K(zone->pages_min),
1923			K(zone->pages_low),
1924			K(zone->pages_high),
1925			K(zone_page_state(zone, NR_ACTIVE_ANON)),
1926			K(zone_page_state(zone, NR_INACTIVE_ANON)),
1927			K(zone_page_state(zone, NR_ACTIVE_FILE)),
1928			K(zone_page_state(zone, NR_INACTIVE_FILE)),
1929#ifdef CONFIG_UNEVICTABLE_LRU
1930			K(zone_page_state(zone, NR_UNEVICTABLE)),
1931#endif
1932			K(zone->present_pages),
1933			zone->pages_scanned,
1934			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
1935			);
1936		printk("lowmem_reserve[]:");
1937		for (i = 0; i < MAX_NR_ZONES; i++)
1938			printk(" %lu", zone->lowmem_reserve[i]);
1939		printk("\n");
1940	}
1941
1942	for_each_zone(zone) {
1943 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
1944
1945		if (!populated_zone(zone))
1946			continue;
1947
1948		show_node(zone);
1949		printk("%s: ", zone->name);
1950
1951		spin_lock_irqsave(&zone->lock, flags);
1952		for (order = 0; order < MAX_ORDER; order++) {
1953			nr[order] = zone->free_area[order].nr_free;
1954			total += nr[order] << order;
1955		}
1956		spin_unlock_irqrestore(&zone->lock, flags);
1957		for (order = 0; order < MAX_ORDER; order++)
1958			printk("%lu*%lukB ", nr[order], K(1UL) << order);
1959		printk("= %lukB\n", K(total));
1960	}
1961
1962	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
1963
1964	show_swap_cache_info();
1965}
1966
1967static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
1968{
1969	zoneref->zone = zone;
1970	zoneref->zone_idx = zone_idx(zone);
1971}
1972
1973/*
1974 * Builds allocation fallback zone lists.
1975 *
1976 * Add all populated zones of a node to the zonelist.
1977 */
1978static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
1979				int nr_zones, enum zone_type zone_type)
1980{
1981	struct zone *zone;
1982
1983	BUG_ON(zone_type >= MAX_NR_ZONES);
1984	zone_type++;
1985
1986	do {
1987		zone_type--;
1988		zone = pgdat->node_zones + zone_type;
1989		if (populated_zone(zone)) {
1990			zoneref_set_zone(zone,
1991				&zonelist->_zonerefs[nr_zones++]);
1992			check_highest_zone(zone_type);
1993		}
1994
1995	} while (zone_type);
1996	return nr_zones;
1997}
1998
1999
2000/*
2001 *  zonelist_order:
2002 *  0 = automatic detection of better ordering.
2003 *  1 = order by ([node] distance, -zonetype)
2004 *  2 = order by (-zonetype, [node] distance)
2005 *
2006 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2007 *  the same zonelist. So only NUMA can configure this param.
2008 */
2009#define ZONELIST_ORDER_DEFAULT  0
2010#define ZONELIST_ORDER_NODE     1
2011#define ZONELIST_ORDER_ZONE     2
2012
2013/* zonelist order in the kernel.
2014 * set_zonelist_order() will set this to NODE or ZONE.
2015 */
2016static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2017static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2018
2019
2020#ifdef CONFIG_NUMA
2021/* The value user specified ....changed by config */
2022static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2023/* string for sysctl */
2024#define NUMA_ZONELIST_ORDER_LEN	16
2025char numa_zonelist_order[16] = "default";
2026
2027/*
2028 * interface for configure zonelist ordering.
2029 * command line option "numa_zonelist_order"
2030 *	= "[dD]efault	- default, automatic configuration.
2031 *	= "[nN]ode 	- order by node locality, then by zone within node
2032 *	= "[zZ]one      - order by zone, then by locality within zone
2033 */
2034
2035static int __parse_numa_zonelist_order(char *s)
2036{
2037	if (*s == 'd' || *s == 'D') {
2038		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2039	} else if (*s == 'n' || *s == 'N') {
2040		user_zonelist_order = ZONELIST_ORDER_NODE;
2041	} else if (*s == 'z' || *s == 'Z') {
2042		user_zonelist_order = ZONELIST_ORDER_ZONE;
2043	} else {
2044		printk(KERN_WARNING
2045			"Ignoring invalid numa_zonelist_order value:  "
2046			"%s\n", s);
2047		return -EINVAL;
2048	}
2049	return 0;
2050}
2051
2052static __init int setup_numa_zonelist_order(char *s)
2053{
2054	if (s)
2055		return __parse_numa_zonelist_order(s);
2056	return 0;
2057}
2058early_param("numa_zonelist_order", setup_numa_zonelist_order);
2059
2060/*
2061 * sysctl handler for numa_zonelist_order
2062 */
2063int numa_zonelist_order_handler(ctl_table *table, int write,
2064		struct file *file, void __user *buffer, size_t *length,
2065		loff_t *ppos)
2066{
2067	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2068	int ret;
2069
2070	if (write)
2071		strncpy(saved_string, (char*)table->data,
2072			NUMA_ZONELIST_ORDER_LEN);
2073	ret = proc_dostring(table, write, file, buffer, length, ppos);
2074	if (ret)
2075		return ret;
2076	if (write) {
2077		int oldval = user_zonelist_order;
2078		if (__parse_numa_zonelist_order((char*)table->data)) {
2079			/*
2080			 * bogus value.  restore saved string
2081			 */
2082			strncpy((char*)table->data, saved_string,
2083				NUMA_ZONELIST_ORDER_LEN);
2084			user_zonelist_order = oldval;
2085		} else if (oldval != user_zonelist_order)
2086			build_all_zonelists();
2087	}
2088	return 0;
2089}
2090
2091
2092#define MAX_NODE_LOAD (num_online_nodes())
2093static int node_load[MAX_NUMNODES];
2094
2095/**
2096 * find_next_best_node - find the next node that should appear in a given node's fallback list
2097 * @node: node whose fallback list we're appending
2098 * @used_node_mask: nodemask_t of already used nodes
2099 *
2100 * We use a number of factors to determine which is the next node that should
2101 * appear on a given node's fallback list.  The node should not have appeared
2102 * already in @node's fallback list, and it should be the next closest node
2103 * according to the distance array (which contains arbitrary distance values
2104 * from each node to each node in the system), and should also prefer nodes
2105 * with no CPUs, since presumably they'll have very little allocation pressure
2106 * on them otherwise.
2107 * It returns -1 if no node is found.
2108 */
2109static int find_next_best_node(int node, nodemask_t *used_node_mask)
2110{
2111	int n, val;
2112	int min_val = INT_MAX;
2113	int best_node = -1;
2114	node_to_cpumask_ptr(tmp, 0);
2115
2116	/* Use the local node if we haven't already */
2117	if (!node_isset(node, *used_node_mask)) {
2118		node_set(node, *used_node_mask);
2119		return node;
2120	}
2121
2122	for_each_node_state(n, N_HIGH_MEMORY) {
2123
2124		/* Don't want a node to appear more than once */
2125		if (node_isset(n, *used_node_mask))
2126			continue;
2127
2128		/* Use the distance array to find the distance */
2129		val = node_distance(node, n);
2130
2131		/* Penalize nodes under us ("prefer the next node") */
2132		val += (n < node);
2133
2134		/* Give preference to headless and unused nodes */
2135		node_to_cpumask_ptr_next(tmp, n);
2136		if (!cpus_empty(*tmp))
2137			val += PENALTY_FOR_NODE_WITH_CPUS;
2138
2139		/* Slight preference for less loaded node */
2140		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2141		val += node_load[n];
2142
2143		if (val < min_val) {
2144			min_val = val;
2145			best_node = n;
2146		}
2147	}
2148
2149	if (best_node >= 0)
2150		node_set(best_node, *used_node_mask);
2151
2152	return best_node;
2153}
2154
2155
2156/*
2157 * Build zonelists ordered by node and zones within node.
2158 * This results in maximum locality--normal zone overflows into local
2159 * DMA zone, if any--but risks exhausting DMA zone.
2160 */
2161static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2162{
2163	int j;
2164	struct zonelist *zonelist;
2165
2166	zonelist = &pgdat->node_zonelists[0];
2167	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2168		;
2169	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2170							MAX_NR_ZONES - 1);
2171	zonelist->_zonerefs[j].zone = NULL;
2172	zonelist->_zonerefs[j].zone_idx = 0;
2173}
2174
2175/*
2176 * Build gfp_thisnode zonelists
2177 */
2178static void build_thisnode_zonelists(pg_data_t *pgdat)
2179{
2180	int j;
2181	struct zonelist *zonelist;
2182
2183	zonelist = &pgdat->node_zonelists[1];
2184	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2185	zonelist->_zonerefs[j].zone = NULL;
2186	zonelist->_zonerefs[j].zone_idx = 0;
2187}
2188
2189/*
2190 * Build zonelists ordered by zone and nodes within zones.
2191 * This results in conserving DMA zone[s] until all Normal memory is
2192 * exhausted, but results in overflowing to remote node while memory
2193 * may still exist in local DMA zone.
2194 */
2195static int node_order[MAX_NUMNODES];
2196
2197static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2198{
2199	int pos, j, node;
2200	int zone_type;		/* needs to be signed */
2201	struct zone *z;
2202	struct zonelist *zonelist;
2203
2204	zonelist = &pgdat->node_zonelists[0];
2205	pos = 0;
2206	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2207		for (j = 0; j < nr_nodes; j++) {
2208			node = node_order[j];
2209			z = &NODE_DATA(node)->node_zones[zone_type];
2210			if (populated_zone(z)) {
2211				zoneref_set_zone(z,
2212					&zonelist->_zonerefs[pos++]);
2213				check_highest_zone(zone_type);
2214			}
2215		}
2216	}
2217	zonelist->_zonerefs[pos].zone = NULL;
2218	zonelist->_zonerefs[pos].zone_idx = 0;
2219}
2220
2221static int default_zonelist_order(void)
2222{
2223	int nid, zone_type;
2224	unsigned long low_kmem_size,total_size;
2225	struct zone *z;
2226	int average_size;
2227	/*
2228         * ZONE_DMA and ZONE_DMA32 can be very small area in the sytem.
2229	 * If they are really small and used heavily, the system can fall
2230	 * into OOM very easily.
2231	 * This function detect ZONE_DMA/DMA32 size and confgigures zone order.
2232	 */
2233	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
2234	low_kmem_size = 0;
2235	total_size = 0;
2236	for_each_online_node(nid) {
2237		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2238			z = &NODE_DATA(nid)->node_zones[zone_type];
2239			if (populated_zone(z)) {
2240				if (zone_type < ZONE_NORMAL)
2241					low_kmem_size += z->present_pages;
2242				total_size += z->present_pages;
2243			}
2244		}
2245	}
2246	if (!low_kmem_size ||  /* there are no DMA area. */
2247	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2248		return ZONELIST_ORDER_NODE;
2249	/*
2250	 * look into each node's config.
2251  	 * If there is a node whose DMA/DMA32 memory is very big area on
2252 	 * local memory, NODE_ORDER may be suitable.
2253         */
2254	average_size = total_size /
2255				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2256	for_each_online_node(nid) {
2257		low_kmem_size = 0;
2258		total_size = 0;
2259		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2260			z = &NODE_DATA(nid)->node_zones[zone_type];
2261			if (populated_zone(z)) {
2262				if (zone_type < ZONE_NORMAL)
2263					low_kmem_size += z->present_pages;
2264				total_size += z->present_pages;
2265			}
2266		}
2267		if (low_kmem_size &&
2268		    total_size > average_size && /* ignore small node */
2269		    low_kmem_size > total_size * 70/100)
2270			return ZONELIST_ORDER_NODE;
2271	}
2272	return ZONELIST_ORDER_ZONE;
2273}
2274
2275static void set_zonelist_order(void)
2276{
2277	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2278		current_zonelist_order = default_zonelist_order();
2279	else
2280		current_zonelist_order = user_zonelist_order;
2281}
2282
2283static void build_zonelists(pg_data_t *pgdat)
2284{
2285	int j, node, load;
2286	enum zone_type i;
2287	nodemask_t used_mask;
2288	int local_node, prev_node;
2289	struct zonelist *zonelist;
2290	int order = current_zonelist_order;
2291
2292	/* initialize zonelists */
2293	for (i = 0; i < MAX_ZONELISTS; i++) {
2294		zonelist = pgdat->node_zonelists + i;
2295		zonelist->_zonerefs[0].zone = NULL;
2296		zonelist->_zonerefs[0].zone_idx = 0;
2297	}
2298
2299	/* NUMA-aware ordering of nodes */
2300	local_node = pgdat->node_id;
2301	load = num_online_nodes();
2302	prev_node = local_node;
2303	nodes_clear(used_mask);
2304
2305	memset(node_load, 0, sizeof(node_load));
2306	memset(node_order, 0, sizeof(node_order));
2307	j = 0;
2308
2309	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2310		int distance = node_distance(local_node, node);
2311
2312		/*
2313		 * If another node is sufficiently far away then it is better
2314		 * to reclaim pages in a zone before going off node.
2315		 */
2316		if (distance > RECLAIM_DISTANCE)
2317			zone_reclaim_mode = 1;
2318
2319		/*
2320		 * We don't want to pressure a particular node.
2321		 * So adding penalty to the first node in same
2322		 * distance group to make it round-robin.
2323		 */
2324		if (distance != node_distance(local_node, prev_node))
2325			node_load[node] = load;
2326
2327		prev_node = node;
2328		load--;
2329		if (order == ZONELIST_ORDER_NODE)
2330			build_zonelists_in_node_order(pgdat, node);
2331		else
2332			node_order[j++] = node;	/* remember order */
2333	}
2334
2335	if (order == ZONELIST_ORDER_ZONE) {
2336		/* calculate node order -- i.e., DMA last! */
2337		build_zonelists_in_zone_order(pgdat, j);
2338	}
2339
2340	build_thisnode_zonelists(pgdat);
2341}
2342
2343/* Construct the zonelist performance cache - see further mmzone.h */
2344static void build_zonelist_cache(pg_data_t *pgdat)
2345{
2346	struct zonelist *zonelist;
2347	struct zonelist_cache *zlc;
2348	struct zoneref *z;
2349
2350	zonelist = &pgdat->node_zonelists[0];
2351	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2352	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2353	for (z = zonelist->_zonerefs; z->zone; z++)
2354		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2355}
2356
2357
2358#else	/* CONFIG_NUMA */
2359
2360static void set_zonelist_order(void)
2361{
2362	current_zonelist_order = ZONELIST_ORDER_ZONE;
2363}
2364
2365static void build_zonelists(pg_data_t *pgdat)
2366{
2367	int node, local_node;
2368	enum zone_type j;
2369	struct zonelist *zonelist;
2370
2371	local_node = pgdat->node_id;
2372
2373	zonelist = &pgdat->node_zonelists[0];
2374	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2375
2376	/*
2377	 * Now we build the zonelist so that it contains the zones
2378	 * of all the other nodes.
2379	 * We don't want to pressure a particular node, so when
2380	 * building the zones for node N, we make sure that the
2381	 * zones coming right after the local ones are those from
2382	 * node N+1 (modulo N)
2383	 */
2384	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2385		if (!node_online(node))
2386			continue;
2387		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2388							MAX_NR_ZONES - 1);
2389	}
2390	for (node = 0; node < local_node; node++) {
2391		if (!node_online(node))
2392			continue;
2393		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2394							MAX_NR_ZONES - 1);
2395	}
2396
2397	zonelist->_zonerefs[j].zone = NULL;
2398	zonelist->_zonerefs[j].zone_idx = 0;
2399}
2400
2401/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2402static void build_zonelist_cache(pg_data_t *pgdat)
2403{
2404	pgdat->node_zonelists[0].zlcache_ptr = NULL;
2405}
2406
2407#endif	/* CONFIG_NUMA */
2408
2409/* return values int ....just for stop_machine() */
2410static int __build_all_zonelists(void *dummy)
2411{
2412	int nid;
2413
2414	for_each_online_node(nid) {
2415		pg_data_t *pgdat = NODE_DATA(nid);
2416
2417		build_zonelists(pgdat);
2418		build_zonelist_cache(pgdat);
2419	}
2420	return 0;
2421}
2422
2423void build_all_zonelists(void)
2424{
2425	set_zonelist_order();
2426
2427	if (system_state == SYSTEM_BOOTING) {
2428		__build_all_zonelists(NULL);
2429		mminit_verify_zonelist();
2430		cpuset_init_current_mems_allowed();
2431	} else {
2432		/* we have to stop all cpus to guarantee there is no user
2433		   of zonelist */
2434		stop_machine(__build_all_zonelists, NULL, NULL);
2435		/* cpuset refresh routine should be here */
2436	}
2437	vm_total_pages = nr_free_pagecache_pages();
2438	/*
2439	 * Disable grouping by mobility if the number of pages in the
2440	 * system is too low to allow the mechanism to work. It would be
2441	 * more accurate, but expensive to check per-zone. This check is
2442	 * made on memory-hotadd so a system can start with mobility
2443	 * disabled and enable it later
2444	 */
2445	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2446		page_group_by_mobility_disabled = 1;
2447	else
2448		page_group_by_mobility_disabled = 0;
2449
2450	printk("Built %i zonelists in %s order, mobility grouping %s.  "
2451		"Total pages: %ld\n",
2452			num_online_nodes(),
2453			zonelist_order_name[current_zonelist_order],
2454			page_group_by_mobility_disabled ? "off" : "on",
2455			vm_total_pages);
2456#ifdef CONFIG_NUMA
2457	printk("Policy zone: %s\n", zone_names[policy_zone]);
2458#endif
2459}
2460
2461/*
2462 * Helper functions to size the waitqueue hash table.
2463 * Essentially these want to choose hash table sizes sufficiently
2464 * large so that collisions trying to wait on pages are rare.
2465 * But in fact, the number of active page waitqueues on typical
2466 * systems is ridiculously low, less than 200. So this is even
2467 * conservative, even though it seems large.
2468 *
2469 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2470 * waitqueues, i.e. the size of the waitq table given the number of pages.
2471 */
2472#define PAGES_PER_WAITQUEUE	256
2473
2474#ifndef CONFIG_MEMORY_HOTPLUG
2475static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2476{
2477	unsigned long size = 1;
2478
2479	pages /= PAGES_PER_WAITQUEUE;
2480
2481	while (size < pages)
2482		size <<= 1;
2483
2484	/*
2485	 * Once we have dozens or even hundreds of threads sleeping
2486	 * on IO we've got bigger problems than wait queue collision.
2487	 * Limit the size of the wait table to a reasonable size.
2488	 */
2489	size = min(size, 4096UL);
2490
2491	return max(size, 4UL);
2492}
2493#else
2494/*
2495 * A zone's size might be changed by hot-add, so it is not possible to determine
2496 * a suitable size for its wait_table.  So we use the maximum size now.
2497 *
2498 * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
2499 *
2500 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2501 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2502 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2503 *
2504 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
2505 * or more by the traditional way. (See above).  It equals:
2506 *
2507 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2508 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2509 *    powerpc (64K page size)             : =  (32G +16M)byte.
2510 */
2511static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2512{
2513	return 4096UL;
2514}
2515#endif
2516
2517/*
2518 * This is an integer logarithm so that shifts can be used later
2519 * to extract the more random high bits from the multiplicative
2520 * hash function before the remainder is taken.
2521 */
2522static inline unsigned long wait_table_bits(unsigned long size)
2523{
2524	return ffz(~size);
2525}
2526
2527#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2528
2529/*
2530 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2531 * of blocks reserved is based on zone->pages_min. The memory within the
2532 * reserve will tend to store contiguous free pages. Setting min_free_kbytes
2533 * higher will lead to a bigger reserve which will get freed as contiguous
2534 * blocks as reclaim kicks in
2535 */
2536static void setup_zone_migrate_reserve(struct zone *zone)
2537{
2538	unsigned long start_pfn, pfn, end_pfn;
2539	struct page *page;
2540	unsigned long reserve, block_migratetype;
2541
2542	/* Get the start pfn, end pfn and the number of blocks to reserve */
2543	start_pfn = zone->zone_start_pfn;
2544	end_pfn = start_pfn + zone->spanned_pages;
2545	reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
2546							pageblock_order;
2547
2548	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2549		if (!pfn_valid(pfn))
2550			continue;
2551		page = pfn_to_page(pfn);
2552
2553		/* Watch out for overlapping nodes */
2554		if (page_to_nid(page) != zone_to_nid(zone))
2555			continue;
2556
2557		/* Blocks with reserved pages will never free, skip them. */
2558		if (PageReserved(page))
2559			continue;
2560
2561		block_migratetype = get_pageblock_migratetype(page);
2562
2563		/* If this block is reserved, account for it */
2564		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2565			reserve--;
2566			continue;
2567		}
2568
2569		/* Suitable for reserving if this block is movable */
2570		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2571			set_pageblock_migratetype(page, MIGRATE_RESERVE);
2572			move_freepages_block(zone, page, MIGRATE_RESERVE);
2573			reserve--;
2574			continue;
2575		}
2576
2577		/*
2578		 * If the reserve is met and this is a previous reserved block,
2579		 * take it back
2580		 */
2581		if (block_migratetype == MIGRATE_RESERVE) {
2582			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2583			move_freepages_block(zone, page, MIGRATE_MOVABLE);
2584		}
2585	}
2586}
2587
2588/*
2589 * Initially all pages are reserved - free ones are freed
2590 * up by free_all_bootmem() once the early boot process is
2591 * done. Non-atomic initialization, single-pass.
2592 */
2593void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2594		unsigned long start_pfn, enum memmap_context context)
2595{
2596	struct page *page;
2597	unsigned long end_pfn = start_pfn + size;
2598	unsigned long pfn;
2599	struct zone *z;
2600
2601	z = &NODE_DATA(nid)->node_zones[zone];
2602	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2603		/*
2604		 * There can be holes in boot-time mem_map[]s
2605		 * handed to this function.  They do not
2606		 * exist on hotplugged memory.
2607		 */
2608		if (context == MEMMAP_EARLY) {
2609			if (!early_pfn_valid(pfn))
2610				continue;
2611			if (!early_pfn_in_nid(pfn, nid))
2612				continue;
2613		}
2614		page = pfn_to_page(pfn);
2615		set_page_links(page, zone, nid, pfn);
2616		mminit_verify_page_links(page, zone, nid, pfn);
2617		init_page_count(page);
2618		reset_page_mapcount(page);
2619		SetPageReserved(page);
2620		/*
2621		 * Mark the block movable so that blocks are reserved for
2622		 * movable at startup. This will force kernel allocations
2623		 * to reserve their blocks rather than leaking throughout
2624		 * the address space during boot when many long-lived
2625		 * kernel allocations are made. Later some blocks near
2626		 * the start are marked MIGRATE_RESERVE by
2627		 * setup_zone_migrate_reserve()
2628		 *
2629		 * bitmap is created for zone's valid pfn range. but memmap
2630		 * can be created for invalid pages (for alignment)
2631		 * check here not to call set_pageblock_migratetype() against
2632		 * pfn out of zone.
2633		 */
2634		if ((z->zone_start_pfn <= pfn)
2635		    && (pfn < z->zone_start_pfn + z->spanned_pages)
2636		    && !(pfn & (pageblock_nr_pages - 1)))
2637			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2638
2639		INIT_LIST_HEAD(&page->lru);
2640#ifdef WANT_PAGE_VIRTUAL
2641		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
2642		if (!is_highmem_idx(zone))
2643			set_page_address(page, __va(pfn << PAGE_SHIFT));
2644#endif
2645	}
2646}
2647
2648static void __meminit zone_init_free_lists(struct zone *zone)
2649{
2650	int order, t;
2651	for_each_migratetype_order(order, t) {
2652		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2653		zone->free_area[order].nr_free = 0;
2654	}
2655}
2656
2657#ifndef __HAVE_ARCH_MEMMAP_INIT
2658#define memmap_init(size, nid, zone, start_pfn) \
2659	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
2660#endif
2661
2662static int zone_batchsize(struct zone *zone)
2663{
2664	int batch;
2665
2666	/*
2667	 * The per-cpu-pages pools are set to around 1000th of the
2668	 * size of the zone.  But no more than 1/2 of a meg.
2669	 *
2670	 * OK, so we don't know how big the cache is.  So guess.
2671	 */
2672	batch = zone->present_pages / 1024;
2673	if (batch * PAGE_SIZE > 512 * 1024)
2674		batch = (512 * 1024) / PAGE_SIZE;
2675	batch /= 4;		/* We effectively *= 4 below */
2676	if (batch < 1)
2677		batch = 1;
2678
2679	/*
2680	 * Clamp the batch to a 2^n - 1 value. Having a power
2681	 * of 2 value was found to be more likely to have
2682	 * suboptimal cache aliasing properties in some cases.
2683	 *
2684	 * For example if 2 tasks are alternately allocating
2685	 * batches of pages, one task can end up with a lot
2686	 * of pages of one half of the possible page colors
2687	 * and the other with pages of the other colors.
2688	 */
2689	batch = (1 << (fls(batch + batch/2)-1)) - 1;
2690
2691	return batch;
2692}
2693
2694static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2695{
2696	struct per_cpu_pages *pcp;
2697
2698	memset(p, 0, sizeof(*p));
2699
2700	pcp = &p->pcp;
2701	pcp->count = 0;
2702	pcp->high = 6 * batch;
2703	pcp->batch = max(1UL, 1 * batch);
2704	INIT_LIST_HEAD(&pcp->list);
2705}
2706
2707/*
2708 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
2709 * to the value high for the pageset p.
2710 */
2711
2712static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2713				unsigned long high)
2714{
2715	struct per_cpu_pages *pcp;
2716
2717	pcp = &p->pcp;
2718	pcp->high = high;
2719	pcp->batch = max(1UL, high/4);
2720	if ((high/4) > (PAGE_SHIFT * 8))
2721		pcp->batch = PAGE_SHIFT * 8;
2722}
2723
2724
2725#ifdef CONFIG_NUMA
2726/*
2727 * Boot pageset table. One per cpu which is going to be used for all
2728 * zones and all nodes. The parameters will be set in such a way
2729 * that an item put on a list will immediately be handed over to
2730 * the buddy list. This is safe since pageset manipulation is done
2731 * with interrupts disabled.
2732 *
2733 * Some NUMA counter updates may also be caught by the boot pagesets.
2734 *
2735 * The boot_pagesets must be kept even after bootup is complete for
2736 * unused processors and/or zones. They do play a role for bootstrapping
2737 * hotplugged processors.
2738 *
2739 * zoneinfo_show() and maybe other functions do
2740 * not check if the processor is online before following the pageset pointer.
2741 * Other parts of the kernel may not check if the zone is available.
2742 */
2743static struct per_cpu_pageset boot_pageset[NR_CPUS];
2744
2745/*
2746 * Dynamically allocate memory for the
2747 * per cpu pageset array in struct zone.
2748 */
2749static int __cpuinit process_zones(int cpu)
2750{
2751	struct zone *zone, *dzone;
2752	int node = cpu_to_node(cpu);
2753
2754	node_set_state(node, N_CPU);	/* this node has a cpu */
2755
2756	for_each_zone(zone) {
2757
2758		if (!populated_zone(zone))
2759			continue;
2760
2761		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
2762					 GFP_KERNEL, node);
2763		if (!zone_pcp(zone, cpu))
2764			goto bad;
2765
2766		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
2767
2768		if (percpu_pagelist_fraction)
2769			setup_pagelist_highmark(zone_pcp(zone, cpu),
2770			 	(zone->present_pages / percpu_pagelist_fraction));
2771	}
2772
2773	return 0;
2774bad:
2775	for_each_zone(dzone) {
2776		if (!populated_zone(dzone))
2777			continue;
2778		if (dzone == zone)
2779			break;
2780		kfree(zone_pcp(dzone, cpu));
2781		zone_pcp(dzone, cpu) = NULL;
2782	}
2783	return -ENOMEM;
2784}
2785
2786static inline void free_zone_pagesets(int cpu)
2787{
2788	struct zone *zone;
2789
2790	for_each_zone(zone) {
2791		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2792
2793		/* Free per_cpu_pageset if it is slab allocated */
2794		if (pset != &boot_pageset[cpu])
2795			kfree(pset);
2796		zone_pcp(zone, cpu) = NULL;
2797	}
2798}
2799
2800static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
2801		unsigned long action,
2802		void *hcpu)
2803{
2804	int cpu = (long)hcpu;
2805	int ret = NOTIFY_OK;
2806
2807	switch (action) {
2808	case CPU_UP_PREPARE:
2809	case CPU_UP_PREPARE_FROZEN:
2810		if (process_zones(cpu))
2811			ret = NOTIFY_BAD;
2812		break;
2813	case CPU_UP_CANCELED:
2814	case CPU_UP_CANCELED_FROZEN:
2815	case CPU_DEAD:
2816	case CPU_DEAD_FROZEN:
2817		free_zone_pagesets(cpu);
2818		break;
2819	default:
2820		break;
2821	}
2822	return ret;
2823}
2824
2825static struct notifier_block __cpuinitdata pageset_notifier =
2826	{ &pageset_cpuup_callback, NULL, 0 };
2827
2828void __init setup_per_cpu_pageset(void)
2829{
2830	int err;
2831
2832	/* Initialize per_cpu_pageset for cpu 0.
2833	 * A cpuup callback will do this for every cpu
2834	 * as it comes online
2835	 */
2836	err = process_zones(smp_processor_id());
2837	BUG_ON(err);
2838	register_cpu_notifier(&pageset_notifier);
2839}
2840
2841#endif
2842
2843static noinline __init_refok
2844int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
2845{
2846	int i;
2847	struct pglist_data *pgdat = zone->zone_pgdat;
2848	size_t alloc_size;
2849
2850	/*
2851	 * The per-page waitqueue mechanism uses hashed waitqueues
2852	 * per zone.
2853	 */
2854	zone->wait_table_hash_nr_entries =
2855		 wait_table_hash_nr_entries(zone_size_pages);
2856	zone->wait_table_bits =
2857		wait_table_bits(zone->wait_table_hash_nr_entries);
2858	alloc_size = zone->wait_table_hash_nr_entries
2859					* sizeof(wait_queue_head_t);
2860
2861	if (!slab_is_available()) {
2862		zone->wait_table = (wait_queue_head_t *)
2863			alloc_bootmem_node(pgdat, alloc_size);
2864	} else {
2865		/*
2866		 * This case means that a zone whose size was 0 gets new memory
2867		 * via memory hot-add.
2868		 * But it may be the case that a new node was hot-added.  In
2869		 * this case vmalloc() will not be able to use this new node's
2870		 * memory - this wait_table must be initialized to use this new
2871		 * node itself as well.
2872		 * To use this new node's memory, further consideration will be
2873		 * necessary.
2874		 */
2875		zone->wait_table = vmalloc(alloc_size);
2876	}
2877	if (!zone->wait_table)
2878		return -ENOMEM;
2879
2880	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
2881		init_waitqueue_head(zone->wait_table + i);
2882
2883	return 0;
2884}
2885
2886static __meminit void zone_pcp_init(struct zone *zone)
2887{
2888	int cpu;
2889	unsigned long batch = zone_batchsize(zone);
2890
2891	for (cpu = 0; cpu < NR_CPUS; cpu++) {
2892#ifdef CONFIG_NUMA
2893		/* Early boot. Slab allocator not functional yet */
2894		zone_pcp(zone, cpu) = &boot_pageset[cpu];
2895		setup_pageset(&boot_pageset[cpu],0);
2896#else
2897		setup_pageset(zone_pcp(zone,cpu), batch);
2898#endif
2899	}
2900	if (zone->present_pages)
2901		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
2902			zone->name, zone->present_pages, batch);
2903}
2904
2905__meminit int init_currently_empty_zone(struct zone *zone,
2906					unsigned long zone_start_pfn,
2907					unsigned long size,
2908					enum memmap_context context)
2909{
2910	struct pglist_data *pgdat = zone->zone_pgdat;
2911	int ret;
2912	ret = zone_wait_table_init(zone, size);
2913	if (ret)
2914		return ret;
2915	pgdat->nr_zones = zone_idx(zone) + 1;
2916
2917	zone->zone_start_pfn = zone_start_pfn;
2918
2919	mminit_dprintk(MMINIT_TRACE, "memmap_init",
2920			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
2921			pgdat->node_id,
2922			(unsigned long)zone_idx(zone),
2923			zone_start_pfn, (zone_start_pfn + size));
2924
2925	zone_init_free_lists(zone);
2926
2927	return 0;
2928}
2929
2930#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
2931/*
2932 * Basic iterator support. Return the first range of PFNs for a node
2933 * Note: nid == MAX_NUMNODES returns first region regardless of node
2934 */
2935static int __meminit first_active_region_index_in_nid(int nid)
2936{
2937	int i;
2938
2939	for (i = 0; i < nr_nodemap_entries; i++)
2940		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
2941			return i;
2942
2943	return -1;
2944}
2945
2946/*
2947 * Basic iterator support. Return the next active range of PFNs for a node
2948 * Note: nid == MAX_NUMNODES returns next region regardless of node
2949 */
2950static int __meminit next_active_region_index_in_nid(int index, int nid)
2951{
2952	for (index = index + 1; index < nr_nodemap_entries; index++)
2953		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
2954			return index;
2955
2956	return -1;
2957}
2958
2959#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
2960/*
2961 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
2962 * Architectures may implement their own version but if add_active_range()
2963 * was used and there are no special requirements, this is a convenient
2964 * alternative
2965 */
2966int __meminit early_pfn_to_nid(unsigned long pfn)
2967{
2968	int i;
2969
2970	for (i = 0; i < nr_nodemap_entries; i++) {
2971		unsigned long start_pfn = early_node_map[i].start_pfn;
2972		unsigned long end_pfn = early_node_map[i].end_pfn;
2973
2974		if (start_pfn <= pfn && pfn < end_pfn)
2975			return early_node_map[i].nid;
2976	}
2977
2978	return 0;
2979}
2980#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
2981
2982/* Basic iterator support to walk early_node_map[] */
2983#define for_each_active_range_index_in_nid(i, nid) \
2984	for (i = first_active_region_index_in_nid(nid); i != -1; \
2985				i = next_active_region_index_in_nid(i, nid))
2986
2987/**
2988 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
2989 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
2990 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
2991 *
2992 * If an architecture guarantees that all ranges registered with
2993 * add_active_ranges() contain no holes and may be freed, this
2994 * this function may be used instead of calling free_bootmem() manually.
2995 */
2996void __init free_bootmem_with_active_regions(int nid,
2997						unsigned long max_low_pfn)
2998{
2999	int i;
3000
3001	for_each_active_range_index_in_nid(i, nid) {
3002		unsigned long size_pages = 0;
3003		unsigned long end_pfn = early_node_map[i].end_pfn;
3004
3005		if (early_node_map[i].start_pfn >= max_low_pfn)
3006			continue;
3007
3008		if (end_pfn > max_low_pfn)
3009			end_pfn = max_low_pfn;
3010
3011		size_pages = end_pfn - early_node_map[i].start_pfn;
3012		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3013				PFN_PHYS(early_node_map[i].start_pfn),
3014				size_pages << PAGE_SHIFT);
3015	}
3016}
3017
3018void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3019{
3020	int i;
3021	int ret;
3022
3023	for_each_active_range_index_in_nid(i, nid) {
3024		ret = work_fn(early_node_map[i].start_pfn,
3025			      early_node_map[i].end_pfn, data);
3026		if (ret)
3027			break;
3028	}
3029}
3030/**
3031 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3032 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3033 *
3034 * If an architecture guarantees that all ranges registered with
3035 * add_active_ranges() contain no holes and may be freed, this
3036 * function may be used instead of calling memory_present() manually.
3037 */
3038void __init sparse_memory_present_with_active_regions(int nid)
3039{
3040	int i;
3041
3042	for_each_active_range_index_in_nid(i, nid)
3043		memory_present(early_node_map[i].nid,
3044				early_node_map[i].start_pfn,
3045				early_node_map[i].end_pfn);
3046}
3047
3048/**
3049 * push_node_boundaries - Push node boundaries to at least the requested boundary
3050 * @nid: The nid of the node to push the boundary for
3051 * @start_pfn: The start pfn of the node
3052 * @end_pfn: The end pfn of the node
3053 *
3054 * In reserve-based hot-add, mem_map is allocated that is unused until hotadd
3055 * time. Specifically, on x86_64, SRAT will report ranges that can potentially
3056 * be hotplugged even though no physical memory exists. This function allows
3057 * an arch to push out the node boundaries so mem_map is allocated that can
3058 * be used later.
3059 */
3060#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
3061void __init push_node_boundaries(unsigned int nid,
3062		unsigned long start_pfn, unsigned long end_pfn)
3063{
3064	mminit_dprintk(MMINIT_TRACE, "zoneboundary",
3065			"Entering push_node_boundaries(%u, %lu, %lu)\n",
3066			nid, start_pfn, end_pfn);
3067
3068	/* Initialise the boundary for this node if necessary */
3069	if (node_boundary_end_pfn[nid] == 0)
3070		node_boundary_start_pfn[nid] = -1UL;
3071
3072	/* Update the boundaries */
3073	if (node_boundary_start_pfn[nid] > start_pfn)
3074		node_boundary_start_pfn[nid] = start_pfn;
3075	if (node_boundary_end_pfn[nid] < end_pfn)
3076		node_boundary_end_pfn[nid] = end_pfn;
3077}
3078
3079/* If necessary, push the node boundary out for reserve hotadd */
3080static void __meminit account_node_boundary(unsigned int nid,
3081		unsigned long *start_pfn, unsigned long *end_pfn)
3082{
3083	mminit_dprintk(MMINIT_TRACE, "zoneboundary",
3084			"Entering account_node_boundary(%u, %lu, %lu)\n",
3085			nid, *start_pfn, *end_pfn);
3086
3087	/* Return if boundary information has not been provided */
3088	if (node_boundary_end_pfn[nid] == 0)
3089		return;
3090
3091	/* Check the boundaries and update if necessary */
3092	if (node_boundary_start_pfn[nid] < *start_pfn)
3093		*start_pfn = node_boundary_start_pfn[nid];
3094	if (node_boundary_end_pfn[nid] > *end_pfn)
3095		*end_pfn = node_boundary_end_pfn[nid];
3096}
3097#else
3098void __init push_node_boundaries(unsigned int nid,
3099		unsigned long start_pfn, unsigned long end_pfn) {}
3100
3101static void __meminit account_node_boundary(unsigned int nid,
3102		unsigned long *start_pfn, unsigned long *end_pfn) {}
3103#endif
3104
3105
3106/**
3107 * get_pfn_range_for_nid - Return the start and end page frames for a node
3108 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3109 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3110 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3111 *
3112 * It returns the start and end page frame of a node based on information
3113 * provided by an arch calling add_active_range(). If called for a node
3114 * with no available memory, a warning is printed and the start and end
3115 * PFNs will be 0.
3116 */
3117void __meminit get_pfn_range_for_nid(unsigned int nid,
3118			unsigned long *start_pfn, unsigned long *end_pfn)
3119{
3120	int i;
3121	*start_pfn = -1UL;
3122	*end_pfn = 0;
3123
3124	for_each_active_range_index_in_nid(i, nid) {
3125		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3126		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3127	}
3128
3129	if (*start_pfn == -1UL)
3130		*start_pfn = 0;
3131
3132	/* Push the node boundaries out if requested */
3133	account_node_boundary(nid, start_pfn, end_pfn);
3134}
3135
3136/*
3137 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3138 * assumption is made that zones within a node are ordered in monotonic
3139 * increasing memory addresses so that the "highest" populated zone is used
3140 */
3141static void __init find_usable_zone_for_movable(void)
3142{
3143	int zone_index;
3144	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3145		if (zone_index == ZONE_MOVABLE)
3146			continue;
3147
3148		if (arch_zone_highest_possible_pfn[zone_index] >
3149				arch_zone_lowest_possible_pfn[zone_index])
3150			break;
3151	}
3152
3153	VM_BUG_ON(zone_index == -1);
3154	movable_zone = zone_index;
3155}
3156
3157/*
3158 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3159 * because it is sized independant of architecture. Unlike the other zones,
3160 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3161 * in each node depending on the size of each node and how evenly kernelcore
3162 * is distributed. This helper function adjusts the zone ranges
3163 * provided by the architecture for a given node by using the end of the
3164 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3165 * zones within a node are in order of monotonic increases memory addresses
3166 */
3167static void __meminit adjust_zone_range_for_zone_movable(int nid,
3168					unsigned long zone_type,
3169					unsigned long node_start_pfn,
3170					unsigned long node_end_pfn,
3171					unsigned long *zone_start_pfn,
3172					unsigned long *zone_end_pfn)
3173{
3174	/* Only adjust if ZONE_MOVABLE is on this node */
3175	if (zone_movable_pfn[nid]) {
3176		/* Size ZONE_MOVABLE */
3177		if (zone_type == ZONE_MOVABLE) {
3178			*zone_start_pfn = zone_movable_pfn[nid];
3179			*zone_end_pfn = min(node_end_pfn,
3180				arch_zone_highest_possible_pfn[movable_zone]);
3181
3182		/* Adjust for ZONE_MOVABLE starting within this range */
3183		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3184				*zone_end_pfn > zone_movable_pfn[nid]) {
3185			*zone_end_pfn = zone_movable_pfn[nid];
3186
3187		/* Check if this whole range is within ZONE_MOVABLE */
3188		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3189			*zone_start_pfn = *zone_end_pfn;
3190	}
3191}
3192
3193/*
3194 * Return the number of pages a zone spans in a node, including holes
3195 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3196 */
3197static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3198					unsigned long zone_type,
3199					unsigned long *ignored)
3200{
3201	unsigned long node_start_pfn, node_end_pfn;
3202	unsigned long zone_start_pfn, zone_end_pfn;
3203
3204	/* Get the start and end of the node and zone */
3205	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3206	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3207	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3208	adjust_zone_range_for_zone_movable(nid, zone_type,
3209				node_start_pfn, node_end_pfn,
3210				&zone_start_pfn, &zone_end_pfn);
3211
3212	/* Check that this node has pages within the zone's required range */
3213	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3214		return 0;
3215
3216	/* Move the zone boundaries inside the node if necessary */
3217	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3218	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3219
3220	/* Return the spanned pages */
3221	return zone_end_pfn - zone_start_pfn;
3222}
3223
3224/*
3225 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3226 * then all holes in the requested range will be accounted for.
3227 */
3228static unsigned long __meminit __absent_pages_in_range(int nid,
3229				unsigned long range_start_pfn,
3230				unsigned long range_end_pfn)
3231{
3232	int i = 0;
3233	unsigned long prev_end_pfn = 0, hole_pages = 0;
3234	unsigned long start_pfn;
3235
3236	/* Find the end_pfn of the first active range of pfns in the node */
3237	i = first_active_region_index_in_nid(nid);
3238	if (i == -1)
3239		return 0;
3240
3241	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3242
3243	/* Account for ranges before physical memory on this node */
3244	if (early_node_map[i].start_pfn > range_start_pfn)
3245		hole_pages = prev_end_pfn - range_start_pfn;
3246
3247	/* Find all holes for the zone within the node */
3248	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3249
3250		/* No need to continue if prev_end_pfn is outside the zone */
3251		if (prev_end_pfn >= range_end_pfn)
3252			break;
3253
3254		/* Make sure the end of the zone is not within the hole */
3255		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3256		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3257
3258		/* Update the hole size cound and move on */
3259		if (start_pfn > range_start_pfn) {
3260			BUG_ON(prev_end_pfn > start_pfn);
3261			hole_pages += start_pfn - prev_end_pfn;
3262		}
3263		prev_end_pfn = early_node_map[i].end_pfn;
3264	}
3265
3266	/* Account for ranges past physical memory on this node */
3267	if (range_end_pfn > prev_end_pfn)
3268		hole_pages += range_end_pfn -
3269				max(range_start_pfn, prev_end_pfn);
3270
3271	return hole_pages;
3272}
3273
3274/**
3275 * absent_pages_in_range - Return number of page frames in holes within a range
3276 * @start_pfn: The start PFN to start searching for holes
3277 * @end_pfn: The end PFN to stop searching for holes
3278 *
3279 * It returns the number of pages frames in memory holes within a range.
3280 */
3281unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3282							unsigned long end_pfn)
3283{
3284	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3285}
3286
3287/* Return the number of page frames in holes in a zone on a node */
3288static unsigned long __meminit zone_absent_pages_in_node(int nid,
3289					unsigned long zone_type,
3290					unsigned long *ignored)
3291{
3292	unsigned long node_start_pfn, node_end_pfn;
3293	unsigned long zone_start_pfn, zone_end_pfn;
3294
3295	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3296	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3297							node_start_pfn);
3298	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3299							node_end_pfn);
3300
3301	adjust_zone_range_for_zone_movable(nid, zone_type,
3302			node_start_pfn, node_end_pfn,
3303			&zone_start_pfn, &zone_end_pfn);
3304	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3305}
3306
3307#else
3308static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3309					unsigned long zone_type,
3310					unsigned long *zones_size)
3311{
3312	return zones_size[zone_type];
3313}
3314
3315static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3316						unsigned long zone_type,
3317						unsigned long *zholes_size)
3318{
3319	if (!zholes_size)
3320		return 0;
3321
3322	return zholes_size[zone_type];
3323}
3324
3325#endif
3326
3327static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3328		unsigned long *zones_size, unsigned long *zholes_size)
3329{
3330	unsigned long realtotalpages, totalpages = 0;
3331	enum zone_type i;
3332
3333	for (i = 0; i < MAX_NR_ZONES; i++)
3334		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3335								zones_size);
3336	pgdat->node_spanned_pages = totalpages;
3337
3338	realtotalpages = totalpages;
3339	for (i = 0; i < MAX_NR_ZONES; i++)
3340		realtotalpages -=
3341			zone_absent_pages_in_node(pgdat->node_id, i,
3342								zholes_size);
3343	pgdat->node_present_pages = realtotalpages;
3344	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3345							realtotalpages);
3346}
3347
3348#ifndef CONFIG_SPARSEMEM
3349/*
3350 * Calculate the size of the zone->blockflags rounded to an unsigned long
3351 * Start by making sure zonesize is a multiple of pageblock_order by rounding
3352 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
3353 * round what is now in bits to nearest long in bits, then return it in
3354 * bytes.
3355 */
3356static unsigned long __init usemap_size(unsigned long zonesize)
3357{
3358	unsigned long usemapsize;
3359
3360	usemapsize = roundup(zonesize, pageblock_nr_pages);
3361	usemapsize = usemapsize >> pageblock_order;
3362	usemapsize *= NR_PAGEBLOCK_BITS;
3363	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3364
3365	return usemapsize / 8;
3366}
3367
3368static void __init setup_usemap(struct pglist_data *pgdat,
3369				struct zone *zone, unsigned long zonesize)
3370{
3371	unsigned long usemapsize = usemap_size(zonesize);
3372	zone->pageblock_flags = NULL;
3373	if (usemapsize) {
3374		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3375		memset(zone->pageblock_flags, 0, usemapsize);
3376	}
3377}
3378#else
3379static void inline setup_usemap(struct pglist_data *pgdat,
3380				struct zone *zone, unsigned long zonesize) {}
3381#endif /* CONFIG_SPARSEMEM */
3382
3383#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3384
3385/* Return a sensible default order for the pageblock size. */
3386static inline int pageblock_default_order(void)
3387{
3388	if (HPAGE_SHIFT > PAGE_SHIFT)
3389		return HUGETLB_PAGE_ORDER;
3390
3391	return MAX_ORDER-1;
3392}
3393
3394/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3395static inline void __init set_pageblock_order(unsigned int order)
3396{
3397	/* Check that pageblock_nr_pages has not already been setup */
3398	if (pageblock_order)
3399		return;
3400
3401	/*
3402	 * Assume the largest contiguous order of interest is a huge page.
3403	 * This value may be variable depending on boot parameters on IA64
3404	 */
3405	pageblock_order = order;
3406}
3407#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3408
3409/*
3410 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3411 * and pageblock_default_order() are unused as pageblock_order is set
3412 * at compile-time. See include/linux/pageblock-flags.h for the values of
3413 * pageblock_order based on the kernel config
3414 */
3415static inline int pageblock_default_order(unsigned int order)
3416{
3417	return MAX_ORDER-1;
3418}
3419#define set_pageblock_order(x)	do {} while (0)
3420
3421#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3422
3423/*
3424 * Set up the zone data structures:
3425 *   - mark all pages reserved
3426 *   - mark all memory queues empty
3427 *   - clear the memory bitmaps
3428 */
3429static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3430		unsigned long *zones_size, unsigned long *zholes_size)
3431{
3432	enum zone_type j;
3433	int nid = pgdat->node_id;
3434	unsigned long zone_start_pfn = pgdat->node_start_pfn;
3435	int ret;
3436
3437	pgdat_resize_init(pgdat);
3438	pgdat->nr_zones = 0;
3439	init_waitqueue_head(&pgdat->kswapd_wait);
3440	pgdat->kswapd_max_order = 0;
3441
3442	for (j = 0; j < MAX_NR_ZONES; j++) {
3443		struct zone *zone = pgdat->node_zones + j;
3444		unsigned long size, realsize, memmap_pages;
3445		enum lru_list l;
3446
3447		size = zone_spanned_pages_in_node(nid, j, zones_size);
3448		realsize = size - zone_absent_pages_in_node(nid, j,
3449								zholes_size);
3450
3451		/*
3452		 * Adjust realsize so that it accounts for how much memory
3453		 * is used by this zone for memmap. This affects the watermark
3454		 * and per-cpu initialisations
3455		 */
3456		memmap_pages =
3457			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3458		if (realsize >= memmap_pages) {
3459			realsize -= memmap_pages;
3460			mminit_dprintk(MMINIT_TRACE, "memmap_init",
3461				"%s zone: %lu pages used for memmap\n",
3462				zone_names[j], memmap_pages);
3463		} else
3464			printk(KERN_WARNING
3465				"  %s zone: %lu pages exceeds realsize %lu\n",
3466				zone_names[j], memmap_pages, realsize);
3467
3468		/* Account for reserved pages */
3469		if (j == 0 && realsize > dma_reserve) {
3470			realsize -= dma_reserve;
3471			mminit_dprintk(MMINIT_TRACE, "memmap_init",
3472					"%s zone: %lu pages reserved\n",
3473					zone_names[0], dma_reserve);
3474		}
3475
3476		if (!is_highmem_idx(j))
3477			nr_kernel_pages += realsize;
3478		nr_all_pages += realsize;
3479
3480		zone->spanned_pages = size;
3481		zone->present_pages = realsize;
3482#ifdef CONFIG_NUMA
3483		zone->node = nid;
3484		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3485						/ 100;
3486		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3487#endif
3488		zone->name = zone_names[j];
3489		spin_lock_init(&zone->lock);
3490		spin_lock_init(&zone->lru_lock);
3491		zone_seqlock_init(zone);
3492		zone->zone_pgdat = pgdat;
3493
3494		zone->prev_priority = DEF_PRIORITY;
3495
3496		zone_pcp_init(zone);
3497		for_each_lru(l) {
3498			INIT_LIST_HEAD(&zone->lru[l].list);
3499			zone->lru[l].nr_scan = 0;
3500		}
3501		zone->recent_rotated[0] = 0;
3502		zone->recent_rotated[1] = 0;
3503		zone->recent_scanned[0] = 0;
3504		zone->recent_scanned[1] = 0;
3505		zap_zone_vm_stats(zone);
3506		zone->flags = 0;
3507		if (!size)
3508			continue;
3509
3510		set_pageblock_order(pageblock_default_order());
3511		setup_usemap(pgdat, zone, size);
3512		ret = init_currently_empty_zone(zone, zone_start_pfn,
3513						size, MEMMAP_EARLY);
3514		BUG_ON(ret);
3515		memmap_init(size, nid, j, zone_start_pfn);
3516		zone_start_pfn += size;
3517	}
3518}
3519
3520static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3521{
3522	/* Skip empty nodes */
3523	if (!pgdat->node_spanned_pages)
3524		return;
3525
3526#ifdef CONFIG_FLAT_NODE_MEM_MAP
3527	/* ia64 gets its own node_mem_map, before this, without bootmem */
3528	if (!pgdat->node_mem_map) {
3529		unsigned long size, start, end;
3530		struct page *map;
3531
3532		/*
3533		 * The zone's endpoints aren't required to be MAX_ORDER
3534		 * aligned but the node_mem_map endpoints must be in order
3535		 * for the buddy allocator to function correctly.
3536		 */
3537		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3538		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3539		end = ALIGN(end, MAX_ORDER_NR_PAGES);
3540		size =  (end - start) * sizeof(struct page);
3541		map = alloc_remap(pgdat->node_id, size);
3542		if (!map)
3543			map = alloc_bootmem_node(pgdat, size);
3544		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3545	}
3546#ifndef CONFIG_NEED_MULTIPLE_NODES
3547	/*
3548	 * With no DISCONTIG, the global mem_map is just set as node 0's
3549	 */
3550	if (pgdat == NODE_DATA(0)) {
3551		mem_map = NODE_DATA(0)->node_mem_map;
3552#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3553		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3554			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3555#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3556	}
3557#endif
3558#endif /* CONFIG_FLAT_NODE_MEM_MAP */
3559}
3560
3561void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3562		unsigned long node_start_pfn, unsigned long *zholes_size)
3563{
3564	pg_data_t *pgdat = NODE_DATA(nid);
3565
3566	pgdat->node_id = nid;
3567	pgdat->node_start_pfn = node_start_pfn;
3568	calculate_node_totalpages(pgdat, zones_size, zholes_size);
3569
3570	alloc_node_mem_map(pgdat);
3571#ifdef CONFIG_FLAT_NODE_MEM_MAP
3572	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3573		nid, (unsigned long)pgdat,
3574		(unsigned long)pgdat->node_mem_map);
3575#endif
3576
3577	free_area_init_core(pgdat, zones_size, zholes_size);
3578}
3579
3580#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3581
3582#if MAX_NUMNODES > 1
3583/*
3584 * Figure out the number of possible node ids.
3585 */
3586static void __init setup_nr_node_ids(void)
3587{
3588	unsigned int node;
3589	unsigned int highest = 0;
3590
3591	for_each_node_mask(node, node_possible_map)
3592		highest = node;
3593	nr_node_ids = highest + 1;
3594}
3595#else
3596static inline void setup_nr_node_ids(void)
3597{
3598}
3599#endif
3600
3601/**
3602 * add_active_range - Register a range of PFNs backed by physical memory
3603 * @nid: The node ID the range resides on
3604 * @start_pfn: The start PFN of the available physical memory
3605 * @end_pfn: The end PFN of the available physical memory
3606 *
3607 * These ranges are stored in an early_node_map[] and later used by
3608 * free_area_init_nodes() to calculate zone sizes and holes. If the
3609 * range spans a memory hole, it is up to the architecture to ensure
3610 * the memory is not freed by the bootmem allocator. If possible
3611 * the range being registered will be merged with existing ranges.
3612 */
3613void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3614						unsigned long end_pfn)
3615{
3616	int i;
3617
3618	mminit_dprintk(MMINIT_TRACE, "memory_register",
3619			"Entering add_active_range(%d, %#lx, %#lx) "
3620			"%d entries of %d used\n",
3621			nid, start_pfn, end_pfn,
3622			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3623
3624	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3625
3626	/* Merge with existing active regions if possible */
3627	for (i = 0; i < nr_nodemap_entries; i++) {
3628		if (early_node_map[i].nid != nid)
3629			continue;
3630
3631		/* Skip if an existing region covers this new one */
3632		if (start_pfn >= early_node_map[i].start_pfn &&
3633				end_pfn <= early_node_map[i].end_pfn)
3634			return;
3635
3636		/* Merge forward if suitable */
3637		if (start_pfn <= early_node_map[i].end_pfn &&
3638				end_pfn > early_node_map[i].end_pfn) {
3639			early_node_map[i].end_pfn = end_pfn;
3640			return;
3641		}
3642
3643		/* Merge backward if suitable */
3644		if (start_pfn < early_node_map[i].end_pfn &&
3645				end_pfn >= early_node_map[i].start_pfn) {
3646			early_node_map[i].start_pfn = start_pfn;
3647			return;
3648		}
3649	}
3650
3651	/* Check that early_node_map is large enough */
3652	if (i >= MAX_ACTIVE_REGIONS) {
3653		printk(KERN_CRIT "More than %d memory regions, truncating\n",
3654							MAX_ACTIVE_REGIONS);
3655		return;
3656	}
3657
3658	early_node_map[i].nid = nid;
3659	early_node_map[i].start_pfn = start_pfn;
3660	early_node_map[i].end_pfn = end_pfn;
3661	nr_nodemap_entries = i + 1;
3662}
3663
3664/**
3665 * remove_active_range - Shrink an existing registered range of PFNs
3666 * @nid: The node id the range is on that should be shrunk
3667 * @start_pfn: The new PFN of the range
3668 * @end_pfn: The new PFN of the range
3669 *
3670 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
3671 * The map is kept near the end physical page range that has already been
3672 * registered. This function allows an arch to shrink an existing registered
3673 * range.
3674 */
3675void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
3676				unsigned long end_pfn)
3677{
3678	int i, j;
3679	int removed = 0;
3680
3681	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
3682			  nid, start_pfn, end_pfn);
3683
3684	/* Find the old active region end and shrink */
3685	for_each_active_range_index_in_nid(i, nid) {
3686		if (early_node_map[i].start_pfn >= start_pfn &&
3687		    early_node_map[i].end_pfn <= end_pfn) {
3688			/* clear it */
3689			early_node_map[i].start_pfn = 0;
3690			early_node_map[i].end_pfn = 0;
3691			removed = 1;
3692			continue;
3693		}
3694		if (early_node_map[i].start_pfn < start_pfn &&
3695		    early_node_map[i].end_pfn > start_pfn) {
3696			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
3697			early_node_map[i].end_pfn = start_pfn;
3698			if (temp_end_pfn > end_pfn)
3699				add_active_range(nid, end_pfn, temp_end_pfn);
3700			continue;
3701		}
3702		if (early_node_map[i].start_pfn >= start_pfn &&
3703		    early_node_map[i].end_pfn > end_pfn &&
3704		    early_node_map[i].start_pfn < end_pfn) {
3705			early_node_map[i].start_pfn = end_pfn;
3706			continue;
3707		}
3708	}
3709
3710	if (!removed)
3711		return;
3712
3713	/* remove the blank ones */
3714	for (i = nr_nodemap_entries - 1; i > 0; i--) {
3715		if (early_node_map[i].nid != nid)
3716			continue;
3717		if (early_node_map[i].end_pfn)
3718			continue;
3719		/* we found it, get rid of it */
3720		for (j = i; j < nr_nodemap_entries - 1; j++)
3721			memcpy(&early_node_map[j], &early_node_map[j+1],
3722				sizeof(early_node_map[j]));
3723		j = nr_nodemap_entries - 1;
3724		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
3725		nr_nodemap_entries--;
3726	}
3727}
3728
3729/**
3730 * remove_all_active_ranges - Remove all currently registered regions
3731 *
3732 * During discovery, it may be found that a table like SRAT is invalid
3733 * and an alternative discovery method must be used. This function removes
3734 * all currently registered regions.
3735 */
3736void __init remove_all_active_ranges(void)
3737{
3738	memset(early_node_map, 0, sizeof(early_node_map));
3739	nr_nodemap_entries = 0;
3740#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
3741	memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
3742	memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
3743#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
3744}
3745
3746/* Compare two active node_active_regions */
3747static int __init cmp_node_active_region(const void *a, const void *b)
3748{
3749	struct node_active_region *arange = (struct node_active_region *)a;
3750	struct node_active_region *brange = (struct node_active_region *)b;
3751
3752	/* Done this way to avoid overflows */
3753	if (arange->start_pfn > brange->start_pfn)
3754		return 1;
3755	if (arange->start_pfn < brange->start_pfn)
3756		return -1;
3757
3758	return 0;
3759}
3760
3761/* sort the node_map by start_pfn */
3762static void __init sort_node_map(void)
3763{
3764	sort(early_node_map, (size_t)nr_nodemap_entries,
3765			sizeof(struct node_active_region),
3766			cmp_node_active_region, NULL);
3767}
3768
3769/* Find the lowest pfn for a node */
3770static unsigned long __init find_min_pfn_for_node(int nid)
3771{
3772	int i;
3773	unsigned long min_pfn = ULONG_MAX;
3774
3775	/* Assuming a sorted map, the first range found has the starting pfn */
3776	for_each_active_range_index_in_nid(i, nid)
3777		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
3778
3779	if (min_pfn == ULONG_MAX) {
3780		printk(KERN_WARNING
3781			"Could not find start_pfn for node %d\n", nid);
3782		return 0;
3783	}
3784
3785	return min_pfn;
3786}
3787
3788/**
3789 * find_min_pfn_with_active_regions - Find the minimum PFN registered
3790 *
3791 * It returns the minimum PFN based on information provided via
3792 * add_active_range().
3793 */
3794unsigned long __init find_min_pfn_with_active_regions(void)
3795{
3796	return find_min_pfn_for_node(MAX_NUMNODES);
3797}
3798
3799/*
3800 * early_calculate_totalpages()
3801 * Sum pages in active regions for movable zone.
3802 * Populate N_HIGH_MEMORY for calculating usable_nodes.
3803 */
3804static unsigned long __init early_calculate_totalpages(void)
3805{
3806	int i;
3807	unsigned long totalpages = 0;
3808
3809	for (i = 0; i < nr_nodemap_entries; i++) {
3810		unsigned long pages = early_node_map[i].end_pfn -
3811						early_node_map[i].start_pfn;
3812		totalpages += pages;
3813		if (pages)
3814			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3815	}
3816  	return totalpages;
3817}
3818
3819/*
3820 * Find the PFN the Movable zone begins in each node. Kernel memory
3821 * is spread evenly between nodes as long as the nodes have enough
3822 * memory. When they don't, some nodes will have more kernelcore than
3823 * others
3824 */
3825static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3826{
3827	int i, nid;
3828	unsigned long usable_startpfn;
3829	unsigned long kernelcore_node, kernelcore_remaining;
3830	unsigned long totalpages = early_calculate_totalpages();
3831	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
3832
3833	/*
3834	 * If movablecore was specified, calculate what size of
3835	 * kernelcore that corresponds so that memory usable for
3836	 * any allocation type is evenly spread. If both kernelcore
3837	 * and movablecore are specified, then the value of kernelcore
3838	 * will be used for required_kernelcore if it's greater than
3839	 * what movablecore would have allowed.
3840	 */
3841	if (required_movablecore) {
3842		unsigned long corepages;
3843
3844		/*
3845		 * Round-up so that ZONE_MOVABLE is at least as large as what
3846		 * was requested by the user
3847		 */
3848		required_movablecore =
3849			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3850		corepages = totalpages - required_movablecore;
3851
3852		required_kernelcore = max(required_kernelcore, corepages);
3853	}
3854
3855	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
3856	if (!required_kernelcore)
3857		return;
3858
3859	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3860	find_usable_zone_for_movable();
3861	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3862
3863restart:
3864	/* Spread kernelcore memory as evenly as possible throughout nodes */
3865	kernelcore_node = required_kernelcore / usable_nodes;
3866	for_each_node_state(nid, N_HIGH_MEMORY) {
3867		/*
3868		 * Recalculate kernelcore_node if the division per node
3869		 * now exceeds what is necessary to satisfy the requested
3870		 * amount of memory for the kernel
3871		 */
3872		if (required_kernelcore < kernelcore_node)
3873			kernelcore_node = required_kernelcore / usable_nodes;
3874
3875		/*
3876		 * As the map is walked, we track how much memory is usable
3877		 * by the kernel using kernelcore_remaining. When it is
3878		 * 0, the rest of the node is usable by ZONE_MOVABLE
3879		 */
3880		kernelcore_remaining = kernelcore_node;
3881
3882		/* Go through each range of PFNs within this node */
3883		for_each_active_range_index_in_nid(i, nid) {
3884			unsigned long start_pfn, end_pfn;
3885			unsigned long size_pages;
3886
3887			start_pfn = max(early_node_map[i].start_pfn,
3888						zone_movable_pfn[nid]);
3889			end_pfn = early_node_map[i].end_pfn;
3890			if (start_pfn >= end_pfn)
3891				continue;
3892
3893			/* Account for what is only usable for kernelcore */
3894			if (start_pfn < usable_startpfn) {
3895				unsigned long kernel_pages;
3896				kernel_pages = min(end_pfn, usable_startpfn)
3897								- start_pfn;
3898
3899				kernelcore_remaining -= min(kernel_pages,
3900							kernelcore_remaining);
3901				required_kernelcore -= min(kernel_pages,
3902							required_kernelcore);
3903
3904				/* Continue if range is now fully accounted */
3905				if (end_pfn <= usable_startpfn) {
3906
3907					/*
3908					 * Push zone_movable_pfn to the end so
3909					 * that if we have to rebalance
3910					 * kernelcore across nodes, we will
3911					 * not double account here
3912					 */
3913					zone_movable_pfn[nid] = end_pfn;
3914					continue;
3915				}
3916				start_pfn = usable_startpfn;
3917			}
3918
3919			/*
3920			 * The usable PFN range for ZONE_MOVABLE is from
3921			 * start_pfn->end_pfn. Calculate size_pages as the
3922			 * number of pages used as kernelcore
3923			 */
3924			size_pages = end_pfn - start_pfn;
3925			if (size_pages > kernelcore_remaining)
3926				size_pages = kernelcore_remaining;
3927			zone_movable_pfn[nid] = start_pfn + size_pages;
3928
3929			/*
3930			 * Some kernelcore has been met, update counts and
3931			 * break if the kernelcore for this node has been
3932			 * satisified
3933			 */
3934			required_kernelcore -= min(required_kernelcore,
3935								size_pages);
3936			kernelcore_remaining -= size_pages;
3937			if (!kernelcore_remaining)
3938				break;
3939		}
3940	}
3941
3942	/*
3943	 * If there is still required_kernelcore, we do another pass with one
3944	 * less node in the count. This will push zone_movable_pfn[nid] further
3945	 * along on the nodes that still have memory until kernelcore is
3946	 * satisified
3947	 */
3948	usable_nodes--;
3949	if (usable_nodes && required_kernelcore > usable_nodes)
3950		goto restart;
3951
3952	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
3953	for (nid = 0; nid < MAX_NUMNODES; nid++)
3954		zone_movable_pfn[nid] =
3955			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
3956}
3957
3958/* Any regular memory on that node ? */
3959static void check_for_regular_memory(pg_data_t *pgdat)
3960{
3961#ifdef CONFIG_HIGHMEM
3962	enum zone_type zone_type;
3963
3964	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
3965		struct zone *zone = &pgdat->node_zones[zone_type];
3966		if (zone->present_pages)
3967			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
3968	}
3969#endif
3970}
3971
3972/**
3973 * free_area_init_nodes - Initialise all pg_data_t and zone data
3974 * @max_zone_pfn: an array of max PFNs for each zone
3975 *
3976 * This will call free_area_init_node() for each active node in the system.
3977 * Using the page ranges provided by add_active_range(), the size of each
3978 * zone in each node and their holes is calculated. If the maximum PFN
3979 * between two adjacent zones match, it is assumed that the zone is empty.
3980 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
3981 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
3982 * starts where the previous one ended. For example, ZONE_DMA32 starts
3983 * at arch_max_dma_pfn.
3984 */
3985void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3986{
3987	unsigned long nid;
3988	int i;
3989
3990	/* Sort early_node_map as initialisation assumes it is sorted */
3991	sort_node_map();
3992
3993	/* Record where the zone boundaries are */
3994	memset(arch_zone_lowest_possible_pfn, 0,
3995				sizeof(arch_zone_lowest_possible_pfn));
3996	memset(arch_zone_highest_possible_pfn, 0,
3997				sizeof(arch_zone_highest_possible_pfn));
3998	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
3999	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4000	for (i = 1; i < MAX_NR_ZONES; i++) {
4001		if (i == ZONE_MOVABLE)
4002			continue;
4003		arch_zone_lowest_possible_pfn[i] =
4004			arch_zone_highest_possible_pfn[i-1];
4005		arch_zone_highest_possible_pfn[i] =
4006			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4007	}
4008	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4009	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4010
4011	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4012	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4013	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4014
4015	/* Print out the zone ranges */
4016	printk("Zone PFN ranges:\n");
4017	for (i = 0; i < MAX_NR_ZONES; i++) {
4018		if (i == ZONE_MOVABLE)
4019			continue;
4020		printk("  %-8s %0#10lx -> %0#10lx\n",
4021				zone_names[i],
4022				arch_zone_lowest_possible_pfn[i],
4023				arch_zone_highest_possible_pfn[i]);
4024	}
4025
4026	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4027	printk("Movable zone start PFN for each node\n");
4028	for (i = 0; i < MAX_NUMNODES; i++) {
4029		if (zone_movable_pfn[i])
4030			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4031	}
4032
4033	/* Print out the early_node_map[] */
4034	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4035	for (i = 0; i < nr_nodemap_entries; i++)
4036		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4037						early_node_map[i].start_pfn,
4038						early_node_map[i].end_pfn);
4039
4040	/* Initialise every node */
4041	mminit_verify_pageflags_layout();
4042	setup_nr_node_ids();
4043	for_each_online_node(nid) {
4044		pg_data_t *pgdat = NODE_DATA(nid);
4045		free_area_init_node(nid, NULL,
4046				find_min_pfn_for_node(nid), NULL);
4047
4048		/* Any memory on that node */
4049		if (pgdat->node_present_pages)
4050			node_set_state(nid, N_HIGH_MEMORY);
4051		check_for_regular_memory(pgdat);
4052	}
4053}
4054
4055static int __init cmdline_parse_core(char *p, unsigned long *core)
4056{
4057	unsigned long long coremem;
4058	if (!p)
4059		return -EINVAL;
4060
4061	coremem = memparse(p, &p);
4062	*core = coremem >> PAGE_SHIFT;
4063
4064	/* Paranoid check that UL is enough for the coremem value */
4065	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4066
4067	return 0;
4068}
4069
4070/*
4071 * kernelcore=size sets the amount of memory for use for allocations that
4072 * cannot be reclaimed or migrated.
4073 */
4074static int __init cmdline_parse_kernelcore(char *p)
4075{
4076	return cmdline_parse_core(p, &required_kernelcore);
4077}
4078
4079/*
4080 * movablecore=size sets the amount of memory for use for allocations that
4081 * can be reclaimed or migrated.
4082 */
4083static int __init cmdline_parse_movablecore(char *p)
4084{
4085	return cmdline_parse_core(p, &required_movablecore);
4086}
4087
4088early_param("kernelcore", cmdline_parse_kernelcore);
4089early_param("movablecore", cmdline_parse_movablecore);
4090
4091#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4092
4093/**
4094 * set_dma_reserve - set the specified number of pages reserved in the first zone
4095 * @new_dma_reserve: The number of pages to mark reserved
4096 *
4097 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4098 * In the DMA zone, a significant percentage may be consumed by kernel image
4099 * and other unfreeable allocations which can skew the watermarks badly. This
4100 * function may optionally be used to account for unfreeable pages in the
4101 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4102 * smaller per-cpu batchsize.
4103 */
4104void __init set_dma_reserve(unsigned long new_dma_reserve)
4105{
4106	dma_reserve = new_dma_reserve;
4107}
4108
4109#ifndef CONFIG_NEED_MULTIPLE_NODES
4110struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4111EXPORT_SYMBOL(contig_page_data);
4112#endif
4113
4114void __init free_area_init(unsigned long *zones_size)
4115{
4116	free_area_init_node(0, zones_size,
4117			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4118}
4119
4120static int page_alloc_cpu_notify(struct notifier_block *self,
4121				 unsigned long action, void *hcpu)
4122{
4123	int cpu = (unsigned long)hcpu;
4124
4125	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4126		drain_pages(cpu);
4127
4128		/*
4129		 * Spill the event counters of the dead processor
4130		 * into the current processors event counters.
4131		 * This artificially elevates the count of the current
4132		 * processor.
4133		 */
4134		vm_events_fold_cpu(cpu);
4135
4136		/*
4137		 * Zero the differential counters of the dead processor
4138		 * so that the vm statistics are consistent.
4139		 *
4140		 * This is only okay since the processor is dead and cannot
4141		 * race with what we are doing.
4142		 */
4143		refresh_cpu_vm_stats(cpu);
4144	}
4145	return NOTIFY_OK;
4146}
4147
4148void __init page_alloc_init(void)
4149{
4150	hotcpu_notifier(page_alloc_cpu_notify, 0);
4151}
4152
4153/*
4154 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
4155 *	or min_free_kbytes changes.
4156 */
4157static void calculate_totalreserve_pages(void)
4158{
4159	struct pglist_data *pgdat;
4160	unsigned long reserve_pages = 0;
4161	enum zone_type i, j;
4162
4163	for_each_online_pgdat(pgdat) {
4164		for (i = 0; i < MAX_NR_ZONES; i++) {
4165			struct zone *zone = pgdat->node_zones + i;
4166			unsigned long max = 0;
4167
4168			/* Find valid and maximum lowmem_reserve in the zone */
4169			for (j = i; j < MAX_NR_ZONES; j++) {
4170				if (zone->lowmem_reserve[j] > max)
4171					max = zone->lowmem_reserve[j];
4172			}
4173
4174			/* we treat pages_high as reserved pages. */
4175			max += zone->pages_high;
4176
4177			if (max > zone->present_pages)
4178				max = zone->present_pages;
4179			reserve_pages += max;
4180		}
4181	}
4182	totalreserve_pages = reserve_pages;
4183}
4184
4185/*
4186 * setup_per_zone_lowmem_reserve - called whenever
4187 *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
4188 *	has a correct pages reserved value, so an adequate number of
4189 *	pages are left in the zone after a successful __alloc_pages().
4190 */
4191static void setup_per_zone_lowmem_reserve(void)
4192{
4193	struct pglist_data *pgdat;
4194	enum zone_type j, idx;
4195
4196	for_each_online_pgdat(pgdat) {
4197		for (j = 0; j < MAX_NR_ZONES; j++) {
4198			struct zone *zone = pgdat->node_zones + j;
4199			unsigned long present_pages = zone->present_pages;
4200
4201			zone->lowmem_reserve[j] = 0;
4202
4203			idx = j;
4204			while (idx) {
4205				struct zone *lower_zone;
4206
4207				idx--;
4208
4209				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4210					sysctl_lowmem_reserve_ratio[idx] = 1;
4211
4212				lower_zone = pgdat->node_zones + idx;
4213				lower_zone->lowmem_reserve[j] = present_pages /
4214					sysctl_lowmem_reserve_ratio[idx];
4215				present_pages += lower_zone->present_pages;
4216			}
4217		}
4218	}
4219
4220	/* update totalreserve_pages */
4221	calculate_totalreserve_pages();
4222}
4223
4224/**
4225 * setup_per_zone_pages_min - called when min_free_kbytes changes.
4226 *
4227 * Ensures that the pages_{min,low,high} values for each zone are set correctly
4228 * with respect to min_free_kbytes.
4229 */
4230void setup_per_zone_pages_min(void)
4231{
4232	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4233	unsigned long lowmem_pages = 0;
4234	struct zone *zone;
4235	unsigned long flags;
4236
4237	/* Calculate total number of !ZONE_HIGHMEM pages */
4238	for_each_zone(zone) {
4239		if (!is_highmem(zone))
4240			lowmem_pages += zone->present_pages;
4241	}
4242
4243	for_each_zone(zone) {
4244		u64 tmp;
4245
4246		spin_lock_irqsave(&zone->lru_lock, flags);
4247		tmp = (u64)pages_min * zone->present_pages;
4248		do_div(tmp, lowmem_pages);
4249		if (is_highmem(zone)) {
4250			/*
4251			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4252			 * need highmem pages, so cap pages_min to a small
4253			 * value here.
4254			 *
4255			 * The (pages_high-pages_low) and (pages_low-pages_min)
4256			 * deltas controls asynch page reclaim, and so should
4257			 * not be capped for highmem.
4258			 */
4259			int min_pages;
4260
4261			min_pages = zone->present_pages / 1024;
4262			if (min_pages < SWAP_CLUSTER_MAX)
4263				min_pages = SWAP_CLUSTER_MAX;
4264			if (min_pages > 128)
4265				min_pages = 128;
4266			zone->pages_min = min_pages;
4267		} else {
4268			/*
4269			 * If it's a lowmem zone, reserve a number of pages
4270			 * proportionate to the zone's size.
4271			 */
4272			zone->pages_min = tmp;
4273		}
4274
4275		zone->pages_low   = zone->pages_min + (tmp >> 2);
4276		zone->pages_high  = zone->pages_min + (tmp >> 1);
4277		setup_zone_migrate_reserve(zone);
4278		spin_unlock_irqrestore(&zone->lru_lock, flags);
4279	}
4280
4281	/* update totalreserve_pages */
4282	calculate_totalreserve_pages();
4283}
4284
4285/**
4286 * setup_per_zone_inactive_ratio - called when min_free_kbytes changes.
4287 *
4288 * The inactive anon list should be small enough that the VM never has to
4289 * do too much work, but large enough that each inactive page has a chance
4290 * to be referenced again before it is swapped out.
4291 *
4292 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4293 * INACTIVE_ANON pages on this zone's LRU, maintained by the
4294 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4295 * the anonymous pages are kept on the inactive list.
4296 *
4297 * total     target    max
4298 * memory    ratio     inactive anon
4299 * -------------------------------------
4300 *   10MB       1         5MB
4301 *  100MB       1        50MB
4302 *    1GB       3       250MB
4303 *   10GB      10       0.9GB
4304 *  100GB      31         3GB
4305 *    1TB     101        10GB
4306 *   10TB     320        32GB
4307 */
4308void setup_per_zone_inactive_ratio(void)
4309{
4310	struct zone *zone;
4311
4312	for_each_zone(zone) {
4313		unsigned int gb, ratio;
4314
4315		/* Zone size in gigabytes */
4316		gb = zone->present_pages >> (30 - PAGE_SHIFT);
4317		ratio = int_sqrt(10 * gb);
4318		if (!ratio)
4319			ratio = 1;
4320
4321		zone->inactive_ratio = ratio;
4322	}
4323}
4324
4325/*
4326 * Initialise min_free_kbytes.
4327 *
4328 * For small machines we want it small (128k min).  For large machines
4329 * we want it large (64MB max).  But it is not linear, because network
4330 * bandwidth does not increase linearly with machine size.  We use
4331 *
4332 * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4333 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
4334 *
4335 * which yields
4336 *
4337 * 16MB:	512k
4338 * 32MB:	724k
4339 * 64MB:	1024k
4340 * 128MB:	1448k
4341 * 256MB:	2048k
4342 * 512MB:	2896k
4343 * 1024MB:	4096k
4344 * 2048MB:	5792k
4345 * 4096MB:	8192k
4346 * 8192MB:	11584k
4347 * 16384MB:	16384k
4348 */
4349static int __init init_per_zone_pages_min(void)
4350{
4351	unsigned long lowmem_kbytes;
4352
4353	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4354
4355	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4356	if (min_free_kbytes < 128)
4357		min_free_kbytes = 128;
4358	if (min_free_kbytes > 65536)
4359		min_free_kbytes = 65536;
4360	setup_per_zone_pages_min();
4361	setup_per_zone_lowmem_reserve();
4362	setup_per_zone_inactive_ratio();
4363	return 0;
4364}
4365module_init(init_per_zone_pages_min)
4366
4367/*
4368 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4369 *	that we can call two helper functions whenever min_free_kbytes
4370 *	changes.
4371 */
4372int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4373	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4374{
4375	proc_dointvec(table, write, file, buffer, length, ppos);
4376	if (write)
4377		setup_per_zone_pages_min();
4378	return 0;
4379}
4380
4381#ifdef CONFIG_NUMA
4382int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4383	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4384{
4385	struct zone *zone;
4386	int rc;
4387
4388	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4389	if (rc)
4390		return rc;
4391
4392	for_each_zone(zone)
4393		zone->min_unmapped_pages = (zone->present_pages *
4394				sysctl_min_unmapped_ratio) / 100;
4395	return 0;
4396}
4397
4398int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4399	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4400{
4401	struct zone *zone;
4402	int rc;
4403
4404	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4405	if (rc)
4406		return rc;
4407
4408	for_each_zone(zone)
4409		zone->min_slab_pages = (zone->present_pages *
4410				sysctl_min_slab_ratio) / 100;
4411	return 0;
4412}
4413#endif
4414
4415/*
4416 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4417 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4418 *	whenever sysctl_lowmem_reserve_ratio changes.
4419 *
4420 * The reserve ratio obviously has absolutely no relation with the
4421 * pages_min watermarks. The lowmem reserve ratio can only make sense
4422 * if in function of the boot time zone sizes.
4423 */
4424int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4425	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4426{
4427	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4428	setup_per_zone_lowmem_reserve();
4429	return 0;
4430}
4431
4432/*
4433 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4434 * cpu.  It is the fraction of total pages in each zone that a hot per cpu pagelist
4435 * can have before it gets flushed back to buddy allocator.
4436 */
4437
4438int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4439	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4440{
4441	struct zone *zone;
4442	unsigned int cpu;
4443	int ret;
4444
4445	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4446	if (!write || (ret == -EINVAL))
4447		return ret;
4448	for_each_zone(zone) {
4449		for_each_online_cpu(cpu) {
4450			unsigned long  high;
4451			high = zone->present_pages / percpu_pagelist_fraction;
4452			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4453		}
4454	}
4455	return 0;
4456}
4457
4458int hashdist = HASHDIST_DEFAULT;
4459
4460#ifdef CONFIG_NUMA
4461static int __init set_hashdist(char *str)
4462{
4463	if (!str)
4464		return 0;
4465	hashdist = simple_strtoul(str, &str, 0);
4466	return 1;
4467}
4468__setup("hashdist=", set_hashdist);
4469#endif
4470
4471/*
4472 * allocate a large system hash table from bootmem
4473 * - it is assumed that the hash table must contain an exact power-of-2
4474 *   quantity of entries
4475 * - limit is the number of hash buckets, not the total allocation size
4476 */
4477void *__init alloc_large_system_hash(const char *tablename,
4478				     unsigned long bucketsize,
4479				     unsigned long numentries,
4480				     int scale,
4481				     int flags,
4482				     unsigned int *_hash_shift,
4483				     unsigned int *_hash_mask,
4484				     unsigned long limit)
4485{
4486	unsigned long long max = limit;
4487	unsigned long log2qty, size;
4488	void *table = NULL;
4489
4490	/* allow the kernel cmdline to have a say */
4491	if (!numentries) {
4492		/* round applicable memory size up to nearest megabyte */
4493		numentries = nr_kernel_pages;
4494		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4495		numentries >>= 20 - PAGE_SHIFT;
4496		numentries <<= 20 - PAGE_SHIFT;
4497
4498		/* limit to 1 bucket per 2^scale bytes of low memory */
4499		if (scale > PAGE_SHIFT)
4500			numentries >>= (scale - PAGE_SHIFT);
4501		else
4502			numentries <<= (PAGE_SHIFT - scale);
4503
4504		/* Make sure we've got at least a 0-order allocation.. */
4505		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4506			numentries = PAGE_SIZE / bucketsize;
4507	}
4508	numentries = roundup_pow_of_two(numentries);
4509
4510	/* limit allocation size to 1/16 total memory by default */
4511	if (max == 0) {
4512		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4513		do_div(max, bucketsize);
4514	}
4515
4516	if (numentries > max)
4517		numentries = max;
4518
4519	log2qty = ilog2(numentries);
4520
4521	do {
4522		size = bucketsize << log2qty;
4523		if (flags & HASH_EARLY)
4524			table = alloc_bootmem_nopanic(size);
4525		else if (hashdist)
4526			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4527		else {
4528			unsigned long order = get_order(size);
4529			table = (void*) __get_free_pages(GFP_ATOMIC, order);
4530			/*
4531			 * If bucketsize is not a power-of-two, we may free
4532			 * some pages at the end of hash table.
4533			 */
4534			if (table) {
4535				unsigned long alloc_end = (unsigned long)table +
4536						(PAGE_SIZE << order);
4537				unsigned long used = (unsigned long)table +
4538						PAGE_ALIGN(size);
4539				split_page(virt_to_page(table), order);
4540				while (used < alloc_end) {
4541					free_page(used);
4542					used += PAGE_SIZE;
4543				}
4544			}
4545		}
4546	} while (!table && size > PAGE_SIZE && --log2qty);
4547
4548	if (!table)
4549		panic("Failed to allocate %s hash table\n", tablename);
4550
4551	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4552	       tablename,
4553	       (1U << log2qty),
4554	       ilog2(size) - PAGE_SHIFT,
4555	       size);
4556
4557	if (_hash_shift)
4558		*_hash_shift = log2qty;
4559	if (_hash_mask)
4560		*_hash_mask = (1 << log2qty) - 1;
4561
4562	return table;
4563}
4564
4565#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
4566struct page *pfn_to_page(unsigned long pfn)
4567{
4568	return __pfn_to_page(pfn);
4569}
4570unsigned long page_to_pfn(struct page *page)
4571{
4572	return __page_to_pfn(page);
4573}
4574EXPORT_SYMBOL(pfn_to_page);
4575EXPORT_SYMBOL(page_to_pfn);
4576#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
4577
4578/* Return a pointer to the bitmap storing bits affecting a block of pages */
4579static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4580							unsigned long pfn)
4581{
4582#ifdef CONFIG_SPARSEMEM
4583	return __pfn_to_section(pfn)->pageblock_flags;
4584#else
4585	return zone->pageblock_flags;
4586#endif /* CONFIG_SPARSEMEM */
4587}
4588
4589static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4590{
4591#ifdef CONFIG_SPARSEMEM
4592	pfn &= (PAGES_PER_SECTION-1);
4593	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4594#else
4595	pfn = pfn - zone->zone_start_pfn;
4596	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4597#endif /* CONFIG_SPARSEMEM */
4598}
4599
4600/**
4601 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4602 * @page: The page within the block of interest
4603 * @start_bitidx: The first bit of interest to retrieve
4604 * @end_bitidx: The last bit of interest
4605 * returns pageblock_bits flags
4606 */
4607unsigned long get_pageblock_flags_group(struct page *page,
4608					int start_bitidx, int end_bitidx)
4609{
4610	struct zone *zone;
4611	unsigned long *bitmap;
4612	unsigned long pfn, bitidx;
4613	unsigned long flags = 0;
4614	unsigned long value = 1;
4615
4616	zone = page_zone(page);
4617	pfn = page_to_pfn(page);
4618	bitmap = get_pageblock_bitmap(zone, pfn);
4619	bitidx = pfn_to_bitidx(zone, pfn);
4620
4621	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4622		if (test_bit(bitidx + start_bitidx, bitmap))
4623			flags |= value;
4624
4625	return flags;
4626}
4627
4628/**
4629 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4630 * @page: The page within the block of interest
4631 * @start_bitidx: The first bit of interest
4632 * @end_bitidx: The last bit of interest
4633 * @flags: The flags to set
4634 */
4635void set_pageblock_flags_group(struct page *page, unsigned long flags,
4636					int start_bitidx, int end_bitidx)
4637{
4638	struct zone *zone;
4639	unsigned long *bitmap;
4640	unsigned long pfn, bitidx;
4641	unsigned long value = 1;
4642
4643	zone = page_zone(page);
4644	pfn = page_to_pfn(page);
4645	bitmap = get_pageblock_bitmap(zone, pfn);
4646	bitidx = pfn_to_bitidx(zone, pfn);
4647	VM_BUG_ON(pfn < zone->zone_start_pfn);
4648	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4649
4650	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4651		if (flags & value)
4652			__set_bit(bitidx + start_bitidx, bitmap);
4653		else
4654			__clear_bit(bitidx + start_bitidx, bitmap);
4655}
4656
4657/*
4658 * This is designed as sub function...plz see page_isolation.c also.
4659 * set/clear page block's type to be ISOLATE.
4660 * page allocater never alloc memory from ISOLATE block.
4661 */
4662
4663int set_migratetype_isolate(struct page *page)
4664{
4665	struct zone *zone;
4666	unsigned long flags;
4667	int ret = -EBUSY;
4668
4669	zone = page_zone(page);
4670	spin_lock_irqsave(&zone->lock, flags);
4671	/*
4672	 * In future, more migrate types will be able to be isolation target.
4673	 */
4674	if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
4675		goto out;
4676	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4677	move_freepages_block(zone, page, MIGRATE_ISOLATE);
4678	ret = 0;
4679out:
4680	spin_unlock_irqrestore(&zone->lock, flags);
4681	if (!ret)
4682		drain_all_pages();
4683	return ret;
4684}
4685
4686void unset_migratetype_isolate(struct page *page)
4687{
4688	struct zone *zone;
4689	unsigned long flags;
4690	zone = page_zone(page);
4691	spin_lock_irqsave(&zone->lock, flags);
4692	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
4693		goto out;
4694	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4695	move_freepages_block(zone, page, MIGRATE_MOVABLE);
4696out:
4697	spin_unlock_irqrestore(&zone->lock, flags);
4698}
4699
4700#ifdef CONFIG_MEMORY_HOTREMOVE
4701/*
4702 * All pages in the range must be isolated before calling this.
4703 */
4704void
4705__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
4706{
4707	struct page *page;
4708	struct zone *zone;
4709	int order, i;
4710	unsigned long pfn;
4711	unsigned long flags;
4712	/* find the first valid pfn */
4713	for (pfn = start_pfn; pfn < end_pfn; pfn++)
4714		if (pfn_valid(pfn))
4715			break;
4716	if (pfn == end_pfn)
4717		return;
4718	zone = page_zone(pfn_to_page(pfn));
4719	spin_lock_irqsave(&zone->lock, flags);
4720	pfn = start_pfn;
4721	while (pfn < end_pfn) {
4722		if (!pfn_valid(pfn)) {
4723			pfn++;
4724			continue;
4725		}
4726		page = pfn_to_page(pfn);
4727		BUG_ON(page_count(page));
4728		BUG_ON(!PageBuddy(page));
4729		order = page_order(page);
4730#ifdef CONFIG_DEBUG_VM
4731		printk(KERN_INFO "remove from free list %lx %d %lx\n",
4732		       pfn, 1 << order, end_pfn);
4733#endif
4734		list_del(&page->lru);
4735		rmv_page_order(page);
4736		zone->free_area[order].nr_free--;
4737		__mod_zone_page_state(zone, NR_FREE_PAGES,
4738				      - (1UL << order));
4739		for (i = 0; i < (1 << order); i++)
4740			SetPageReserved((page+i));
4741		pfn += (1 << order);
4742	}
4743	spin_unlock_irqrestore(&zone->lock, flags);
4744}
4745#endif
4746