page_alloc.c revision 1e548deb5d1630ca14ba04da04e3b6b3766178c7
1/* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17#include <linux/stddef.h> 18#include <linux/mm.h> 19#include <linux/swap.h> 20#include <linux/interrupt.h> 21#include <linux/pagemap.h> 22#include <linux/bootmem.h> 23#include <linux/compiler.h> 24#include <linux/kernel.h> 25#include <linux/module.h> 26#include <linux/suspend.h> 27#include <linux/pagevec.h> 28#include <linux/blkdev.h> 29#include <linux/slab.h> 30#include <linux/oom.h> 31#include <linux/notifier.h> 32#include <linux/topology.h> 33#include <linux/sysctl.h> 34#include <linux/cpu.h> 35#include <linux/cpuset.h> 36#include <linux/memory_hotplug.h> 37#include <linux/nodemask.h> 38#include <linux/vmalloc.h> 39#include <linux/mempolicy.h> 40#include <linux/stop_machine.h> 41#include <linux/sort.h> 42#include <linux/pfn.h> 43#include <linux/backing-dev.h> 44#include <linux/fault-inject.h> 45#include <linux/page-isolation.h> 46 47#include <asm/tlbflush.h> 48#include <asm/div64.h> 49#include "internal.h" 50 51/* 52 * Array of node states. 53 */ 54nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 55 [N_POSSIBLE] = NODE_MASK_ALL, 56 [N_ONLINE] = { { [0] = 1UL } }, 57#ifndef CONFIG_NUMA 58 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 59#ifdef CONFIG_HIGHMEM 60 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 61#endif 62 [N_CPU] = { { [0] = 1UL } }, 63#endif /* NUMA */ 64}; 65EXPORT_SYMBOL(node_states); 66 67unsigned long totalram_pages __read_mostly; 68unsigned long totalreserve_pages __read_mostly; 69long nr_swap_pages; 70int percpu_pagelist_fraction; 71 72#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 73int pageblock_order __read_mostly; 74#endif 75 76static void __free_pages_ok(struct page *page, unsigned int order); 77 78/* 79 * results with 256, 32 in the lowmem_reserve sysctl: 80 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 81 * 1G machine -> (16M dma, 784M normal, 224M high) 82 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 83 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 84 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA 85 * 86 * TBD: should special case ZONE_DMA32 machines here - in those we normally 87 * don't need any ZONE_NORMAL reservation 88 */ 89int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 90#ifdef CONFIG_ZONE_DMA 91 256, 92#endif 93#ifdef CONFIG_ZONE_DMA32 94 256, 95#endif 96#ifdef CONFIG_HIGHMEM 97 32, 98#endif 99 32, 100}; 101 102EXPORT_SYMBOL(totalram_pages); 103 104static char * const zone_names[MAX_NR_ZONES] = { 105#ifdef CONFIG_ZONE_DMA 106 "DMA", 107#endif 108#ifdef CONFIG_ZONE_DMA32 109 "DMA32", 110#endif 111 "Normal", 112#ifdef CONFIG_HIGHMEM 113 "HighMem", 114#endif 115 "Movable", 116}; 117 118int min_free_kbytes = 1024; 119 120unsigned long __meminitdata nr_kernel_pages; 121unsigned long __meminitdata nr_all_pages; 122static unsigned long __meminitdata dma_reserve; 123 124#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 125 /* 126 * MAX_ACTIVE_REGIONS determines the maximum number of distinct 127 * ranges of memory (RAM) that may be registered with add_active_range(). 128 * Ranges passed to add_active_range() will be merged if possible 129 * so the number of times add_active_range() can be called is 130 * related to the number of nodes and the number of holes 131 */ 132 #ifdef CONFIG_MAX_ACTIVE_REGIONS 133 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ 134 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS 135 #else 136 #if MAX_NUMNODES >= 32 137 /* If there can be many nodes, allow up to 50 holes per node */ 138 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) 139 #else 140 /* By default, allow up to 256 distinct regions */ 141 #define MAX_ACTIVE_REGIONS 256 142 #endif 143 #endif 144 145 static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; 146 static int __meminitdata nr_nodemap_entries; 147 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; 148 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; 149#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 150 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES]; 151 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES]; 152#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 153 unsigned long __initdata required_kernelcore; 154 static unsigned long __initdata required_movablecore; 155 unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; 156 157 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 158 int movable_zone; 159 EXPORT_SYMBOL(movable_zone); 160#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 161 162#if MAX_NUMNODES > 1 163int nr_node_ids __read_mostly = MAX_NUMNODES; 164EXPORT_SYMBOL(nr_node_ids); 165#endif 166 167int page_group_by_mobility_disabled __read_mostly; 168 169static void set_pageblock_migratetype(struct page *page, int migratetype) 170{ 171 set_pageblock_flags_group(page, (unsigned long)migratetype, 172 PB_migrate, PB_migrate_end); 173} 174 175#ifdef CONFIG_DEBUG_VM 176static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 177{ 178 int ret = 0; 179 unsigned seq; 180 unsigned long pfn = page_to_pfn(page); 181 182 do { 183 seq = zone_span_seqbegin(zone); 184 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 185 ret = 1; 186 else if (pfn < zone->zone_start_pfn) 187 ret = 1; 188 } while (zone_span_seqretry(zone, seq)); 189 190 return ret; 191} 192 193static int page_is_consistent(struct zone *zone, struct page *page) 194{ 195 if (!pfn_valid_within(page_to_pfn(page))) 196 return 0; 197 if (zone != page_zone(page)) 198 return 0; 199 200 return 1; 201} 202/* 203 * Temporary debugging check for pages not lying within a given zone. 204 */ 205static int bad_range(struct zone *zone, struct page *page) 206{ 207 if (page_outside_zone_boundaries(zone, page)) 208 return 1; 209 if (!page_is_consistent(zone, page)) 210 return 1; 211 212 return 0; 213} 214#else 215static inline int bad_range(struct zone *zone, struct page *page) 216{ 217 return 0; 218} 219#endif 220 221static void bad_page(struct page *page) 222{ 223 printk(KERN_EMERG "Bad page state in process '%s'\n" 224 KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" 225 KERN_EMERG "Trying to fix it up, but a reboot is needed\n" 226 KERN_EMERG "Backtrace:\n", 227 current->comm, page, (int)(2*sizeof(unsigned long)), 228 (unsigned long)page->flags, page->mapping, 229 page_mapcount(page), page_count(page)); 230 dump_stack(); 231 page->flags &= ~(1 << PG_lru | 232 1 << PG_private | 233 1 << PG_locked | 234 1 << PG_active | 235 1 << PG_dirty | 236 1 << PG_reclaim | 237 1 << PG_slab | 238 1 << PG_swapcache | 239 1 << PG_writeback | 240 1 << PG_buddy ); 241 set_page_count(page, 0); 242 reset_page_mapcount(page); 243 page->mapping = NULL; 244 add_taint(TAINT_BAD_PAGE); 245} 246 247/* 248 * Higher-order pages are called "compound pages". They are structured thusly: 249 * 250 * The first PAGE_SIZE page is called the "head page". 251 * 252 * The remaining PAGE_SIZE pages are called "tail pages". 253 * 254 * All pages have PG_compound set. All pages have their ->private pointing at 255 * the head page (even the head page has this). 256 * 257 * The first tail page's ->lru.next holds the address of the compound page's 258 * put_page() function. Its ->lru.prev holds the order of allocation. 259 * This usage means that zero-order pages may not be compound. 260 */ 261 262static void free_compound_page(struct page *page) 263{ 264 __free_pages_ok(page, compound_order(page)); 265} 266 267static void prep_compound_page(struct page *page, unsigned long order) 268{ 269 int i; 270 int nr_pages = 1 << order; 271 272 set_compound_page_dtor(page, free_compound_page); 273 set_compound_order(page, order); 274 __SetPageHead(page); 275 for (i = 1; i < nr_pages; i++) { 276 struct page *p = page + i; 277 278 __SetPageTail(p); 279 p->first_page = page; 280 } 281} 282 283static void destroy_compound_page(struct page *page, unsigned long order) 284{ 285 int i; 286 int nr_pages = 1 << order; 287 288 if (unlikely(compound_order(page) != order)) 289 bad_page(page); 290 291 if (unlikely(!PageHead(page))) 292 bad_page(page); 293 __ClearPageHead(page); 294 for (i = 1; i < nr_pages; i++) { 295 struct page *p = page + i; 296 297 if (unlikely(!PageTail(p) | 298 (p->first_page != page))) 299 bad_page(page); 300 __ClearPageTail(p); 301 } 302} 303 304static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 305{ 306 int i; 307 308 /* 309 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 310 * and __GFP_HIGHMEM from hard or soft interrupt context. 311 */ 312 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt()); 313 for (i = 0; i < (1 << order); i++) 314 clear_highpage(page + i); 315} 316 317static inline void set_page_order(struct page *page, int order) 318{ 319 set_page_private(page, order); 320 __SetPageBuddy(page); 321} 322 323static inline void rmv_page_order(struct page *page) 324{ 325 __ClearPageBuddy(page); 326 set_page_private(page, 0); 327} 328 329/* 330 * Locate the struct page for both the matching buddy in our 331 * pair (buddy1) and the combined O(n+1) page they form (page). 332 * 333 * 1) Any buddy B1 will have an order O twin B2 which satisfies 334 * the following equation: 335 * B2 = B1 ^ (1 << O) 336 * For example, if the starting buddy (buddy2) is #8 its order 337 * 1 buddy is #10: 338 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 339 * 340 * 2) Any buddy B will have an order O+1 parent P which 341 * satisfies the following equation: 342 * P = B & ~(1 << O) 343 * 344 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER 345 */ 346static inline struct page * 347__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order) 348{ 349 unsigned long buddy_idx = page_idx ^ (1 << order); 350 351 return page + (buddy_idx - page_idx); 352} 353 354static inline unsigned long 355__find_combined_index(unsigned long page_idx, unsigned int order) 356{ 357 return (page_idx & ~(1 << order)); 358} 359 360/* 361 * This function checks whether a page is free && is the buddy 362 * we can do coalesce a page and its buddy if 363 * (a) the buddy is not in a hole && 364 * (b) the buddy is in the buddy system && 365 * (c) a page and its buddy have the same order && 366 * (d) a page and its buddy are in the same zone. 367 * 368 * For recording whether a page is in the buddy system, we use PG_buddy. 369 * Setting, clearing, and testing PG_buddy is serialized by zone->lock. 370 * 371 * For recording page's order, we use page_private(page). 372 */ 373static inline int page_is_buddy(struct page *page, struct page *buddy, 374 int order) 375{ 376 if (!pfn_valid_within(page_to_pfn(buddy))) 377 return 0; 378 379 if (page_zone_id(page) != page_zone_id(buddy)) 380 return 0; 381 382 if (PageBuddy(buddy) && page_order(buddy) == order) { 383 BUG_ON(page_count(buddy) != 0); 384 return 1; 385 } 386 return 0; 387} 388 389/* 390 * Freeing function for a buddy system allocator. 391 * 392 * The concept of a buddy system is to maintain direct-mapped table 393 * (containing bit values) for memory blocks of various "orders". 394 * The bottom level table contains the map for the smallest allocatable 395 * units of memory (here, pages), and each level above it describes 396 * pairs of units from the levels below, hence, "buddies". 397 * At a high level, all that happens here is marking the table entry 398 * at the bottom level available, and propagating the changes upward 399 * as necessary, plus some accounting needed to play nicely with other 400 * parts of the VM system. 401 * At each level, we keep a list of pages, which are heads of continuous 402 * free pages of length of (1 << order) and marked with PG_buddy. Page's 403 * order is recorded in page_private(page) field. 404 * So when we are allocating or freeing one, we can derive the state of the 405 * other. That is, if we allocate a small block, and both were 406 * free, the remainder of the region must be split into blocks. 407 * If a block is freed, and its buddy is also free, then this 408 * triggers coalescing into a block of larger size. 409 * 410 * -- wli 411 */ 412 413static inline void __free_one_page(struct page *page, 414 struct zone *zone, unsigned int order) 415{ 416 unsigned long page_idx; 417 int order_size = 1 << order; 418 int migratetype = get_pageblock_migratetype(page); 419 420 if (unlikely(PageCompound(page))) 421 destroy_compound_page(page, order); 422 423 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 424 425 VM_BUG_ON(page_idx & (order_size - 1)); 426 VM_BUG_ON(bad_range(zone, page)); 427 428 __mod_zone_page_state(zone, NR_FREE_PAGES, order_size); 429 while (order < MAX_ORDER-1) { 430 unsigned long combined_idx; 431 struct page *buddy; 432 433 buddy = __page_find_buddy(page, page_idx, order); 434 if (!page_is_buddy(page, buddy, order)) 435 break; /* Move the buddy up one level. */ 436 437 list_del(&buddy->lru); 438 zone->free_area[order].nr_free--; 439 rmv_page_order(buddy); 440 combined_idx = __find_combined_index(page_idx, order); 441 page = page + (combined_idx - page_idx); 442 page_idx = combined_idx; 443 order++; 444 } 445 set_page_order(page, order); 446 list_add(&page->lru, 447 &zone->free_area[order].free_list[migratetype]); 448 zone->free_area[order].nr_free++; 449} 450 451static inline int free_pages_check(struct page *page) 452{ 453 if (unlikely(page_mapcount(page) | 454 (page->mapping != NULL) | 455 (page_count(page) != 0) | 456 (page->flags & ( 457 1 << PG_lru | 458 1 << PG_private | 459 1 << PG_locked | 460 1 << PG_active | 461 1 << PG_slab | 462 1 << PG_swapcache | 463 1 << PG_writeback | 464 1 << PG_reserved | 465 1 << PG_buddy )))) 466 bad_page(page); 467 if (PageDirty(page)) 468 __ClearPageDirty(page); 469 /* 470 * For now, we report if PG_reserved was found set, but do not 471 * clear it, and do not free the page. But we shall soon need 472 * to do more, for when the ZERO_PAGE count wraps negative. 473 */ 474 return PageReserved(page); 475} 476 477/* 478 * Frees a list of pages. 479 * Assumes all pages on list are in same zone, and of same order. 480 * count is the number of pages to free. 481 * 482 * If the zone was previously in an "all pages pinned" state then look to 483 * see if this freeing clears that state. 484 * 485 * And clear the zone's pages_scanned counter, to hold off the "all pages are 486 * pinned" detection logic. 487 */ 488static void free_pages_bulk(struct zone *zone, int count, 489 struct list_head *list, int order) 490{ 491 spin_lock(&zone->lock); 492 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); 493 zone->pages_scanned = 0; 494 while (count--) { 495 struct page *page; 496 497 VM_BUG_ON(list_empty(list)); 498 page = list_entry(list->prev, struct page, lru); 499 /* have to delete it as __free_one_page list manipulates */ 500 list_del(&page->lru); 501 __free_one_page(page, zone, order); 502 } 503 spin_unlock(&zone->lock); 504} 505 506static void free_one_page(struct zone *zone, struct page *page, int order) 507{ 508 spin_lock(&zone->lock); 509 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); 510 zone->pages_scanned = 0; 511 __free_one_page(page, zone, order); 512 spin_unlock(&zone->lock); 513} 514 515static void __free_pages_ok(struct page *page, unsigned int order) 516{ 517 unsigned long flags; 518 int i; 519 int reserved = 0; 520 521 for (i = 0 ; i < (1 << order) ; ++i) 522 reserved += free_pages_check(page + i); 523 if (reserved) 524 return; 525 526 if (!PageHighMem(page)) 527 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); 528 arch_free_page(page, order); 529 kernel_map_pages(page, 1 << order, 0); 530 531 local_irq_save(flags); 532 __count_vm_events(PGFREE, 1 << order); 533 free_one_page(page_zone(page), page, order); 534 local_irq_restore(flags); 535} 536 537/* 538 * permit the bootmem allocator to evade page validation on high-order frees 539 */ 540void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order) 541{ 542 if (order == 0) { 543 __ClearPageReserved(page); 544 set_page_count(page, 0); 545 set_page_refcounted(page); 546 __free_page(page); 547 } else { 548 int loop; 549 550 prefetchw(page); 551 for (loop = 0; loop < BITS_PER_LONG; loop++) { 552 struct page *p = &page[loop]; 553 554 if (loop + 1 < BITS_PER_LONG) 555 prefetchw(p + 1); 556 __ClearPageReserved(p); 557 set_page_count(p, 0); 558 } 559 560 set_page_refcounted(page); 561 __free_pages(page, order); 562 } 563} 564 565 566/* 567 * The order of subdivision here is critical for the IO subsystem. 568 * Please do not alter this order without good reasons and regression 569 * testing. Specifically, as large blocks of memory are subdivided, 570 * the order in which smaller blocks are delivered depends on the order 571 * they're subdivided in this function. This is the primary factor 572 * influencing the order in which pages are delivered to the IO 573 * subsystem according to empirical testing, and this is also justified 574 * by considering the behavior of a buddy system containing a single 575 * large block of memory acted on by a series of small allocations. 576 * This behavior is a critical factor in sglist merging's success. 577 * 578 * -- wli 579 */ 580static inline void expand(struct zone *zone, struct page *page, 581 int low, int high, struct free_area *area, 582 int migratetype) 583{ 584 unsigned long size = 1 << high; 585 586 while (high > low) { 587 area--; 588 high--; 589 size >>= 1; 590 VM_BUG_ON(bad_range(zone, &page[size])); 591 list_add(&page[size].lru, &area->free_list[migratetype]); 592 area->nr_free++; 593 set_page_order(&page[size], high); 594 } 595} 596 597/* 598 * This page is about to be returned from the page allocator 599 */ 600static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 601{ 602 if (unlikely(page_mapcount(page) | 603 (page->mapping != NULL) | 604 (page_count(page) != 0) | 605 (page->flags & ( 606 1 << PG_lru | 607 1 << PG_private | 608 1 << PG_locked | 609 1 << PG_active | 610 1 << PG_dirty | 611 1 << PG_slab | 612 1 << PG_swapcache | 613 1 << PG_writeback | 614 1 << PG_reserved | 615 1 << PG_buddy )))) 616 bad_page(page); 617 618 /* 619 * For now, we report if PG_reserved was found set, but do not 620 * clear it, and do not allocate the page: as a safety net. 621 */ 622 if (PageReserved(page)) 623 return 1; 624 625 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead | 626 1 << PG_referenced | 1 << PG_arch_1 | 627 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk); 628 set_page_private(page, 0); 629 set_page_refcounted(page); 630 631 arch_alloc_page(page, order); 632 kernel_map_pages(page, 1 << order, 1); 633 634 if (gfp_flags & __GFP_ZERO) 635 prep_zero_page(page, order, gfp_flags); 636 637 if (order && (gfp_flags & __GFP_COMP)) 638 prep_compound_page(page, order); 639 640 return 0; 641} 642 643/* 644 * Go through the free lists for the given migratetype and remove 645 * the smallest available page from the freelists 646 */ 647static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 648 int migratetype) 649{ 650 unsigned int current_order; 651 struct free_area * area; 652 struct page *page; 653 654 /* Find a page of the appropriate size in the preferred list */ 655 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 656 area = &(zone->free_area[current_order]); 657 if (list_empty(&area->free_list[migratetype])) 658 continue; 659 660 page = list_entry(area->free_list[migratetype].next, 661 struct page, lru); 662 list_del(&page->lru); 663 rmv_page_order(page); 664 area->nr_free--; 665 __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order)); 666 expand(zone, page, order, current_order, area, migratetype); 667 return page; 668 } 669 670 return NULL; 671} 672 673 674/* 675 * This array describes the order lists are fallen back to when 676 * the free lists for the desirable migrate type are depleted 677 */ 678static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = { 679 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 680 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 681 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE }, 682 [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */ 683}; 684 685/* 686 * Move the free pages in a range to the free lists of the requested type. 687 * Note that start_page and end_pages are not aligned on a pageblock 688 * boundary. If alignment is required, use move_freepages_block() 689 */ 690int move_freepages(struct zone *zone, 691 struct page *start_page, struct page *end_page, 692 int migratetype) 693{ 694 struct page *page; 695 unsigned long order; 696 int pages_moved = 0; 697 698#ifndef CONFIG_HOLES_IN_ZONE 699 /* 700 * page_zone is not safe to call in this context when 701 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant 702 * anyway as we check zone boundaries in move_freepages_block(). 703 * Remove at a later date when no bug reports exist related to 704 * grouping pages by mobility 705 */ 706 BUG_ON(page_zone(start_page) != page_zone(end_page)); 707#endif 708 709 for (page = start_page; page <= end_page;) { 710 if (!pfn_valid_within(page_to_pfn(page))) { 711 page++; 712 continue; 713 } 714 715 if (!PageBuddy(page)) { 716 page++; 717 continue; 718 } 719 720 order = page_order(page); 721 list_del(&page->lru); 722 list_add(&page->lru, 723 &zone->free_area[order].free_list[migratetype]); 724 page += 1 << order; 725 pages_moved += 1 << order; 726 } 727 728 return pages_moved; 729} 730 731int move_freepages_block(struct zone *zone, struct page *page, int migratetype) 732{ 733 unsigned long start_pfn, end_pfn; 734 struct page *start_page, *end_page; 735 736 start_pfn = page_to_pfn(page); 737 start_pfn = start_pfn & ~(pageblock_nr_pages-1); 738 start_page = pfn_to_page(start_pfn); 739 end_page = start_page + pageblock_nr_pages - 1; 740 end_pfn = start_pfn + pageblock_nr_pages - 1; 741 742 /* Do not cross zone boundaries */ 743 if (start_pfn < zone->zone_start_pfn) 744 start_page = page; 745 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages) 746 return 0; 747 748 return move_freepages(zone, start_page, end_page, migratetype); 749} 750 751/* Remove an element from the buddy allocator from the fallback list */ 752static struct page *__rmqueue_fallback(struct zone *zone, int order, 753 int start_migratetype) 754{ 755 struct free_area * area; 756 int current_order; 757 struct page *page; 758 int migratetype, i; 759 760 /* Find the largest possible block of pages in the other list */ 761 for (current_order = MAX_ORDER-1; current_order >= order; 762 --current_order) { 763 for (i = 0; i < MIGRATE_TYPES - 1; i++) { 764 migratetype = fallbacks[start_migratetype][i]; 765 766 /* MIGRATE_RESERVE handled later if necessary */ 767 if (migratetype == MIGRATE_RESERVE) 768 continue; 769 770 area = &(zone->free_area[current_order]); 771 if (list_empty(&area->free_list[migratetype])) 772 continue; 773 774 page = list_entry(area->free_list[migratetype].next, 775 struct page, lru); 776 area->nr_free--; 777 778 /* 779 * If breaking a large block of pages, move all free 780 * pages to the preferred allocation list. If falling 781 * back for a reclaimable kernel allocation, be more 782 * agressive about taking ownership of free pages 783 */ 784 if (unlikely(current_order >= (pageblock_order >> 1)) || 785 start_migratetype == MIGRATE_RECLAIMABLE) { 786 unsigned long pages; 787 pages = move_freepages_block(zone, page, 788 start_migratetype); 789 790 /* Claim the whole block if over half of it is free */ 791 if (pages >= (1 << (pageblock_order-1))) 792 set_pageblock_migratetype(page, 793 start_migratetype); 794 795 migratetype = start_migratetype; 796 } 797 798 /* Remove the page from the freelists */ 799 list_del(&page->lru); 800 rmv_page_order(page); 801 __mod_zone_page_state(zone, NR_FREE_PAGES, 802 -(1UL << order)); 803 804 if (current_order == pageblock_order) 805 set_pageblock_migratetype(page, 806 start_migratetype); 807 808 expand(zone, page, order, current_order, area, migratetype); 809 return page; 810 } 811 } 812 813 /* Use MIGRATE_RESERVE rather than fail an allocation */ 814 return __rmqueue_smallest(zone, order, MIGRATE_RESERVE); 815} 816 817/* 818 * Do the hard work of removing an element from the buddy allocator. 819 * Call me with the zone->lock already held. 820 */ 821static struct page *__rmqueue(struct zone *zone, unsigned int order, 822 int migratetype) 823{ 824 struct page *page; 825 826 page = __rmqueue_smallest(zone, order, migratetype); 827 828 if (unlikely(!page)) 829 page = __rmqueue_fallback(zone, order, migratetype); 830 831 return page; 832} 833 834/* 835 * Obtain a specified number of elements from the buddy allocator, all under 836 * a single hold of the lock, for efficiency. Add them to the supplied list. 837 * Returns the number of new pages which were placed at *list. 838 */ 839static int rmqueue_bulk(struct zone *zone, unsigned int order, 840 unsigned long count, struct list_head *list, 841 int migratetype) 842{ 843 int i; 844 845 spin_lock(&zone->lock); 846 for (i = 0; i < count; ++i) { 847 struct page *page = __rmqueue(zone, order, migratetype); 848 if (unlikely(page == NULL)) 849 break; 850 851 /* 852 * Split buddy pages returned by expand() are received here 853 * in physical page order. The page is added to the callers and 854 * list and the list head then moves forward. From the callers 855 * perspective, the linked list is ordered by page number in 856 * some conditions. This is useful for IO devices that can 857 * merge IO requests if the physical pages are ordered 858 * properly. 859 */ 860 list_add(&page->lru, list); 861 set_page_private(page, migratetype); 862 list = &page->lru; 863 } 864 spin_unlock(&zone->lock); 865 return i; 866} 867 868#ifdef CONFIG_NUMA 869/* 870 * Called from the vmstat counter updater to drain pagesets of this 871 * currently executing processor on remote nodes after they have 872 * expired. 873 * 874 * Note that this function must be called with the thread pinned to 875 * a single processor. 876 */ 877void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 878{ 879 unsigned long flags; 880 int to_drain; 881 882 local_irq_save(flags); 883 if (pcp->count >= pcp->batch) 884 to_drain = pcp->batch; 885 else 886 to_drain = pcp->count; 887 free_pages_bulk(zone, to_drain, &pcp->list, 0); 888 pcp->count -= to_drain; 889 local_irq_restore(flags); 890} 891#endif 892 893/* 894 * Drain pages of the indicated processor. 895 * 896 * The processor must either be the current processor and the 897 * thread pinned to the current processor or a processor that 898 * is not online. 899 */ 900static void drain_pages(unsigned int cpu) 901{ 902 unsigned long flags; 903 struct zone *zone; 904 905 for_each_zone(zone) { 906 struct per_cpu_pageset *pset; 907 struct per_cpu_pages *pcp; 908 909 if (!populated_zone(zone)) 910 continue; 911 912 pset = zone_pcp(zone, cpu); 913 914 pcp = &pset->pcp; 915 local_irq_save(flags); 916 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 917 pcp->count = 0; 918 local_irq_restore(flags); 919 } 920} 921 922/* 923 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 924 */ 925void drain_local_pages(void *arg) 926{ 927 drain_pages(smp_processor_id()); 928} 929 930/* 931 * Spill all the per-cpu pages from all CPUs back into the buddy allocator 932 */ 933void drain_all_pages(void) 934{ 935 on_each_cpu(drain_local_pages, NULL, 0, 1); 936} 937 938#ifdef CONFIG_HIBERNATION 939 940void mark_free_pages(struct zone *zone) 941{ 942 unsigned long pfn, max_zone_pfn; 943 unsigned long flags; 944 int order, t; 945 struct list_head *curr; 946 947 if (!zone->spanned_pages) 948 return; 949 950 spin_lock_irqsave(&zone->lock, flags); 951 952 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 953 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 954 if (pfn_valid(pfn)) { 955 struct page *page = pfn_to_page(pfn); 956 957 if (!swsusp_page_is_forbidden(page)) 958 swsusp_unset_page_free(page); 959 } 960 961 for_each_migratetype_order(order, t) { 962 list_for_each(curr, &zone->free_area[order].free_list[t]) { 963 unsigned long i; 964 965 pfn = page_to_pfn(list_entry(curr, struct page, lru)); 966 for (i = 0; i < (1UL << order); i++) 967 swsusp_set_page_free(pfn_to_page(pfn + i)); 968 } 969 } 970 spin_unlock_irqrestore(&zone->lock, flags); 971} 972#endif /* CONFIG_PM */ 973 974/* 975 * Free a 0-order page 976 */ 977static void fastcall free_hot_cold_page(struct page *page, int cold) 978{ 979 struct zone *zone = page_zone(page); 980 struct per_cpu_pages *pcp; 981 unsigned long flags; 982 983 if (PageAnon(page)) 984 page->mapping = NULL; 985 if (free_pages_check(page)) 986 return; 987 988 if (!PageHighMem(page)) 989 debug_check_no_locks_freed(page_address(page), PAGE_SIZE); 990 arch_free_page(page, 0); 991 kernel_map_pages(page, 1, 0); 992 993 pcp = &zone_pcp(zone, get_cpu())->pcp; 994 local_irq_save(flags); 995 __count_vm_event(PGFREE); 996 if (cold) 997 list_add_tail(&page->lru, &pcp->list); 998 else 999 list_add(&page->lru, &pcp->list); 1000 set_page_private(page, get_pageblock_migratetype(page)); 1001 pcp->count++; 1002 if (pcp->count >= pcp->high) { 1003 free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 1004 pcp->count -= pcp->batch; 1005 } 1006 local_irq_restore(flags); 1007 put_cpu(); 1008} 1009 1010void fastcall free_hot_page(struct page *page) 1011{ 1012 free_hot_cold_page(page, 0); 1013} 1014 1015void fastcall free_cold_page(struct page *page) 1016{ 1017 free_hot_cold_page(page, 1); 1018} 1019 1020/* 1021 * split_page takes a non-compound higher-order page, and splits it into 1022 * n (1<<order) sub-pages: page[0..n] 1023 * Each sub-page must be freed individually. 1024 * 1025 * Note: this is probably too low level an operation for use in drivers. 1026 * Please consult with lkml before using this in your driver. 1027 */ 1028void split_page(struct page *page, unsigned int order) 1029{ 1030 int i; 1031 1032 VM_BUG_ON(PageCompound(page)); 1033 VM_BUG_ON(!page_count(page)); 1034 for (i = 1; i < (1 << order); i++) 1035 set_page_refcounted(page + i); 1036} 1037 1038/* 1039 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 1040 * we cheat by calling it from here, in the order > 0 path. Saves a branch 1041 * or two. 1042 */ 1043static struct page *buffered_rmqueue(struct zonelist *zonelist, 1044 struct zone *zone, int order, gfp_t gfp_flags) 1045{ 1046 unsigned long flags; 1047 struct page *page; 1048 int cold = !!(gfp_flags & __GFP_COLD); 1049 int cpu; 1050 int migratetype = allocflags_to_migratetype(gfp_flags); 1051 1052again: 1053 cpu = get_cpu(); 1054 if (likely(order == 0)) { 1055 struct per_cpu_pages *pcp; 1056 1057 pcp = &zone_pcp(zone, cpu)->pcp; 1058 local_irq_save(flags); 1059 if (!pcp->count) { 1060 pcp->count = rmqueue_bulk(zone, 0, 1061 pcp->batch, &pcp->list, migratetype); 1062 if (unlikely(!pcp->count)) 1063 goto failed; 1064 } 1065 1066 /* Find a page of the appropriate migrate type */ 1067 if (cold) { 1068 list_for_each_entry_reverse(page, &pcp->list, lru) 1069 if (page_private(page) == migratetype) 1070 break; 1071 } else { 1072 list_for_each_entry(page, &pcp->list, lru) 1073 if (page_private(page) == migratetype) 1074 break; 1075 } 1076 1077 /* Allocate more to the pcp list if necessary */ 1078 if (unlikely(&page->lru == &pcp->list)) { 1079 pcp->count += rmqueue_bulk(zone, 0, 1080 pcp->batch, &pcp->list, migratetype); 1081 page = list_entry(pcp->list.next, struct page, lru); 1082 } 1083 1084 list_del(&page->lru); 1085 pcp->count--; 1086 } else { 1087 spin_lock_irqsave(&zone->lock, flags); 1088 page = __rmqueue(zone, order, migratetype); 1089 spin_unlock(&zone->lock); 1090 if (!page) 1091 goto failed; 1092 } 1093 1094 __count_zone_vm_events(PGALLOC, zone, 1 << order); 1095 zone_statistics(zonelist, zone); 1096 local_irq_restore(flags); 1097 put_cpu(); 1098 1099 VM_BUG_ON(bad_range(zone, page)); 1100 if (prep_new_page(page, order, gfp_flags)) 1101 goto again; 1102 return page; 1103 1104failed: 1105 local_irq_restore(flags); 1106 put_cpu(); 1107 return NULL; 1108} 1109 1110#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 1111#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ 1112#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ 1113#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ 1114#define ALLOC_HARDER 0x10 /* try to alloc harder */ 1115#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 1116#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 1117 1118#ifdef CONFIG_FAIL_PAGE_ALLOC 1119 1120static struct fail_page_alloc_attr { 1121 struct fault_attr attr; 1122 1123 u32 ignore_gfp_highmem; 1124 u32 ignore_gfp_wait; 1125 u32 min_order; 1126 1127#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 1128 1129 struct dentry *ignore_gfp_highmem_file; 1130 struct dentry *ignore_gfp_wait_file; 1131 struct dentry *min_order_file; 1132 1133#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 1134 1135} fail_page_alloc = { 1136 .attr = FAULT_ATTR_INITIALIZER, 1137 .ignore_gfp_wait = 1, 1138 .ignore_gfp_highmem = 1, 1139 .min_order = 1, 1140}; 1141 1142static int __init setup_fail_page_alloc(char *str) 1143{ 1144 return setup_fault_attr(&fail_page_alloc.attr, str); 1145} 1146__setup("fail_page_alloc=", setup_fail_page_alloc); 1147 1148static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 1149{ 1150 if (order < fail_page_alloc.min_order) 1151 return 0; 1152 if (gfp_mask & __GFP_NOFAIL) 1153 return 0; 1154 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 1155 return 0; 1156 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) 1157 return 0; 1158 1159 return should_fail(&fail_page_alloc.attr, 1 << order); 1160} 1161 1162#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 1163 1164static int __init fail_page_alloc_debugfs(void) 1165{ 1166 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 1167 struct dentry *dir; 1168 int err; 1169 1170 err = init_fault_attr_dentries(&fail_page_alloc.attr, 1171 "fail_page_alloc"); 1172 if (err) 1173 return err; 1174 dir = fail_page_alloc.attr.dentries.dir; 1175 1176 fail_page_alloc.ignore_gfp_wait_file = 1177 debugfs_create_bool("ignore-gfp-wait", mode, dir, 1178 &fail_page_alloc.ignore_gfp_wait); 1179 1180 fail_page_alloc.ignore_gfp_highmem_file = 1181 debugfs_create_bool("ignore-gfp-highmem", mode, dir, 1182 &fail_page_alloc.ignore_gfp_highmem); 1183 fail_page_alloc.min_order_file = 1184 debugfs_create_u32("min-order", mode, dir, 1185 &fail_page_alloc.min_order); 1186 1187 if (!fail_page_alloc.ignore_gfp_wait_file || 1188 !fail_page_alloc.ignore_gfp_highmem_file || 1189 !fail_page_alloc.min_order_file) { 1190 err = -ENOMEM; 1191 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file); 1192 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file); 1193 debugfs_remove(fail_page_alloc.min_order_file); 1194 cleanup_fault_attr_dentries(&fail_page_alloc.attr); 1195 } 1196 1197 return err; 1198} 1199 1200late_initcall(fail_page_alloc_debugfs); 1201 1202#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 1203 1204#else /* CONFIG_FAIL_PAGE_ALLOC */ 1205 1206static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 1207{ 1208 return 0; 1209} 1210 1211#endif /* CONFIG_FAIL_PAGE_ALLOC */ 1212 1213/* 1214 * Return 1 if free pages are above 'mark'. This takes into account the order 1215 * of the allocation. 1216 */ 1217int zone_watermark_ok(struct zone *z, int order, unsigned long mark, 1218 int classzone_idx, int alloc_flags) 1219{ 1220 /* free_pages my go negative - that's OK */ 1221 long min = mark; 1222 long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1; 1223 int o; 1224 1225 if (alloc_flags & ALLOC_HIGH) 1226 min -= min / 2; 1227 if (alloc_flags & ALLOC_HARDER) 1228 min -= min / 4; 1229 1230 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 1231 return 0; 1232 for (o = 0; o < order; o++) { 1233 /* At the next order, this order's pages become unavailable */ 1234 free_pages -= z->free_area[o].nr_free << o; 1235 1236 /* Require fewer higher order pages to be free */ 1237 min >>= 1; 1238 1239 if (free_pages <= min) 1240 return 0; 1241 } 1242 return 1; 1243} 1244 1245#ifdef CONFIG_NUMA 1246/* 1247 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to 1248 * skip over zones that are not allowed by the cpuset, or that have 1249 * been recently (in last second) found to be nearly full. See further 1250 * comments in mmzone.h. Reduces cache footprint of zonelist scans 1251 * that have to skip over a lot of full or unallowed zones. 1252 * 1253 * If the zonelist cache is present in the passed in zonelist, then 1254 * returns a pointer to the allowed node mask (either the current 1255 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].) 1256 * 1257 * If the zonelist cache is not available for this zonelist, does 1258 * nothing and returns NULL. 1259 * 1260 * If the fullzones BITMAP in the zonelist cache is stale (more than 1261 * a second since last zap'd) then we zap it out (clear its bits.) 1262 * 1263 * We hold off even calling zlc_setup, until after we've checked the 1264 * first zone in the zonelist, on the theory that most allocations will 1265 * be satisfied from that first zone, so best to examine that zone as 1266 * quickly as we can. 1267 */ 1268static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1269{ 1270 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1271 nodemask_t *allowednodes; /* zonelist_cache approximation */ 1272 1273 zlc = zonelist->zlcache_ptr; 1274 if (!zlc) 1275 return NULL; 1276 1277 if (jiffies - zlc->last_full_zap > 1 * HZ) { 1278 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 1279 zlc->last_full_zap = jiffies; 1280 } 1281 1282 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ? 1283 &cpuset_current_mems_allowed : 1284 &node_states[N_HIGH_MEMORY]; 1285 return allowednodes; 1286} 1287 1288/* 1289 * Given 'z' scanning a zonelist, run a couple of quick checks to see 1290 * if it is worth looking at further for free memory: 1291 * 1) Check that the zone isn't thought to be full (doesn't have its 1292 * bit set in the zonelist_cache fullzones BITMAP). 1293 * 2) Check that the zones node (obtained from the zonelist_cache 1294 * z_to_n[] mapping) is allowed in the passed in allowednodes mask. 1295 * Return true (non-zero) if zone is worth looking at further, or 1296 * else return false (zero) if it is not. 1297 * 1298 * This check -ignores- the distinction between various watermarks, 1299 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is 1300 * found to be full for any variation of these watermarks, it will 1301 * be considered full for up to one second by all requests, unless 1302 * we are so low on memory on all allowed nodes that we are forced 1303 * into the second scan of the zonelist. 1304 * 1305 * In the second scan we ignore this zonelist cache and exactly 1306 * apply the watermarks to all zones, even it is slower to do so. 1307 * We are low on memory in the second scan, and should leave no stone 1308 * unturned looking for a free page. 1309 */ 1310static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, 1311 nodemask_t *allowednodes) 1312{ 1313 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1314 int i; /* index of *z in zonelist zones */ 1315 int n; /* node that zone *z is on */ 1316 1317 zlc = zonelist->zlcache_ptr; 1318 if (!zlc) 1319 return 1; 1320 1321 i = z - zonelist->zones; 1322 n = zlc->z_to_n[i]; 1323 1324 /* This zone is worth trying if it is allowed but not full */ 1325 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones); 1326} 1327 1328/* 1329 * Given 'z' scanning a zonelist, set the corresponding bit in 1330 * zlc->fullzones, so that subsequent attempts to allocate a page 1331 * from that zone don't waste time re-examining it. 1332 */ 1333static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) 1334{ 1335 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1336 int i; /* index of *z in zonelist zones */ 1337 1338 zlc = zonelist->zlcache_ptr; 1339 if (!zlc) 1340 return; 1341 1342 i = z - zonelist->zones; 1343 1344 set_bit(i, zlc->fullzones); 1345} 1346 1347#else /* CONFIG_NUMA */ 1348 1349static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1350{ 1351 return NULL; 1352} 1353 1354static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, 1355 nodemask_t *allowednodes) 1356{ 1357 return 1; 1358} 1359 1360static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) 1361{ 1362} 1363#endif /* CONFIG_NUMA */ 1364 1365/* 1366 * get_page_from_freelist goes through the zonelist trying to allocate 1367 * a page. 1368 */ 1369static struct page * 1370get_page_from_freelist(gfp_t gfp_mask, unsigned int order, 1371 struct zonelist *zonelist, int alloc_flags) 1372{ 1373 struct zone **z; 1374 struct page *page = NULL; 1375 int classzone_idx = zone_idx(zonelist->zones[0]); 1376 struct zone *zone; 1377 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ 1378 int zlc_active = 0; /* set if using zonelist_cache */ 1379 int did_zlc_setup = 0; /* just call zlc_setup() one time */ 1380 enum zone_type highest_zoneidx = -1; /* Gets set for policy zonelists */ 1381 1382zonelist_scan: 1383 /* 1384 * Scan zonelist, looking for a zone with enough free. 1385 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1386 */ 1387 z = zonelist->zones; 1388 1389 do { 1390 /* 1391 * In NUMA, this could be a policy zonelist which contains 1392 * zones that may not be allowed by the current gfp_mask. 1393 * Check the zone is allowed by the current flags 1394 */ 1395 if (unlikely(alloc_should_filter_zonelist(zonelist))) { 1396 if (highest_zoneidx == -1) 1397 highest_zoneidx = gfp_zone(gfp_mask); 1398 if (zone_idx(*z) > highest_zoneidx) 1399 continue; 1400 } 1401 1402 if (NUMA_BUILD && zlc_active && 1403 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 1404 continue; 1405 zone = *z; 1406 if ((alloc_flags & ALLOC_CPUSET) && 1407 !cpuset_zone_allowed_softwall(zone, gfp_mask)) 1408 goto try_next_zone; 1409 1410 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 1411 unsigned long mark; 1412 if (alloc_flags & ALLOC_WMARK_MIN) 1413 mark = zone->pages_min; 1414 else if (alloc_flags & ALLOC_WMARK_LOW) 1415 mark = zone->pages_low; 1416 else 1417 mark = zone->pages_high; 1418 if (!zone_watermark_ok(zone, order, mark, 1419 classzone_idx, alloc_flags)) { 1420 if (!zone_reclaim_mode || 1421 !zone_reclaim(zone, gfp_mask, order)) 1422 goto this_zone_full; 1423 } 1424 } 1425 1426 page = buffered_rmqueue(zonelist, zone, order, gfp_mask); 1427 if (page) 1428 break; 1429this_zone_full: 1430 if (NUMA_BUILD) 1431 zlc_mark_zone_full(zonelist, z); 1432try_next_zone: 1433 if (NUMA_BUILD && !did_zlc_setup) { 1434 /* we do zlc_setup after the first zone is tried */ 1435 allowednodes = zlc_setup(zonelist, alloc_flags); 1436 zlc_active = 1; 1437 did_zlc_setup = 1; 1438 } 1439 } while (*(++z) != NULL); 1440 1441 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) { 1442 /* Disable zlc cache for second zonelist scan */ 1443 zlc_active = 0; 1444 goto zonelist_scan; 1445 } 1446 return page; 1447} 1448 1449/* 1450 * This is the 'heart' of the zoned buddy allocator. 1451 */ 1452struct page * fastcall 1453__alloc_pages(gfp_t gfp_mask, unsigned int order, 1454 struct zonelist *zonelist) 1455{ 1456 const gfp_t wait = gfp_mask & __GFP_WAIT; 1457 struct zone **z; 1458 struct page *page; 1459 struct reclaim_state reclaim_state; 1460 struct task_struct *p = current; 1461 int do_retry; 1462 int alloc_flags; 1463 int did_some_progress; 1464 1465 might_sleep_if(wait); 1466 1467 if (should_fail_alloc_page(gfp_mask, order)) 1468 return NULL; 1469 1470restart: 1471 z = zonelist->zones; /* the list of zones suitable for gfp_mask */ 1472 1473 if (unlikely(*z == NULL)) { 1474 /* 1475 * Happens if we have an empty zonelist as a result of 1476 * GFP_THISNODE being used on a memoryless node 1477 */ 1478 return NULL; 1479 } 1480 1481 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1482 zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); 1483 if (page) 1484 goto got_pg; 1485 1486 /* 1487 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and 1488 * __GFP_NOWARN set) should not cause reclaim since the subsystem 1489 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim 1490 * using a larger set of nodes after it has established that the 1491 * allowed per node queues are empty and that nodes are 1492 * over allocated. 1493 */ 1494 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 1495 goto nopage; 1496 1497 for (z = zonelist->zones; *z; z++) 1498 wakeup_kswapd(*z, order); 1499 1500 /* 1501 * OK, we're below the kswapd watermark and have kicked background 1502 * reclaim. Now things get more complex, so set up alloc_flags according 1503 * to how we want to proceed. 1504 * 1505 * The caller may dip into page reserves a bit more if the caller 1506 * cannot run direct reclaim, or if the caller has realtime scheduling 1507 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 1508 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 1509 */ 1510 alloc_flags = ALLOC_WMARK_MIN; 1511 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) 1512 alloc_flags |= ALLOC_HARDER; 1513 if (gfp_mask & __GFP_HIGH) 1514 alloc_flags |= ALLOC_HIGH; 1515 if (wait) 1516 alloc_flags |= ALLOC_CPUSET; 1517 1518 /* 1519 * Go through the zonelist again. Let __GFP_HIGH and allocations 1520 * coming from realtime tasks go deeper into reserves. 1521 * 1522 * This is the last chance, in general, before the goto nopage. 1523 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 1524 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1525 */ 1526 page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); 1527 if (page) 1528 goto got_pg; 1529 1530 /* This allocation should allow future memory freeing. */ 1531 1532rebalance: 1533 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) 1534 && !in_interrupt()) { 1535 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 1536nofail_alloc: 1537 /* go through the zonelist yet again, ignoring mins */ 1538 page = get_page_from_freelist(gfp_mask, order, 1539 zonelist, ALLOC_NO_WATERMARKS); 1540 if (page) 1541 goto got_pg; 1542 if (gfp_mask & __GFP_NOFAIL) { 1543 congestion_wait(WRITE, HZ/50); 1544 goto nofail_alloc; 1545 } 1546 } 1547 goto nopage; 1548 } 1549 1550 /* Atomic allocations - we can't balance anything */ 1551 if (!wait) 1552 goto nopage; 1553 1554 cond_resched(); 1555 1556 /* We now go into synchronous reclaim */ 1557 cpuset_memory_pressure_bump(); 1558 p->flags |= PF_MEMALLOC; 1559 reclaim_state.reclaimed_slab = 0; 1560 p->reclaim_state = &reclaim_state; 1561 1562 did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask); 1563 1564 p->reclaim_state = NULL; 1565 p->flags &= ~PF_MEMALLOC; 1566 1567 cond_resched(); 1568 1569 if (order != 0) 1570 drain_all_pages(); 1571 1572 if (likely(did_some_progress)) { 1573 page = get_page_from_freelist(gfp_mask, order, 1574 zonelist, alloc_flags); 1575 if (page) 1576 goto got_pg; 1577 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 1578 if (!try_set_zone_oom(zonelist)) { 1579 schedule_timeout_uninterruptible(1); 1580 goto restart; 1581 } 1582 1583 /* 1584 * Go through the zonelist yet one more time, keep 1585 * very high watermark here, this is only to catch 1586 * a parallel oom killing, we must fail if we're still 1587 * under heavy pressure. 1588 */ 1589 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1590 zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); 1591 if (page) { 1592 clear_zonelist_oom(zonelist); 1593 goto got_pg; 1594 } 1595 1596 /* The OOM killer will not help higher order allocs so fail */ 1597 if (order > PAGE_ALLOC_COSTLY_ORDER) { 1598 clear_zonelist_oom(zonelist); 1599 goto nopage; 1600 } 1601 1602 out_of_memory(zonelist, gfp_mask, order); 1603 clear_zonelist_oom(zonelist); 1604 goto restart; 1605 } 1606 1607 /* 1608 * Don't let big-order allocations loop unless the caller explicitly 1609 * requests that. Wait for some write requests to complete then retry. 1610 * 1611 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order 1612 * <= 3, but that may not be true in other implementations. 1613 */ 1614 do_retry = 0; 1615 if (!(gfp_mask & __GFP_NORETRY)) { 1616 if ((order <= PAGE_ALLOC_COSTLY_ORDER) || 1617 (gfp_mask & __GFP_REPEAT)) 1618 do_retry = 1; 1619 if (gfp_mask & __GFP_NOFAIL) 1620 do_retry = 1; 1621 } 1622 if (do_retry) { 1623 congestion_wait(WRITE, HZ/50); 1624 goto rebalance; 1625 } 1626 1627nopage: 1628 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 1629 printk(KERN_WARNING "%s: page allocation failure." 1630 " order:%d, mode:0x%x\n", 1631 p->comm, order, gfp_mask); 1632 dump_stack(); 1633 show_mem(); 1634 } 1635got_pg: 1636 return page; 1637} 1638 1639EXPORT_SYMBOL(__alloc_pages); 1640 1641/* 1642 * Common helper functions. 1643 */ 1644fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 1645{ 1646 struct page * page; 1647 page = alloc_pages(gfp_mask, order); 1648 if (!page) 1649 return 0; 1650 return (unsigned long) page_address(page); 1651} 1652 1653EXPORT_SYMBOL(__get_free_pages); 1654 1655fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) 1656{ 1657 struct page * page; 1658 1659 /* 1660 * get_zeroed_page() returns a 32-bit address, which cannot represent 1661 * a highmem page 1662 */ 1663 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1664 1665 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1666 if (page) 1667 return (unsigned long) page_address(page); 1668 return 0; 1669} 1670 1671EXPORT_SYMBOL(get_zeroed_page); 1672 1673void __pagevec_free(struct pagevec *pvec) 1674{ 1675 int i = pagevec_count(pvec); 1676 1677 while (--i >= 0) 1678 free_hot_cold_page(pvec->pages[i], pvec->cold); 1679} 1680 1681fastcall void __free_pages(struct page *page, unsigned int order) 1682{ 1683 if (put_page_testzero(page)) { 1684 if (order == 0) 1685 free_hot_page(page); 1686 else 1687 __free_pages_ok(page, order); 1688 } 1689} 1690 1691EXPORT_SYMBOL(__free_pages); 1692 1693fastcall void free_pages(unsigned long addr, unsigned int order) 1694{ 1695 if (addr != 0) { 1696 VM_BUG_ON(!virt_addr_valid((void *)addr)); 1697 __free_pages(virt_to_page((void *)addr), order); 1698 } 1699} 1700 1701EXPORT_SYMBOL(free_pages); 1702 1703static unsigned int nr_free_zone_pages(int offset) 1704{ 1705 /* Just pick one node, since fallback list is circular */ 1706 pg_data_t *pgdat = NODE_DATA(numa_node_id()); 1707 unsigned int sum = 0; 1708 1709 struct zonelist *zonelist = pgdat->node_zonelists + offset; 1710 struct zone **zonep = zonelist->zones; 1711 struct zone *zone; 1712 1713 for (zone = *zonep++; zone; zone = *zonep++) { 1714 unsigned long size = zone->present_pages; 1715 unsigned long high = zone->pages_high; 1716 if (size > high) 1717 sum += size - high; 1718 } 1719 1720 return sum; 1721} 1722 1723/* 1724 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 1725 */ 1726unsigned int nr_free_buffer_pages(void) 1727{ 1728 return nr_free_zone_pages(gfp_zone(GFP_USER)); 1729} 1730EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 1731 1732/* 1733 * Amount of free RAM allocatable within all zones 1734 */ 1735unsigned int nr_free_pagecache_pages(void) 1736{ 1737 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 1738} 1739 1740static inline void show_node(struct zone *zone) 1741{ 1742 if (NUMA_BUILD) 1743 printk("Node %d ", zone_to_nid(zone)); 1744} 1745 1746void si_meminfo(struct sysinfo *val) 1747{ 1748 val->totalram = totalram_pages; 1749 val->sharedram = 0; 1750 val->freeram = global_page_state(NR_FREE_PAGES); 1751 val->bufferram = nr_blockdev_pages(); 1752 val->totalhigh = totalhigh_pages; 1753 val->freehigh = nr_free_highpages(); 1754 val->mem_unit = PAGE_SIZE; 1755} 1756 1757EXPORT_SYMBOL(si_meminfo); 1758 1759#ifdef CONFIG_NUMA 1760void si_meminfo_node(struct sysinfo *val, int nid) 1761{ 1762 pg_data_t *pgdat = NODE_DATA(nid); 1763 1764 val->totalram = pgdat->node_present_pages; 1765 val->freeram = node_page_state(nid, NR_FREE_PAGES); 1766#ifdef CONFIG_HIGHMEM 1767 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 1768 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 1769 NR_FREE_PAGES); 1770#else 1771 val->totalhigh = 0; 1772 val->freehigh = 0; 1773#endif 1774 val->mem_unit = PAGE_SIZE; 1775} 1776#endif 1777 1778#define K(x) ((x) << (PAGE_SHIFT-10)) 1779 1780/* 1781 * Show free area list (used inside shift_scroll-lock stuff) 1782 * We also calculate the percentage fragmentation. We do this by counting the 1783 * memory on each free list with the exception of the first item on the list. 1784 */ 1785void show_free_areas(void) 1786{ 1787 int cpu; 1788 struct zone *zone; 1789 1790 for_each_zone(zone) { 1791 if (!populated_zone(zone)) 1792 continue; 1793 1794 show_node(zone); 1795 printk("%s per-cpu:\n", zone->name); 1796 1797 for_each_online_cpu(cpu) { 1798 struct per_cpu_pageset *pageset; 1799 1800 pageset = zone_pcp(zone, cpu); 1801 1802 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n", 1803 cpu, pageset->pcp.high, 1804 pageset->pcp.batch, pageset->pcp.count); 1805 } 1806 } 1807 1808 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n" 1809 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n", 1810 global_page_state(NR_ACTIVE), 1811 global_page_state(NR_INACTIVE), 1812 global_page_state(NR_FILE_DIRTY), 1813 global_page_state(NR_WRITEBACK), 1814 global_page_state(NR_UNSTABLE_NFS), 1815 global_page_state(NR_FREE_PAGES), 1816 global_page_state(NR_SLAB_RECLAIMABLE) + 1817 global_page_state(NR_SLAB_UNRECLAIMABLE), 1818 global_page_state(NR_FILE_MAPPED), 1819 global_page_state(NR_PAGETABLE), 1820 global_page_state(NR_BOUNCE)); 1821 1822 for_each_zone(zone) { 1823 int i; 1824 1825 if (!populated_zone(zone)) 1826 continue; 1827 1828 show_node(zone); 1829 printk("%s" 1830 " free:%lukB" 1831 " min:%lukB" 1832 " low:%lukB" 1833 " high:%lukB" 1834 " active:%lukB" 1835 " inactive:%lukB" 1836 " present:%lukB" 1837 " pages_scanned:%lu" 1838 " all_unreclaimable? %s" 1839 "\n", 1840 zone->name, 1841 K(zone_page_state(zone, NR_FREE_PAGES)), 1842 K(zone->pages_min), 1843 K(zone->pages_low), 1844 K(zone->pages_high), 1845 K(zone_page_state(zone, NR_ACTIVE)), 1846 K(zone_page_state(zone, NR_INACTIVE)), 1847 K(zone->present_pages), 1848 zone->pages_scanned, 1849 (zone_is_all_unreclaimable(zone) ? "yes" : "no") 1850 ); 1851 printk("lowmem_reserve[]:"); 1852 for (i = 0; i < MAX_NR_ZONES; i++) 1853 printk(" %lu", zone->lowmem_reserve[i]); 1854 printk("\n"); 1855 } 1856 1857 for_each_zone(zone) { 1858 unsigned long nr[MAX_ORDER], flags, order, total = 0; 1859 1860 if (!populated_zone(zone)) 1861 continue; 1862 1863 show_node(zone); 1864 printk("%s: ", zone->name); 1865 1866 spin_lock_irqsave(&zone->lock, flags); 1867 for (order = 0; order < MAX_ORDER; order++) { 1868 nr[order] = zone->free_area[order].nr_free; 1869 total += nr[order] << order; 1870 } 1871 spin_unlock_irqrestore(&zone->lock, flags); 1872 for (order = 0; order < MAX_ORDER; order++) 1873 printk("%lu*%lukB ", nr[order], K(1UL) << order); 1874 printk("= %lukB\n", K(total)); 1875 } 1876 1877 show_swap_cache_info(); 1878} 1879 1880/* 1881 * Builds allocation fallback zone lists. 1882 * 1883 * Add all populated zones of a node to the zonelist. 1884 */ 1885static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, 1886 int nr_zones, enum zone_type zone_type) 1887{ 1888 struct zone *zone; 1889 1890 BUG_ON(zone_type >= MAX_NR_ZONES); 1891 zone_type++; 1892 1893 do { 1894 zone_type--; 1895 zone = pgdat->node_zones + zone_type; 1896 if (populated_zone(zone)) { 1897 zonelist->zones[nr_zones++] = zone; 1898 check_highest_zone(zone_type); 1899 } 1900 1901 } while (zone_type); 1902 return nr_zones; 1903} 1904 1905 1906/* 1907 * zonelist_order: 1908 * 0 = automatic detection of better ordering. 1909 * 1 = order by ([node] distance, -zonetype) 1910 * 2 = order by (-zonetype, [node] distance) 1911 * 1912 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create 1913 * the same zonelist. So only NUMA can configure this param. 1914 */ 1915#define ZONELIST_ORDER_DEFAULT 0 1916#define ZONELIST_ORDER_NODE 1 1917#define ZONELIST_ORDER_ZONE 2 1918 1919/* zonelist order in the kernel. 1920 * set_zonelist_order() will set this to NODE or ZONE. 1921 */ 1922static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; 1923static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; 1924 1925 1926#ifdef CONFIG_NUMA 1927/* The value user specified ....changed by config */ 1928static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; 1929/* string for sysctl */ 1930#define NUMA_ZONELIST_ORDER_LEN 16 1931char numa_zonelist_order[16] = "default"; 1932 1933/* 1934 * interface for configure zonelist ordering. 1935 * command line option "numa_zonelist_order" 1936 * = "[dD]efault - default, automatic configuration. 1937 * = "[nN]ode - order by node locality, then by zone within node 1938 * = "[zZ]one - order by zone, then by locality within zone 1939 */ 1940 1941static int __parse_numa_zonelist_order(char *s) 1942{ 1943 if (*s == 'd' || *s == 'D') { 1944 user_zonelist_order = ZONELIST_ORDER_DEFAULT; 1945 } else if (*s == 'n' || *s == 'N') { 1946 user_zonelist_order = ZONELIST_ORDER_NODE; 1947 } else if (*s == 'z' || *s == 'Z') { 1948 user_zonelist_order = ZONELIST_ORDER_ZONE; 1949 } else { 1950 printk(KERN_WARNING 1951 "Ignoring invalid numa_zonelist_order value: " 1952 "%s\n", s); 1953 return -EINVAL; 1954 } 1955 return 0; 1956} 1957 1958static __init int setup_numa_zonelist_order(char *s) 1959{ 1960 if (s) 1961 return __parse_numa_zonelist_order(s); 1962 return 0; 1963} 1964early_param("numa_zonelist_order", setup_numa_zonelist_order); 1965 1966/* 1967 * sysctl handler for numa_zonelist_order 1968 */ 1969int numa_zonelist_order_handler(ctl_table *table, int write, 1970 struct file *file, void __user *buffer, size_t *length, 1971 loff_t *ppos) 1972{ 1973 char saved_string[NUMA_ZONELIST_ORDER_LEN]; 1974 int ret; 1975 1976 if (write) 1977 strncpy(saved_string, (char*)table->data, 1978 NUMA_ZONELIST_ORDER_LEN); 1979 ret = proc_dostring(table, write, file, buffer, length, ppos); 1980 if (ret) 1981 return ret; 1982 if (write) { 1983 int oldval = user_zonelist_order; 1984 if (__parse_numa_zonelist_order((char*)table->data)) { 1985 /* 1986 * bogus value. restore saved string 1987 */ 1988 strncpy((char*)table->data, saved_string, 1989 NUMA_ZONELIST_ORDER_LEN); 1990 user_zonelist_order = oldval; 1991 } else if (oldval != user_zonelist_order) 1992 build_all_zonelists(); 1993 } 1994 return 0; 1995} 1996 1997 1998#define MAX_NODE_LOAD (num_online_nodes()) 1999static int node_load[MAX_NUMNODES]; 2000 2001/** 2002 * find_next_best_node - find the next node that should appear in a given node's fallback list 2003 * @node: node whose fallback list we're appending 2004 * @used_node_mask: nodemask_t of already used nodes 2005 * 2006 * We use a number of factors to determine which is the next node that should 2007 * appear on a given node's fallback list. The node should not have appeared 2008 * already in @node's fallback list, and it should be the next closest node 2009 * according to the distance array (which contains arbitrary distance values 2010 * from each node to each node in the system), and should also prefer nodes 2011 * with no CPUs, since presumably they'll have very little allocation pressure 2012 * on them otherwise. 2013 * It returns -1 if no node is found. 2014 */ 2015static int find_next_best_node(int node, nodemask_t *used_node_mask) 2016{ 2017 int n, val; 2018 int min_val = INT_MAX; 2019 int best_node = -1; 2020 2021 /* Use the local node if we haven't already */ 2022 if (!node_isset(node, *used_node_mask)) { 2023 node_set(node, *used_node_mask); 2024 return node; 2025 } 2026 2027 for_each_node_state(n, N_HIGH_MEMORY) { 2028 cpumask_t tmp; 2029 2030 /* Don't want a node to appear more than once */ 2031 if (node_isset(n, *used_node_mask)) 2032 continue; 2033 2034 /* Use the distance array to find the distance */ 2035 val = node_distance(node, n); 2036 2037 /* Penalize nodes under us ("prefer the next node") */ 2038 val += (n < node); 2039 2040 /* Give preference to headless and unused nodes */ 2041 tmp = node_to_cpumask(n); 2042 if (!cpus_empty(tmp)) 2043 val += PENALTY_FOR_NODE_WITH_CPUS; 2044 2045 /* Slight preference for less loaded node */ 2046 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 2047 val += node_load[n]; 2048 2049 if (val < min_val) { 2050 min_val = val; 2051 best_node = n; 2052 } 2053 } 2054 2055 if (best_node >= 0) 2056 node_set(best_node, *used_node_mask); 2057 2058 return best_node; 2059} 2060 2061 2062/* 2063 * Build zonelists ordered by node and zones within node. 2064 * This results in maximum locality--normal zone overflows into local 2065 * DMA zone, if any--but risks exhausting DMA zone. 2066 */ 2067static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 2068{ 2069 enum zone_type i; 2070 int j; 2071 struct zonelist *zonelist; 2072 2073 for (i = 0; i < MAX_NR_ZONES; i++) { 2074 zonelist = pgdat->node_zonelists + i; 2075 for (j = 0; zonelist->zones[j] != NULL; j++) 2076 ; 2077 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 2078 zonelist->zones[j] = NULL; 2079 } 2080} 2081 2082/* 2083 * Build gfp_thisnode zonelists 2084 */ 2085static void build_thisnode_zonelists(pg_data_t *pgdat) 2086{ 2087 enum zone_type i; 2088 int j; 2089 struct zonelist *zonelist; 2090 2091 for (i = 0; i < MAX_NR_ZONES; i++) { 2092 zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i; 2093 j = build_zonelists_node(pgdat, zonelist, 0, i); 2094 zonelist->zones[j] = NULL; 2095 } 2096} 2097 2098/* 2099 * Build zonelists ordered by zone and nodes within zones. 2100 * This results in conserving DMA zone[s] until all Normal memory is 2101 * exhausted, but results in overflowing to remote node while memory 2102 * may still exist in local DMA zone. 2103 */ 2104static int node_order[MAX_NUMNODES]; 2105 2106static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes) 2107{ 2108 enum zone_type i; 2109 int pos, j, node; 2110 int zone_type; /* needs to be signed */ 2111 struct zone *z; 2112 struct zonelist *zonelist; 2113 2114 for (i = 0; i < MAX_NR_ZONES; i++) { 2115 zonelist = pgdat->node_zonelists + i; 2116 pos = 0; 2117 for (zone_type = i; zone_type >= 0; zone_type--) { 2118 for (j = 0; j < nr_nodes; j++) { 2119 node = node_order[j]; 2120 z = &NODE_DATA(node)->node_zones[zone_type]; 2121 if (populated_zone(z)) { 2122 zonelist->zones[pos++] = z; 2123 check_highest_zone(zone_type); 2124 } 2125 } 2126 } 2127 zonelist->zones[pos] = NULL; 2128 } 2129} 2130 2131static int default_zonelist_order(void) 2132{ 2133 int nid, zone_type; 2134 unsigned long low_kmem_size,total_size; 2135 struct zone *z; 2136 int average_size; 2137 /* 2138 * ZONE_DMA and ZONE_DMA32 can be very small area in the sytem. 2139 * If they are really small and used heavily, the system can fall 2140 * into OOM very easily. 2141 * This function detect ZONE_DMA/DMA32 size and confgigures zone order. 2142 */ 2143 /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */ 2144 low_kmem_size = 0; 2145 total_size = 0; 2146 for_each_online_node(nid) { 2147 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 2148 z = &NODE_DATA(nid)->node_zones[zone_type]; 2149 if (populated_zone(z)) { 2150 if (zone_type < ZONE_NORMAL) 2151 low_kmem_size += z->present_pages; 2152 total_size += z->present_pages; 2153 } 2154 } 2155 } 2156 if (!low_kmem_size || /* there are no DMA area. */ 2157 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */ 2158 return ZONELIST_ORDER_NODE; 2159 /* 2160 * look into each node's config. 2161 * If there is a node whose DMA/DMA32 memory is very big area on 2162 * local memory, NODE_ORDER may be suitable. 2163 */ 2164 average_size = total_size / 2165 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1); 2166 for_each_online_node(nid) { 2167 low_kmem_size = 0; 2168 total_size = 0; 2169 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 2170 z = &NODE_DATA(nid)->node_zones[zone_type]; 2171 if (populated_zone(z)) { 2172 if (zone_type < ZONE_NORMAL) 2173 low_kmem_size += z->present_pages; 2174 total_size += z->present_pages; 2175 } 2176 } 2177 if (low_kmem_size && 2178 total_size > average_size && /* ignore small node */ 2179 low_kmem_size > total_size * 70/100) 2180 return ZONELIST_ORDER_NODE; 2181 } 2182 return ZONELIST_ORDER_ZONE; 2183} 2184 2185static void set_zonelist_order(void) 2186{ 2187 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) 2188 current_zonelist_order = default_zonelist_order(); 2189 else 2190 current_zonelist_order = user_zonelist_order; 2191} 2192 2193static void build_zonelists(pg_data_t *pgdat) 2194{ 2195 int j, node, load; 2196 enum zone_type i; 2197 nodemask_t used_mask; 2198 int local_node, prev_node; 2199 struct zonelist *zonelist; 2200 int order = current_zonelist_order; 2201 2202 /* initialize zonelists */ 2203 for (i = 0; i < MAX_ZONELISTS; i++) { 2204 zonelist = pgdat->node_zonelists + i; 2205 zonelist->zones[0] = NULL; 2206 } 2207 2208 /* NUMA-aware ordering of nodes */ 2209 local_node = pgdat->node_id; 2210 load = num_online_nodes(); 2211 prev_node = local_node; 2212 nodes_clear(used_mask); 2213 2214 memset(node_load, 0, sizeof(node_load)); 2215 memset(node_order, 0, sizeof(node_order)); 2216 j = 0; 2217 2218 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 2219 int distance = node_distance(local_node, node); 2220 2221 /* 2222 * If another node is sufficiently far away then it is better 2223 * to reclaim pages in a zone before going off node. 2224 */ 2225 if (distance > RECLAIM_DISTANCE) 2226 zone_reclaim_mode = 1; 2227 2228 /* 2229 * We don't want to pressure a particular node. 2230 * So adding penalty to the first node in same 2231 * distance group to make it round-robin. 2232 */ 2233 if (distance != node_distance(local_node, prev_node)) 2234 node_load[node] = load; 2235 2236 prev_node = node; 2237 load--; 2238 if (order == ZONELIST_ORDER_NODE) 2239 build_zonelists_in_node_order(pgdat, node); 2240 else 2241 node_order[j++] = node; /* remember order */ 2242 } 2243 2244 if (order == ZONELIST_ORDER_ZONE) { 2245 /* calculate node order -- i.e., DMA last! */ 2246 build_zonelists_in_zone_order(pgdat, j); 2247 } 2248 2249 build_thisnode_zonelists(pgdat); 2250} 2251 2252/* Construct the zonelist performance cache - see further mmzone.h */ 2253static void build_zonelist_cache(pg_data_t *pgdat) 2254{ 2255 int i; 2256 2257 for (i = 0; i < MAX_NR_ZONES; i++) { 2258 struct zonelist *zonelist; 2259 struct zonelist_cache *zlc; 2260 struct zone **z; 2261 2262 zonelist = pgdat->node_zonelists + i; 2263 zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 2264 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 2265 for (z = zonelist->zones; *z; z++) 2266 zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z); 2267 } 2268} 2269 2270 2271#else /* CONFIG_NUMA */ 2272 2273static void set_zonelist_order(void) 2274{ 2275 current_zonelist_order = ZONELIST_ORDER_ZONE; 2276} 2277 2278static void build_zonelists(pg_data_t *pgdat) 2279{ 2280 int node, local_node; 2281 enum zone_type i,j; 2282 2283 local_node = pgdat->node_id; 2284 for (i = 0; i < MAX_NR_ZONES; i++) { 2285 struct zonelist *zonelist; 2286 2287 zonelist = pgdat->node_zonelists + i; 2288 2289 j = build_zonelists_node(pgdat, zonelist, 0, i); 2290 /* 2291 * Now we build the zonelist so that it contains the zones 2292 * of all the other nodes. 2293 * We don't want to pressure a particular node, so when 2294 * building the zones for node N, we make sure that the 2295 * zones coming right after the local ones are those from 2296 * node N+1 (modulo N) 2297 */ 2298 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 2299 if (!node_online(node)) 2300 continue; 2301 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 2302 } 2303 for (node = 0; node < local_node; node++) { 2304 if (!node_online(node)) 2305 continue; 2306 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 2307 } 2308 2309 zonelist->zones[j] = NULL; 2310 } 2311} 2312 2313/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ 2314static void build_zonelist_cache(pg_data_t *pgdat) 2315{ 2316 int i; 2317 2318 for (i = 0; i < MAX_NR_ZONES; i++) 2319 pgdat->node_zonelists[i].zlcache_ptr = NULL; 2320} 2321 2322#endif /* CONFIG_NUMA */ 2323 2324/* return values int ....just for stop_machine_run() */ 2325static int __build_all_zonelists(void *dummy) 2326{ 2327 int nid; 2328 2329 for_each_online_node(nid) { 2330 pg_data_t *pgdat = NODE_DATA(nid); 2331 2332 build_zonelists(pgdat); 2333 build_zonelist_cache(pgdat); 2334 } 2335 return 0; 2336} 2337 2338void build_all_zonelists(void) 2339{ 2340 set_zonelist_order(); 2341 2342 if (system_state == SYSTEM_BOOTING) { 2343 __build_all_zonelists(NULL); 2344 cpuset_init_current_mems_allowed(); 2345 } else { 2346 /* we have to stop all cpus to guarantee there is no user 2347 of zonelist */ 2348 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); 2349 /* cpuset refresh routine should be here */ 2350 } 2351 vm_total_pages = nr_free_pagecache_pages(); 2352 /* 2353 * Disable grouping by mobility if the number of pages in the 2354 * system is too low to allow the mechanism to work. It would be 2355 * more accurate, but expensive to check per-zone. This check is 2356 * made on memory-hotadd so a system can start with mobility 2357 * disabled and enable it later 2358 */ 2359 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 2360 page_group_by_mobility_disabled = 1; 2361 else 2362 page_group_by_mobility_disabled = 0; 2363 2364 printk("Built %i zonelists in %s order, mobility grouping %s. " 2365 "Total pages: %ld\n", 2366 num_online_nodes(), 2367 zonelist_order_name[current_zonelist_order], 2368 page_group_by_mobility_disabled ? "off" : "on", 2369 vm_total_pages); 2370#ifdef CONFIG_NUMA 2371 printk("Policy zone: %s\n", zone_names[policy_zone]); 2372#endif 2373} 2374 2375/* 2376 * Helper functions to size the waitqueue hash table. 2377 * Essentially these want to choose hash table sizes sufficiently 2378 * large so that collisions trying to wait on pages are rare. 2379 * But in fact, the number of active page waitqueues on typical 2380 * systems is ridiculously low, less than 200. So this is even 2381 * conservative, even though it seems large. 2382 * 2383 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 2384 * waitqueues, i.e. the size of the waitq table given the number of pages. 2385 */ 2386#define PAGES_PER_WAITQUEUE 256 2387 2388#ifndef CONFIG_MEMORY_HOTPLUG 2389static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 2390{ 2391 unsigned long size = 1; 2392 2393 pages /= PAGES_PER_WAITQUEUE; 2394 2395 while (size < pages) 2396 size <<= 1; 2397 2398 /* 2399 * Once we have dozens or even hundreds of threads sleeping 2400 * on IO we've got bigger problems than wait queue collision. 2401 * Limit the size of the wait table to a reasonable size. 2402 */ 2403 size = min(size, 4096UL); 2404 2405 return max(size, 4UL); 2406} 2407#else 2408/* 2409 * A zone's size might be changed by hot-add, so it is not possible to determine 2410 * a suitable size for its wait_table. So we use the maximum size now. 2411 * 2412 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 2413 * 2414 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 2415 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 2416 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 2417 * 2418 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 2419 * or more by the traditional way. (See above). It equals: 2420 * 2421 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 2422 * ia64(16K page size) : = ( 8G + 4M)byte. 2423 * powerpc (64K page size) : = (32G +16M)byte. 2424 */ 2425static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 2426{ 2427 return 4096UL; 2428} 2429#endif 2430 2431/* 2432 * This is an integer logarithm so that shifts can be used later 2433 * to extract the more random high bits from the multiplicative 2434 * hash function before the remainder is taken. 2435 */ 2436static inline unsigned long wait_table_bits(unsigned long size) 2437{ 2438 return ffz(~size); 2439} 2440 2441#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 2442 2443/* 2444 * Mark a number of pageblocks as MIGRATE_RESERVE. The number 2445 * of blocks reserved is based on zone->pages_min. The memory within the 2446 * reserve will tend to store contiguous free pages. Setting min_free_kbytes 2447 * higher will lead to a bigger reserve which will get freed as contiguous 2448 * blocks as reclaim kicks in 2449 */ 2450static void setup_zone_migrate_reserve(struct zone *zone) 2451{ 2452 unsigned long start_pfn, pfn, end_pfn; 2453 struct page *page; 2454 unsigned long reserve, block_migratetype; 2455 2456 /* Get the start pfn, end pfn and the number of blocks to reserve */ 2457 start_pfn = zone->zone_start_pfn; 2458 end_pfn = start_pfn + zone->spanned_pages; 2459 reserve = roundup(zone->pages_min, pageblock_nr_pages) >> 2460 pageblock_order; 2461 2462 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 2463 if (!pfn_valid(pfn)) 2464 continue; 2465 page = pfn_to_page(pfn); 2466 2467 /* Blocks with reserved pages will never free, skip them. */ 2468 if (PageReserved(page)) 2469 continue; 2470 2471 block_migratetype = get_pageblock_migratetype(page); 2472 2473 /* If this block is reserved, account for it */ 2474 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) { 2475 reserve--; 2476 continue; 2477 } 2478 2479 /* Suitable for reserving if this block is movable */ 2480 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) { 2481 set_pageblock_migratetype(page, MIGRATE_RESERVE); 2482 move_freepages_block(zone, page, MIGRATE_RESERVE); 2483 reserve--; 2484 continue; 2485 } 2486 2487 /* 2488 * If the reserve is met and this is a previous reserved block, 2489 * take it back 2490 */ 2491 if (block_migratetype == MIGRATE_RESERVE) { 2492 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 2493 move_freepages_block(zone, page, MIGRATE_MOVABLE); 2494 } 2495 } 2496} 2497 2498/* 2499 * Initially all pages are reserved - free ones are freed 2500 * up by free_all_bootmem() once the early boot process is 2501 * done. Non-atomic initialization, single-pass. 2502 */ 2503void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 2504 unsigned long start_pfn, enum memmap_context context) 2505{ 2506 struct page *page; 2507 unsigned long end_pfn = start_pfn + size; 2508 unsigned long pfn; 2509 2510 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 2511 /* 2512 * There can be holes in boot-time mem_map[]s 2513 * handed to this function. They do not 2514 * exist on hotplugged memory. 2515 */ 2516 if (context == MEMMAP_EARLY) { 2517 if (!early_pfn_valid(pfn)) 2518 continue; 2519 if (!early_pfn_in_nid(pfn, nid)) 2520 continue; 2521 } 2522 page = pfn_to_page(pfn); 2523 set_page_links(page, zone, nid, pfn); 2524 init_page_count(page); 2525 reset_page_mapcount(page); 2526 SetPageReserved(page); 2527 2528 /* 2529 * Mark the block movable so that blocks are reserved for 2530 * movable at startup. This will force kernel allocations 2531 * to reserve their blocks rather than leaking throughout 2532 * the address space during boot when many long-lived 2533 * kernel allocations are made. Later some blocks near 2534 * the start are marked MIGRATE_RESERVE by 2535 * setup_zone_migrate_reserve() 2536 */ 2537 if ((pfn & (pageblock_nr_pages-1))) 2538 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 2539 2540 INIT_LIST_HEAD(&page->lru); 2541#ifdef WANT_PAGE_VIRTUAL 2542 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 2543 if (!is_highmem_idx(zone)) 2544 set_page_address(page, __va(pfn << PAGE_SHIFT)); 2545#endif 2546 } 2547} 2548 2549static void __meminit zone_init_free_lists(struct zone *zone) 2550{ 2551 int order, t; 2552 for_each_migratetype_order(order, t) { 2553 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 2554 zone->free_area[order].nr_free = 0; 2555 } 2556} 2557 2558#ifndef __HAVE_ARCH_MEMMAP_INIT 2559#define memmap_init(size, nid, zone, start_pfn) \ 2560 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 2561#endif 2562 2563static int zone_batchsize(struct zone *zone) 2564{ 2565 int batch; 2566 2567 /* 2568 * The per-cpu-pages pools are set to around 1000th of the 2569 * size of the zone. But no more than 1/2 of a meg. 2570 * 2571 * OK, so we don't know how big the cache is. So guess. 2572 */ 2573 batch = zone->present_pages / 1024; 2574 if (batch * PAGE_SIZE > 512 * 1024) 2575 batch = (512 * 1024) / PAGE_SIZE; 2576 batch /= 4; /* We effectively *= 4 below */ 2577 if (batch < 1) 2578 batch = 1; 2579 2580 /* 2581 * Clamp the batch to a 2^n - 1 value. Having a power 2582 * of 2 value was found to be more likely to have 2583 * suboptimal cache aliasing properties in some cases. 2584 * 2585 * For example if 2 tasks are alternately allocating 2586 * batches of pages, one task can end up with a lot 2587 * of pages of one half of the possible page colors 2588 * and the other with pages of the other colors. 2589 */ 2590 batch = (1 << (fls(batch + batch/2)-1)) - 1; 2591 2592 return batch; 2593} 2594 2595inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 2596{ 2597 struct per_cpu_pages *pcp; 2598 2599 memset(p, 0, sizeof(*p)); 2600 2601 pcp = &p->pcp; 2602 pcp->count = 0; 2603 pcp->high = 6 * batch; 2604 pcp->batch = max(1UL, 1 * batch); 2605 INIT_LIST_HEAD(&pcp->list); 2606} 2607 2608/* 2609 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist 2610 * to the value high for the pageset p. 2611 */ 2612 2613static void setup_pagelist_highmark(struct per_cpu_pageset *p, 2614 unsigned long high) 2615{ 2616 struct per_cpu_pages *pcp; 2617 2618 pcp = &p->pcp; 2619 pcp->high = high; 2620 pcp->batch = max(1UL, high/4); 2621 if ((high/4) > (PAGE_SHIFT * 8)) 2622 pcp->batch = PAGE_SHIFT * 8; 2623} 2624 2625 2626#ifdef CONFIG_NUMA 2627/* 2628 * Boot pageset table. One per cpu which is going to be used for all 2629 * zones and all nodes. The parameters will be set in such a way 2630 * that an item put on a list will immediately be handed over to 2631 * the buddy list. This is safe since pageset manipulation is done 2632 * with interrupts disabled. 2633 * 2634 * Some NUMA counter updates may also be caught by the boot pagesets. 2635 * 2636 * The boot_pagesets must be kept even after bootup is complete for 2637 * unused processors and/or zones. They do play a role for bootstrapping 2638 * hotplugged processors. 2639 * 2640 * zoneinfo_show() and maybe other functions do 2641 * not check if the processor is online before following the pageset pointer. 2642 * Other parts of the kernel may not check if the zone is available. 2643 */ 2644static struct per_cpu_pageset boot_pageset[NR_CPUS]; 2645 2646/* 2647 * Dynamically allocate memory for the 2648 * per cpu pageset array in struct zone. 2649 */ 2650static int __cpuinit process_zones(int cpu) 2651{ 2652 struct zone *zone, *dzone; 2653 int node = cpu_to_node(cpu); 2654 2655 node_set_state(node, N_CPU); /* this node has a cpu */ 2656 2657 for_each_zone(zone) { 2658 2659 if (!populated_zone(zone)) 2660 continue; 2661 2662 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 2663 GFP_KERNEL, node); 2664 if (!zone_pcp(zone, cpu)) 2665 goto bad; 2666 2667 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); 2668 2669 if (percpu_pagelist_fraction) 2670 setup_pagelist_highmark(zone_pcp(zone, cpu), 2671 (zone->present_pages / percpu_pagelist_fraction)); 2672 } 2673 2674 return 0; 2675bad: 2676 for_each_zone(dzone) { 2677 if (!populated_zone(dzone)) 2678 continue; 2679 if (dzone == zone) 2680 break; 2681 kfree(zone_pcp(dzone, cpu)); 2682 zone_pcp(dzone, cpu) = NULL; 2683 } 2684 return -ENOMEM; 2685} 2686 2687static inline void free_zone_pagesets(int cpu) 2688{ 2689 struct zone *zone; 2690 2691 for_each_zone(zone) { 2692 struct per_cpu_pageset *pset = zone_pcp(zone, cpu); 2693 2694 /* Free per_cpu_pageset if it is slab allocated */ 2695 if (pset != &boot_pageset[cpu]) 2696 kfree(pset); 2697 zone_pcp(zone, cpu) = NULL; 2698 } 2699} 2700 2701static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, 2702 unsigned long action, 2703 void *hcpu) 2704{ 2705 int cpu = (long)hcpu; 2706 int ret = NOTIFY_OK; 2707 2708 switch (action) { 2709 case CPU_UP_PREPARE: 2710 case CPU_UP_PREPARE_FROZEN: 2711 if (process_zones(cpu)) 2712 ret = NOTIFY_BAD; 2713 break; 2714 case CPU_UP_CANCELED: 2715 case CPU_UP_CANCELED_FROZEN: 2716 case CPU_DEAD: 2717 case CPU_DEAD_FROZEN: 2718 free_zone_pagesets(cpu); 2719 break; 2720 default: 2721 break; 2722 } 2723 return ret; 2724} 2725 2726static struct notifier_block __cpuinitdata pageset_notifier = 2727 { &pageset_cpuup_callback, NULL, 0 }; 2728 2729void __init setup_per_cpu_pageset(void) 2730{ 2731 int err; 2732 2733 /* Initialize per_cpu_pageset for cpu 0. 2734 * A cpuup callback will do this for every cpu 2735 * as it comes online 2736 */ 2737 err = process_zones(smp_processor_id()); 2738 BUG_ON(err); 2739 register_cpu_notifier(&pageset_notifier); 2740} 2741 2742#endif 2743 2744static noinline __init_refok 2745int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 2746{ 2747 int i; 2748 struct pglist_data *pgdat = zone->zone_pgdat; 2749 size_t alloc_size; 2750 2751 /* 2752 * The per-page waitqueue mechanism uses hashed waitqueues 2753 * per zone. 2754 */ 2755 zone->wait_table_hash_nr_entries = 2756 wait_table_hash_nr_entries(zone_size_pages); 2757 zone->wait_table_bits = 2758 wait_table_bits(zone->wait_table_hash_nr_entries); 2759 alloc_size = zone->wait_table_hash_nr_entries 2760 * sizeof(wait_queue_head_t); 2761 2762 if (system_state == SYSTEM_BOOTING) { 2763 zone->wait_table = (wait_queue_head_t *) 2764 alloc_bootmem_node(pgdat, alloc_size); 2765 } else { 2766 /* 2767 * This case means that a zone whose size was 0 gets new memory 2768 * via memory hot-add. 2769 * But it may be the case that a new node was hot-added. In 2770 * this case vmalloc() will not be able to use this new node's 2771 * memory - this wait_table must be initialized to use this new 2772 * node itself as well. 2773 * To use this new node's memory, further consideration will be 2774 * necessary. 2775 */ 2776 zone->wait_table = vmalloc(alloc_size); 2777 } 2778 if (!zone->wait_table) 2779 return -ENOMEM; 2780 2781 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) 2782 init_waitqueue_head(zone->wait_table + i); 2783 2784 return 0; 2785} 2786 2787static __meminit void zone_pcp_init(struct zone *zone) 2788{ 2789 int cpu; 2790 unsigned long batch = zone_batchsize(zone); 2791 2792 for (cpu = 0; cpu < NR_CPUS; cpu++) { 2793#ifdef CONFIG_NUMA 2794 /* Early boot. Slab allocator not functional yet */ 2795 zone_pcp(zone, cpu) = &boot_pageset[cpu]; 2796 setup_pageset(&boot_pageset[cpu],0); 2797#else 2798 setup_pageset(zone_pcp(zone,cpu), batch); 2799#endif 2800 } 2801 if (zone->present_pages) 2802 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 2803 zone->name, zone->present_pages, batch); 2804} 2805 2806__meminit int init_currently_empty_zone(struct zone *zone, 2807 unsigned long zone_start_pfn, 2808 unsigned long size, 2809 enum memmap_context context) 2810{ 2811 struct pglist_data *pgdat = zone->zone_pgdat; 2812 int ret; 2813 ret = zone_wait_table_init(zone, size); 2814 if (ret) 2815 return ret; 2816 pgdat->nr_zones = zone_idx(zone) + 1; 2817 2818 zone->zone_start_pfn = zone_start_pfn; 2819 2820 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); 2821 2822 zone_init_free_lists(zone); 2823 2824 return 0; 2825} 2826 2827#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2828/* 2829 * Basic iterator support. Return the first range of PFNs for a node 2830 * Note: nid == MAX_NUMNODES returns first region regardless of node 2831 */ 2832static int __meminit first_active_region_index_in_nid(int nid) 2833{ 2834 int i; 2835 2836 for (i = 0; i < nr_nodemap_entries; i++) 2837 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) 2838 return i; 2839 2840 return -1; 2841} 2842 2843/* 2844 * Basic iterator support. Return the next active range of PFNs for a node 2845 * Note: nid == MAX_NUMNODES returns next region regardless of node 2846 */ 2847static int __meminit next_active_region_index_in_nid(int index, int nid) 2848{ 2849 for (index = index + 1; index < nr_nodemap_entries; index++) 2850 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) 2851 return index; 2852 2853 return -1; 2854} 2855 2856#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 2857/* 2858 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 2859 * Architectures may implement their own version but if add_active_range() 2860 * was used and there are no special requirements, this is a convenient 2861 * alternative 2862 */ 2863int __meminit early_pfn_to_nid(unsigned long pfn) 2864{ 2865 int i; 2866 2867 for (i = 0; i < nr_nodemap_entries; i++) { 2868 unsigned long start_pfn = early_node_map[i].start_pfn; 2869 unsigned long end_pfn = early_node_map[i].end_pfn; 2870 2871 if (start_pfn <= pfn && pfn < end_pfn) 2872 return early_node_map[i].nid; 2873 } 2874 2875 return 0; 2876} 2877#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 2878 2879/* Basic iterator support to walk early_node_map[] */ 2880#define for_each_active_range_index_in_nid(i, nid) \ 2881 for (i = first_active_region_index_in_nid(nid); i != -1; \ 2882 i = next_active_region_index_in_nid(i, nid)) 2883 2884/** 2885 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range 2886 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 2887 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node 2888 * 2889 * If an architecture guarantees that all ranges registered with 2890 * add_active_ranges() contain no holes and may be freed, this 2891 * this function may be used instead of calling free_bootmem() manually. 2892 */ 2893void __init free_bootmem_with_active_regions(int nid, 2894 unsigned long max_low_pfn) 2895{ 2896 int i; 2897 2898 for_each_active_range_index_in_nid(i, nid) { 2899 unsigned long size_pages = 0; 2900 unsigned long end_pfn = early_node_map[i].end_pfn; 2901 2902 if (early_node_map[i].start_pfn >= max_low_pfn) 2903 continue; 2904 2905 if (end_pfn > max_low_pfn) 2906 end_pfn = max_low_pfn; 2907 2908 size_pages = end_pfn - early_node_map[i].start_pfn; 2909 free_bootmem_node(NODE_DATA(early_node_map[i].nid), 2910 PFN_PHYS(early_node_map[i].start_pfn), 2911 size_pages << PAGE_SHIFT); 2912 } 2913} 2914 2915/** 2916 * sparse_memory_present_with_active_regions - Call memory_present for each active range 2917 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 2918 * 2919 * If an architecture guarantees that all ranges registered with 2920 * add_active_ranges() contain no holes and may be freed, this 2921 * function may be used instead of calling memory_present() manually. 2922 */ 2923void __init sparse_memory_present_with_active_regions(int nid) 2924{ 2925 int i; 2926 2927 for_each_active_range_index_in_nid(i, nid) 2928 memory_present(early_node_map[i].nid, 2929 early_node_map[i].start_pfn, 2930 early_node_map[i].end_pfn); 2931} 2932 2933/** 2934 * push_node_boundaries - Push node boundaries to at least the requested boundary 2935 * @nid: The nid of the node to push the boundary for 2936 * @start_pfn: The start pfn of the node 2937 * @end_pfn: The end pfn of the node 2938 * 2939 * In reserve-based hot-add, mem_map is allocated that is unused until hotadd 2940 * time. Specifically, on x86_64, SRAT will report ranges that can potentially 2941 * be hotplugged even though no physical memory exists. This function allows 2942 * an arch to push out the node boundaries so mem_map is allocated that can 2943 * be used later. 2944 */ 2945#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 2946void __init push_node_boundaries(unsigned int nid, 2947 unsigned long start_pfn, unsigned long end_pfn) 2948{ 2949 printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n", 2950 nid, start_pfn, end_pfn); 2951 2952 /* Initialise the boundary for this node if necessary */ 2953 if (node_boundary_end_pfn[nid] == 0) 2954 node_boundary_start_pfn[nid] = -1UL; 2955 2956 /* Update the boundaries */ 2957 if (node_boundary_start_pfn[nid] > start_pfn) 2958 node_boundary_start_pfn[nid] = start_pfn; 2959 if (node_boundary_end_pfn[nid] < end_pfn) 2960 node_boundary_end_pfn[nid] = end_pfn; 2961} 2962 2963/* If necessary, push the node boundary out for reserve hotadd */ 2964static void __meminit account_node_boundary(unsigned int nid, 2965 unsigned long *start_pfn, unsigned long *end_pfn) 2966{ 2967 printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n", 2968 nid, *start_pfn, *end_pfn); 2969 2970 /* Return if boundary information has not been provided */ 2971 if (node_boundary_end_pfn[nid] == 0) 2972 return; 2973 2974 /* Check the boundaries and update if necessary */ 2975 if (node_boundary_start_pfn[nid] < *start_pfn) 2976 *start_pfn = node_boundary_start_pfn[nid]; 2977 if (node_boundary_end_pfn[nid] > *end_pfn) 2978 *end_pfn = node_boundary_end_pfn[nid]; 2979} 2980#else 2981void __init push_node_boundaries(unsigned int nid, 2982 unsigned long start_pfn, unsigned long end_pfn) {} 2983 2984static void __meminit account_node_boundary(unsigned int nid, 2985 unsigned long *start_pfn, unsigned long *end_pfn) {} 2986#endif 2987 2988 2989/** 2990 * get_pfn_range_for_nid - Return the start and end page frames for a node 2991 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 2992 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 2993 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 2994 * 2995 * It returns the start and end page frame of a node based on information 2996 * provided by an arch calling add_active_range(). If called for a node 2997 * with no available memory, a warning is printed and the start and end 2998 * PFNs will be 0. 2999 */ 3000void __meminit get_pfn_range_for_nid(unsigned int nid, 3001 unsigned long *start_pfn, unsigned long *end_pfn) 3002{ 3003 int i; 3004 *start_pfn = -1UL; 3005 *end_pfn = 0; 3006 3007 for_each_active_range_index_in_nid(i, nid) { 3008 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn); 3009 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn); 3010 } 3011 3012 if (*start_pfn == -1UL) 3013 *start_pfn = 0; 3014 3015 /* Push the node boundaries out if requested */ 3016 account_node_boundary(nid, start_pfn, end_pfn); 3017} 3018 3019/* 3020 * This finds a zone that can be used for ZONE_MOVABLE pages. The 3021 * assumption is made that zones within a node are ordered in monotonic 3022 * increasing memory addresses so that the "highest" populated zone is used 3023 */ 3024void __init find_usable_zone_for_movable(void) 3025{ 3026 int zone_index; 3027 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 3028 if (zone_index == ZONE_MOVABLE) 3029 continue; 3030 3031 if (arch_zone_highest_possible_pfn[zone_index] > 3032 arch_zone_lowest_possible_pfn[zone_index]) 3033 break; 3034 } 3035 3036 VM_BUG_ON(zone_index == -1); 3037 movable_zone = zone_index; 3038} 3039 3040/* 3041 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 3042 * because it is sized independant of architecture. Unlike the other zones, 3043 * the starting point for ZONE_MOVABLE is not fixed. It may be different 3044 * in each node depending on the size of each node and how evenly kernelcore 3045 * is distributed. This helper function adjusts the zone ranges 3046 * provided by the architecture for a given node by using the end of the 3047 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that 3048 * zones within a node are in order of monotonic increases memory addresses 3049 */ 3050void __meminit adjust_zone_range_for_zone_movable(int nid, 3051 unsigned long zone_type, 3052 unsigned long node_start_pfn, 3053 unsigned long node_end_pfn, 3054 unsigned long *zone_start_pfn, 3055 unsigned long *zone_end_pfn) 3056{ 3057 /* Only adjust if ZONE_MOVABLE is on this node */ 3058 if (zone_movable_pfn[nid]) { 3059 /* Size ZONE_MOVABLE */ 3060 if (zone_type == ZONE_MOVABLE) { 3061 *zone_start_pfn = zone_movable_pfn[nid]; 3062 *zone_end_pfn = min(node_end_pfn, 3063 arch_zone_highest_possible_pfn[movable_zone]); 3064 3065 /* Adjust for ZONE_MOVABLE starting within this range */ 3066 } else if (*zone_start_pfn < zone_movable_pfn[nid] && 3067 *zone_end_pfn > zone_movable_pfn[nid]) { 3068 *zone_end_pfn = zone_movable_pfn[nid]; 3069 3070 /* Check if this whole range is within ZONE_MOVABLE */ 3071 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 3072 *zone_start_pfn = *zone_end_pfn; 3073 } 3074} 3075 3076/* 3077 * Return the number of pages a zone spans in a node, including holes 3078 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 3079 */ 3080static unsigned long __meminit zone_spanned_pages_in_node(int nid, 3081 unsigned long zone_type, 3082 unsigned long *ignored) 3083{ 3084 unsigned long node_start_pfn, node_end_pfn; 3085 unsigned long zone_start_pfn, zone_end_pfn; 3086 3087 /* Get the start and end of the node and zone */ 3088 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 3089 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 3090 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 3091 adjust_zone_range_for_zone_movable(nid, zone_type, 3092 node_start_pfn, node_end_pfn, 3093 &zone_start_pfn, &zone_end_pfn); 3094 3095 /* Check that this node has pages within the zone's required range */ 3096 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) 3097 return 0; 3098 3099 /* Move the zone boundaries inside the node if necessary */ 3100 zone_end_pfn = min(zone_end_pfn, node_end_pfn); 3101 zone_start_pfn = max(zone_start_pfn, node_start_pfn); 3102 3103 /* Return the spanned pages */ 3104 return zone_end_pfn - zone_start_pfn; 3105} 3106 3107/* 3108 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 3109 * then all holes in the requested range will be accounted for. 3110 */ 3111unsigned long __meminit __absent_pages_in_range(int nid, 3112 unsigned long range_start_pfn, 3113 unsigned long range_end_pfn) 3114{ 3115 int i = 0; 3116 unsigned long prev_end_pfn = 0, hole_pages = 0; 3117 unsigned long start_pfn; 3118 3119 /* Find the end_pfn of the first active range of pfns in the node */ 3120 i = first_active_region_index_in_nid(nid); 3121 if (i == -1) 3122 return 0; 3123 3124 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn); 3125 3126 /* Account for ranges before physical memory on this node */ 3127 if (early_node_map[i].start_pfn > range_start_pfn) 3128 hole_pages = prev_end_pfn - range_start_pfn; 3129 3130 /* Find all holes for the zone within the node */ 3131 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) { 3132 3133 /* No need to continue if prev_end_pfn is outside the zone */ 3134 if (prev_end_pfn >= range_end_pfn) 3135 break; 3136 3137 /* Make sure the end of the zone is not within the hole */ 3138 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn); 3139 prev_end_pfn = max(prev_end_pfn, range_start_pfn); 3140 3141 /* Update the hole size cound and move on */ 3142 if (start_pfn > range_start_pfn) { 3143 BUG_ON(prev_end_pfn > start_pfn); 3144 hole_pages += start_pfn - prev_end_pfn; 3145 } 3146 prev_end_pfn = early_node_map[i].end_pfn; 3147 } 3148 3149 /* Account for ranges past physical memory on this node */ 3150 if (range_end_pfn > prev_end_pfn) 3151 hole_pages += range_end_pfn - 3152 max(range_start_pfn, prev_end_pfn); 3153 3154 return hole_pages; 3155} 3156 3157/** 3158 * absent_pages_in_range - Return number of page frames in holes within a range 3159 * @start_pfn: The start PFN to start searching for holes 3160 * @end_pfn: The end PFN to stop searching for holes 3161 * 3162 * It returns the number of pages frames in memory holes within a range. 3163 */ 3164unsigned long __init absent_pages_in_range(unsigned long start_pfn, 3165 unsigned long end_pfn) 3166{ 3167 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 3168} 3169 3170/* Return the number of page frames in holes in a zone on a node */ 3171static unsigned long __meminit zone_absent_pages_in_node(int nid, 3172 unsigned long zone_type, 3173 unsigned long *ignored) 3174{ 3175 unsigned long node_start_pfn, node_end_pfn; 3176 unsigned long zone_start_pfn, zone_end_pfn; 3177 3178 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 3179 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type], 3180 node_start_pfn); 3181 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type], 3182 node_end_pfn); 3183 3184 adjust_zone_range_for_zone_movable(nid, zone_type, 3185 node_start_pfn, node_end_pfn, 3186 &zone_start_pfn, &zone_end_pfn); 3187 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 3188} 3189 3190#else 3191static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, 3192 unsigned long zone_type, 3193 unsigned long *zones_size) 3194{ 3195 return zones_size[zone_type]; 3196} 3197 3198static inline unsigned long __meminit zone_absent_pages_in_node(int nid, 3199 unsigned long zone_type, 3200 unsigned long *zholes_size) 3201{ 3202 if (!zholes_size) 3203 return 0; 3204 3205 return zholes_size[zone_type]; 3206} 3207 3208#endif 3209 3210static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 3211 unsigned long *zones_size, unsigned long *zholes_size) 3212{ 3213 unsigned long realtotalpages, totalpages = 0; 3214 enum zone_type i; 3215 3216 for (i = 0; i < MAX_NR_ZONES; i++) 3217 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, 3218 zones_size); 3219 pgdat->node_spanned_pages = totalpages; 3220 3221 realtotalpages = totalpages; 3222 for (i = 0; i < MAX_NR_ZONES; i++) 3223 realtotalpages -= 3224 zone_absent_pages_in_node(pgdat->node_id, i, 3225 zholes_size); 3226 pgdat->node_present_pages = realtotalpages; 3227 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 3228 realtotalpages); 3229} 3230 3231#ifndef CONFIG_SPARSEMEM 3232/* 3233 * Calculate the size of the zone->blockflags rounded to an unsigned long 3234 * Start by making sure zonesize is a multiple of pageblock_order by rounding 3235 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 3236 * round what is now in bits to nearest long in bits, then return it in 3237 * bytes. 3238 */ 3239static unsigned long __init usemap_size(unsigned long zonesize) 3240{ 3241 unsigned long usemapsize; 3242 3243 usemapsize = roundup(zonesize, pageblock_nr_pages); 3244 usemapsize = usemapsize >> pageblock_order; 3245 usemapsize *= NR_PAGEBLOCK_BITS; 3246 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 3247 3248 return usemapsize / 8; 3249} 3250 3251static void __init setup_usemap(struct pglist_data *pgdat, 3252 struct zone *zone, unsigned long zonesize) 3253{ 3254 unsigned long usemapsize = usemap_size(zonesize); 3255 zone->pageblock_flags = NULL; 3256 if (usemapsize) { 3257 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); 3258 memset(zone->pageblock_flags, 0, usemapsize); 3259 } 3260} 3261#else 3262static void inline setup_usemap(struct pglist_data *pgdat, 3263 struct zone *zone, unsigned long zonesize) {} 3264#endif /* CONFIG_SPARSEMEM */ 3265 3266#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 3267 3268/* Return a sensible default order for the pageblock size. */ 3269static inline int pageblock_default_order(void) 3270{ 3271 if (HPAGE_SHIFT > PAGE_SHIFT) 3272 return HUGETLB_PAGE_ORDER; 3273 3274 return MAX_ORDER-1; 3275} 3276 3277/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 3278static inline void __init set_pageblock_order(unsigned int order) 3279{ 3280 /* Check that pageblock_nr_pages has not already been setup */ 3281 if (pageblock_order) 3282 return; 3283 3284 /* 3285 * Assume the largest contiguous order of interest is a huge page. 3286 * This value may be variable depending on boot parameters on IA64 3287 */ 3288 pageblock_order = order; 3289} 3290#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 3291 3292/* 3293 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 3294 * and pageblock_default_order() are unused as pageblock_order is set 3295 * at compile-time. See include/linux/pageblock-flags.h for the values of 3296 * pageblock_order based on the kernel config 3297 */ 3298static inline int pageblock_default_order(unsigned int order) 3299{ 3300 return MAX_ORDER-1; 3301} 3302#define set_pageblock_order(x) do {} while (0) 3303 3304#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 3305 3306/* 3307 * Set up the zone data structures: 3308 * - mark all pages reserved 3309 * - mark all memory queues empty 3310 * - clear the memory bitmaps 3311 */ 3312static void __meminit free_area_init_core(struct pglist_data *pgdat, 3313 unsigned long *zones_size, unsigned long *zholes_size) 3314{ 3315 enum zone_type j; 3316 int nid = pgdat->node_id; 3317 unsigned long zone_start_pfn = pgdat->node_start_pfn; 3318 int ret; 3319 3320 pgdat_resize_init(pgdat); 3321 pgdat->nr_zones = 0; 3322 init_waitqueue_head(&pgdat->kswapd_wait); 3323 pgdat->kswapd_max_order = 0; 3324 3325 for (j = 0; j < MAX_NR_ZONES; j++) { 3326 struct zone *zone = pgdat->node_zones + j; 3327 unsigned long size, realsize, memmap_pages; 3328 3329 size = zone_spanned_pages_in_node(nid, j, zones_size); 3330 realsize = size - zone_absent_pages_in_node(nid, j, 3331 zholes_size); 3332 3333 /* 3334 * Adjust realsize so that it accounts for how much memory 3335 * is used by this zone for memmap. This affects the watermark 3336 * and per-cpu initialisations 3337 */ 3338 memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT; 3339 if (realsize >= memmap_pages) { 3340 realsize -= memmap_pages; 3341 printk(KERN_DEBUG 3342 " %s zone: %lu pages used for memmap\n", 3343 zone_names[j], memmap_pages); 3344 } else 3345 printk(KERN_WARNING 3346 " %s zone: %lu pages exceeds realsize %lu\n", 3347 zone_names[j], memmap_pages, realsize); 3348 3349 /* Account for reserved pages */ 3350 if (j == 0 && realsize > dma_reserve) { 3351 realsize -= dma_reserve; 3352 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 3353 zone_names[0], dma_reserve); 3354 } 3355 3356 if (!is_highmem_idx(j)) 3357 nr_kernel_pages += realsize; 3358 nr_all_pages += realsize; 3359 3360 zone->spanned_pages = size; 3361 zone->present_pages = realsize; 3362#ifdef CONFIG_NUMA 3363 zone->node = nid; 3364 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio) 3365 / 100; 3366 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100; 3367#endif 3368 zone->name = zone_names[j]; 3369 spin_lock_init(&zone->lock); 3370 spin_lock_init(&zone->lru_lock); 3371 zone_seqlock_init(zone); 3372 zone->zone_pgdat = pgdat; 3373 3374 zone->prev_priority = DEF_PRIORITY; 3375 3376 zone_pcp_init(zone); 3377 INIT_LIST_HEAD(&zone->active_list); 3378 INIT_LIST_HEAD(&zone->inactive_list); 3379 zone->nr_scan_active = 0; 3380 zone->nr_scan_inactive = 0; 3381 zap_zone_vm_stats(zone); 3382 zone->flags = 0; 3383 if (!size) 3384 continue; 3385 3386 set_pageblock_order(pageblock_default_order()); 3387 setup_usemap(pgdat, zone, size); 3388 ret = init_currently_empty_zone(zone, zone_start_pfn, 3389 size, MEMMAP_EARLY); 3390 BUG_ON(ret); 3391 zone_start_pfn += size; 3392 } 3393} 3394 3395static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) 3396{ 3397 /* Skip empty nodes */ 3398 if (!pgdat->node_spanned_pages) 3399 return; 3400 3401#ifdef CONFIG_FLAT_NODE_MEM_MAP 3402 /* ia64 gets its own node_mem_map, before this, without bootmem */ 3403 if (!pgdat->node_mem_map) { 3404 unsigned long size, start, end; 3405 struct page *map; 3406 3407 /* 3408 * The zone's endpoints aren't required to be MAX_ORDER 3409 * aligned but the node_mem_map endpoints must be in order 3410 * for the buddy allocator to function correctly. 3411 */ 3412 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 3413 end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 3414 end = ALIGN(end, MAX_ORDER_NR_PAGES); 3415 size = (end - start) * sizeof(struct page); 3416 map = alloc_remap(pgdat->node_id, size); 3417 if (!map) 3418 map = alloc_bootmem_node(pgdat, size); 3419 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 3420 } 3421#ifndef CONFIG_NEED_MULTIPLE_NODES 3422 /* 3423 * With no DISCONTIG, the global mem_map is just set as node 0's 3424 */ 3425 if (pgdat == NODE_DATA(0)) { 3426 mem_map = NODE_DATA(0)->node_mem_map; 3427#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 3428 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 3429 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET); 3430#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 3431 } 3432#endif 3433#endif /* CONFIG_FLAT_NODE_MEM_MAP */ 3434} 3435 3436void __meminit free_area_init_node(int nid, struct pglist_data *pgdat, 3437 unsigned long *zones_size, unsigned long node_start_pfn, 3438 unsigned long *zholes_size) 3439{ 3440 pgdat->node_id = nid; 3441 pgdat->node_start_pfn = node_start_pfn; 3442 calculate_node_totalpages(pgdat, zones_size, zholes_size); 3443 3444 alloc_node_mem_map(pgdat); 3445 3446 free_area_init_core(pgdat, zones_size, zholes_size); 3447} 3448 3449#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 3450 3451#if MAX_NUMNODES > 1 3452/* 3453 * Figure out the number of possible node ids. 3454 */ 3455static void __init setup_nr_node_ids(void) 3456{ 3457 unsigned int node; 3458 unsigned int highest = 0; 3459 3460 for_each_node_mask(node, node_possible_map) 3461 highest = node; 3462 nr_node_ids = highest + 1; 3463} 3464#else 3465static inline void setup_nr_node_ids(void) 3466{ 3467} 3468#endif 3469 3470/** 3471 * add_active_range - Register a range of PFNs backed by physical memory 3472 * @nid: The node ID the range resides on 3473 * @start_pfn: The start PFN of the available physical memory 3474 * @end_pfn: The end PFN of the available physical memory 3475 * 3476 * These ranges are stored in an early_node_map[] and later used by 3477 * free_area_init_nodes() to calculate zone sizes and holes. If the 3478 * range spans a memory hole, it is up to the architecture to ensure 3479 * the memory is not freed by the bootmem allocator. If possible 3480 * the range being registered will be merged with existing ranges. 3481 */ 3482void __init add_active_range(unsigned int nid, unsigned long start_pfn, 3483 unsigned long end_pfn) 3484{ 3485 int i; 3486 3487 printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) " 3488 "%d entries of %d used\n", 3489 nid, start_pfn, end_pfn, 3490 nr_nodemap_entries, MAX_ACTIVE_REGIONS); 3491 3492 /* Merge with existing active regions if possible */ 3493 for (i = 0; i < nr_nodemap_entries; i++) { 3494 if (early_node_map[i].nid != nid) 3495 continue; 3496 3497 /* Skip if an existing region covers this new one */ 3498 if (start_pfn >= early_node_map[i].start_pfn && 3499 end_pfn <= early_node_map[i].end_pfn) 3500 return; 3501 3502 /* Merge forward if suitable */ 3503 if (start_pfn <= early_node_map[i].end_pfn && 3504 end_pfn > early_node_map[i].end_pfn) { 3505 early_node_map[i].end_pfn = end_pfn; 3506 return; 3507 } 3508 3509 /* Merge backward if suitable */ 3510 if (start_pfn < early_node_map[i].end_pfn && 3511 end_pfn >= early_node_map[i].start_pfn) { 3512 early_node_map[i].start_pfn = start_pfn; 3513 return; 3514 } 3515 } 3516 3517 /* Check that early_node_map is large enough */ 3518 if (i >= MAX_ACTIVE_REGIONS) { 3519 printk(KERN_CRIT "More than %d memory regions, truncating\n", 3520 MAX_ACTIVE_REGIONS); 3521 return; 3522 } 3523 3524 early_node_map[i].nid = nid; 3525 early_node_map[i].start_pfn = start_pfn; 3526 early_node_map[i].end_pfn = end_pfn; 3527 nr_nodemap_entries = i + 1; 3528} 3529 3530/** 3531 * shrink_active_range - Shrink an existing registered range of PFNs 3532 * @nid: The node id the range is on that should be shrunk 3533 * @old_end_pfn: The old end PFN of the range 3534 * @new_end_pfn: The new PFN of the range 3535 * 3536 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node. 3537 * The map is kept at the end physical page range that has already been 3538 * registered with add_active_range(). This function allows an arch to shrink 3539 * an existing registered range. 3540 */ 3541void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn, 3542 unsigned long new_end_pfn) 3543{ 3544 int i; 3545 3546 /* Find the old active region end and shrink */ 3547 for_each_active_range_index_in_nid(i, nid) 3548 if (early_node_map[i].end_pfn == old_end_pfn) { 3549 early_node_map[i].end_pfn = new_end_pfn; 3550 break; 3551 } 3552} 3553 3554/** 3555 * remove_all_active_ranges - Remove all currently registered regions 3556 * 3557 * During discovery, it may be found that a table like SRAT is invalid 3558 * and an alternative discovery method must be used. This function removes 3559 * all currently registered regions. 3560 */ 3561void __init remove_all_active_ranges(void) 3562{ 3563 memset(early_node_map, 0, sizeof(early_node_map)); 3564 nr_nodemap_entries = 0; 3565#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 3566 memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn)); 3567 memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn)); 3568#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 3569} 3570 3571/* Compare two active node_active_regions */ 3572static int __init cmp_node_active_region(const void *a, const void *b) 3573{ 3574 struct node_active_region *arange = (struct node_active_region *)a; 3575 struct node_active_region *brange = (struct node_active_region *)b; 3576 3577 /* Done this way to avoid overflows */ 3578 if (arange->start_pfn > brange->start_pfn) 3579 return 1; 3580 if (arange->start_pfn < brange->start_pfn) 3581 return -1; 3582 3583 return 0; 3584} 3585 3586/* sort the node_map by start_pfn */ 3587static void __init sort_node_map(void) 3588{ 3589 sort(early_node_map, (size_t)nr_nodemap_entries, 3590 sizeof(struct node_active_region), 3591 cmp_node_active_region, NULL); 3592} 3593 3594/* Find the lowest pfn for a node */ 3595unsigned long __init find_min_pfn_for_node(unsigned long nid) 3596{ 3597 int i; 3598 unsigned long min_pfn = ULONG_MAX; 3599 3600 /* Assuming a sorted map, the first range found has the starting pfn */ 3601 for_each_active_range_index_in_nid(i, nid) 3602 min_pfn = min(min_pfn, early_node_map[i].start_pfn); 3603 3604 if (min_pfn == ULONG_MAX) { 3605 printk(KERN_WARNING 3606 "Could not find start_pfn for node %lu\n", nid); 3607 return 0; 3608 } 3609 3610 return min_pfn; 3611} 3612 3613/** 3614 * find_min_pfn_with_active_regions - Find the minimum PFN registered 3615 * 3616 * It returns the minimum PFN based on information provided via 3617 * add_active_range(). 3618 */ 3619unsigned long __init find_min_pfn_with_active_regions(void) 3620{ 3621 return find_min_pfn_for_node(MAX_NUMNODES); 3622} 3623 3624/** 3625 * find_max_pfn_with_active_regions - Find the maximum PFN registered 3626 * 3627 * It returns the maximum PFN based on information provided via 3628 * add_active_range(). 3629 */ 3630unsigned long __init find_max_pfn_with_active_regions(void) 3631{ 3632 int i; 3633 unsigned long max_pfn = 0; 3634 3635 for (i = 0; i < nr_nodemap_entries; i++) 3636 max_pfn = max(max_pfn, early_node_map[i].end_pfn); 3637 3638 return max_pfn; 3639} 3640 3641/* 3642 * early_calculate_totalpages() 3643 * Sum pages in active regions for movable zone. 3644 * Populate N_HIGH_MEMORY for calculating usable_nodes. 3645 */ 3646static unsigned long __init early_calculate_totalpages(void) 3647{ 3648 int i; 3649 unsigned long totalpages = 0; 3650 3651 for (i = 0; i < nr_nodemap_entries; i++) { 3652 unsigned long pages = early_node_map[i].end_pfn - 3653 early_node_map[i].start_pfn; 3654 totalpages += pages; 3655 if (pages) 3656 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY); 3657 } 3658 return totalpages; 3659} 3660 3661/* 3662 * Find the PFN the Movable zone begins in each node. Kernel memory 3663 * is spread evenly between nodes as long as the nodes have enough 3664 * memory. When they don't, some nodes will have more kernelcore than 3665 * others 3666 */ 3667void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) 3668{ 3669 int i, nid; 3670 unsigned long usable_startpfn; 3671 unsigned long kernelcore_node, kernelcore_remaining; 3672 unsigned long totalpages = early_calculate_totalpages(); 3673 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); 3674 3675 /* 3676 * If movablecore was specified, calculate what size of 3677 * kernelcore that corresponds so that memory usable for 3678 * any allocation type is evenly spread. If both kernelcore 3679 * and movablecore are specified, then the value of kernelcore 3680 * will be used for required_kernelcore if it's greater than 3681 * what movablecore would have allowed. 3682 */ 3683 if (required_movablecore) { 3684 unsigned long corepages; 3685 3686 /* 3687 * Round-up so that ZONE_MOVABLE is at least as large as what 3688 * was requested by the user 3689 */ 3690 required_movablecore = 3691 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 3692 corepages = totalpages - required_movablecore; 3693 3694 required_kernelcore = max(required_kernelcore, corepages); 3695 } 3696 3697 /* If kernelcore was not specified, there is no ZONE_MOVABLE */ 3698 if (!required_kernelcore) 3699 return; 3700 3701 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 3702 find_usable_zone_for_movable(); 3703 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 3704 3705restart: 3706 /* Spread kernelcore memory as evenly as possible throughout nodes */ 3707 kernelcore_node = required_kernelcore / usable_nodes; 3708 for_each_node_state(nid, N_HIGH_MEMORY) { 3709 /* 3710 * Recalculate kernelcore_node if the division per node 3711 * now exceeds what is necessary to satisfy the requested 3712 * amount of memory for the kernel 3713 */ 3714 if (required_kernelcore < kernelcore_node) 3715 kernelcore_node = required_kernelcore / usable_nodes; 3716 3717 /* 3718 * As the map is walked, we track how much memory is usable 3719 * by the kernel using kernelcore_remaining. When it is 3720 * 0, the rest of the node is usable by ZONE_MOVABLE 3721 */ 3722 kernelcore_remaining = kernelcore_node; 3723 3724 /* Go through each range of PFNs within this node */ 3725 for_each_active_range_index_in_nid(i, nid) { 3726 unsigned long start_pfn, end_pfn; 3727 unsigned long size_pages; 3728 3729 start_pfn = max(early_node_map[i].start_pfn, 3730 zone_movable_pfn[nid]); 3731 end_pfn = early_node_map[i].end_pfn; 3732 if (start_pfn >= end_pfn) 3733 continue; 3734 3735 /* Account for what is only usable for kernelcore */ 3736 if (start_pfn < usable_startpfn) { 3737 unsigned long kernel_pages; 3738 kernel_pages = min(end_pfn, usable_startpfn) 3739 - start_pfn; 3740 3741 kernelcore_remaining -= min(kernel_pages, 3742 kernelcore_remaining); 3743 required_kernelcore -= min(kernel_pages, 3744 required_kernelcore); 3745 3746 /* Continue if range is now fully accounted */ 3747 if (end_pfn <= usable_startpfn) { 3748 3749 /* 3750 * Push zone_movable_pfn to the end so 3751 * that if we have to rebalance 3752 * kernelcore across nodes, we will 3753 * not double account here 3754 */ 3755 zone_movable_pfn[nid] = end_pfn; 3756 continue; 3757 } 3758 start_pfn = usable_startpfn; 3759 } 3760 3761 /* 3762 * The usable PFN range for ZONE_MOVABLE is from 3763 * start_pfn->end_pfn. Calculate size_pages as the 3764 * number of pages used as kernelcore 3765 */ 3766 size_pages = end_pfn - start_pfn; 3767 if (size_pages > kernelcore_remaining) 3768 size_pages = kernelcore_remaining; 3769 zone_movable_pfn[nid] = start_pfn + size_pages; 3770 3771 /* 3772 * Some kernelcore has been met, update counts and 3773 * break if the kernelcore for this node has been 3774 * satisified 3775 */ 3776 required_kernelcore -= min(required_kernelcore, 3777 size_pages); 3778 kernelcore_remaining -= size_pages; 3779 if (!kernelcore_remaining) 3780 break; 3781 } 3782 } 3783 3784 /* 3785 * If there is still required_kernelcore, we do another pass with one 3786 * less node in the count. This will push zone_movable_pfn[nid] further 3787 * along on the nodes that still have memory until kernelcore is 3788 * satisified 3789 */ 3790 usable_nodes--; 3791 if (usable_nodes && required_kernelcore > usable_nodes) 3792 goto restart; 3793 3794 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 3795 for (nid = 0; nid < MAX_NUMNODES; nid++) 3796 zone_movable_pfn[nid] = 3797 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 3798} 3799 3800/* Any regular memory on that node ? */ 3801static void check_for_regular_memory(pg_data_t *pgdat) 3802{ 3803#ifdef CONFIG_HIGHMEM 3804 enum zone_type zone_type; 3805 3806 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) { 3807 struct zone *zone = &pgdat->node_zones[zone_type]; 3808 if (zone->present_pages) 3809 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY); 3810 } 3811#endif 3812} 3813 3814/** 3815 * free_area_init_nodes - Initialise all pg_data_t and zone data 3816 * @max_zone_pfn: an array of max PFNs for each zone 3817 * 3818 * This will call free_area_init_node() for each active node in the system. 3819 * Using the page ranges provided by add_active_range(), the size of each 3820 * zone in each node and their holes is calculated. If the maximum PFN 3821 * between two adjacent zones match, it is assumed that the zone is empty. 3822 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 3823 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 3824 * starts where the previous one ended. For example, ZONE_DMA32 starts 3825 * at arch_max_dma_pfn. 3826 */ 3827void __init free_area_init_nodes(unsigned long *max_zone_pfn) 3828{ 3829 unsigned long nid; 3830 enum zone_type i; 3831 3832 /* Sort early_node_map as initialisation assumes it is sorted */ 3833 sort_node_map(); 3834 3835 /* Record where the zone boundaries are */ 3836 memset(arch_zone_lowest_possible_pfn, 0, 3837 sizeof(arch_zone_lowest_possible_pfn)); 3838 memset(arch_zone_highest_possible_pfn, 0, 3839 sizeof(arch_zone_highest_possible_pfn)); 3840 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 3841 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 3842 for (i = 1; i < MAX_NR_ZONES; i++) { 3843 if (i == ZONE_MOVABLE) 3844 continue; 3845 arch_zone_lowest_possible_pfn[i] = 3846 arch_zone_highest_possible_pfn[i-1]; 3847 arch_zone_highest_possible_pfn[i] = 3848 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 3849 } 3850 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; 3851 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; 3852 3853 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 3854 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 3855 find_zone_movable_pfns_for_nodes(zone_movable_pfn); 3856 3857 /* Print out the zone ranges */ 3858 printk("Zone PFN ranges:\n"); 3859 for (i = 0; i < MAX_NR_ZONES; i++) { 3860 if (i == ZONE_MOVABLE) 3861 continue; 3862 printk(" %-8s %8lu -> %8lu\n", 3863 zone_names[i], 3864 arch_zone_lowest_possible_pfn[i], 3865 arch_zone_highest_possible_pfn[i]); 3866 } 3867 3868 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 3869 printk("Movable zone start PFN for each node\n"); 3870 for (i = 0; i < MAX_NUMNODES; i++) { 3871 if (zone_movable_pfn[i]) 3872 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]); 3873 } 3874 3875 /* Print out the early_node_map[] */ 3876 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); 3877 for (i = 0; i < nr_nodemap_entries; i++) 3878 printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid, 3879 early_node_map[i].start_pfn, 3880 early_node_map[i].end_pfn); 3881 3882 /* Initialise every node */ 3883 setup_nr_node_ids(); 3884 for_each_online_node(nid) { 3885 pg_data_t *pgdat = NODE_DATA(nid); 3886 free_area_init_node(nid, pgdat, NULL, 3887 find_min_pfn_for_node(nid), NULL); 3888 3889 /* Any memory on that node */ 3890 if (pgdat->node_present_pages) 3891 node_set_state(nid, N_HIGH_MEMORY); 3892 check_for_regular_memory(pgdat); 3893 } 3894} 3895 3896static int __init cmdline_parse_core(char *p, unsigned long *core) 3897{ 3898 unsigned long long coremem; 3899 if (!p) 3900 return -EINVAL; 3901 3902 coremem = memparse(p, &p); 3903 *core = coremem >> PAGE_SHIFT; 3904 3905 /* Paranoid check that UL is enough for the coremem value */ 3906 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 3907 3908 return 0; 3909} 3910 3911/* 3912 * kernelcore=size sets the amount of memory for use for allocations that 3913 * cannot be reclaimed or migrated. 3914 */ 3915static int __init cmdline_parse_kernelcore(char *p) 3916{ 3917 return cmdline_parse_core(p, &required_kernelcore); 3918} 3919 3920/* 3921 * movablecore=size sets the amount of memory for use for allocations that 3922 * can be reclaimed or migrated. 3923 */ 3924static int __init cmdline_parse_movablecore(char *p) 3925{ 3926 return cmdline_parse_core(p, &required_movablecore); 3927} 3928 3929early_param("kernelcore", cmdline_parse_kernelcore); 3930early_param("movablecore", cmdline_parse_movablecore); 3931 3932#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 3933 3934/** 3935 * set_dma_reserve - set the specified number of pages reserved in the first zone 3936 * @new_dma_reserve: The number of pages to mark reserved 3937 * 3938 * The per-cpu batchsize and zone watermarks are determined by present_pages. 3939 * In the DMA zone, a significant percentage may be consumed by kernel image 3940 * and other unfreeable allocations which can skew the watermarks badly. This 3941 * function may optionally be used to account for unfreeable pages in the 3942 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 3943 * smaller per-cpu batchsize. 3944 */ 3945void __init set_dma_reserve(unsigned long new_dma_reserve) 3946{ 3947 dma_reserve = new_dma_reserve; 3948} 3949 3950#ifndef CONFIG_NEED_MULTIPLE_NODES 3951static bootmem_data_t contig_bootmem_data; 3952struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data }; 3953 3954EXPORT_SYMBOL(contig_page_data); 3955#endif 3956 3957void __init free_area_init(unsigned long *zones_size) 3958{ 3959 free_area_init_node(0, NODE_DATA(0), zones_size, 3960 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 3961} 3962 3963static int page_alloc_cpu_notify(struct notifier_block *self, 3964 unsigned long action, void *hcpu) 3965{ 3966 int cpu = (unsigned long)hcpu; 3967 3968 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 3969 drain_pages(cpu); 3970 3971 /* 3972 * Spill the event counters of the dead processor 3973 * into the current processors event counters. 3974 * This artificially elevates the count of the current 3975 * processor. 3976 */ 3977 vm_events_fold_cpu(cpu); 3978 3979 /* 3980 * Zero the differential counters of the dead processor 3981 * so that the vm statistics are consistent. 3982 * 3983 * This is only okay since the processor is dead and cannot 3984 * race with what we are doing. 3985 */ 3986 refresh_cpu_vm_stats(cpu); 3987 } 3988 return NOTIFY_OK; 3989} 3990 3991void __init page_alloc_init(void) 3992{ 3993 hotcpu_notifier(page_alloc_cpu_notify, 0); 3994} 3995 3996/* 3997 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio 3998 * or min_free_kbytes changes. 3999 */ 4000static void calculate_totalreserve_pages(void) 4001{ 4002 struct pglist_data *pgdat; 4003 unsigned long reserve_pages = 0; 4004 enum zone_type i, j; 4005 4006 for_each_online_pgdat(pgdat) { 4007 for (i = 0; i < MAX_NR_ZONES; i++) { 4008 struct zone *zone = pgdat->node_zones + i; 4009 unsigned long max = 0; 4010 4011 /* Find valid and maximum lowmem_reserve in the zone */ 4012 for (j = i; j < MAX_NR_ZONES; j++) { 4013 if (zone->lowmem_reserve[j] > max) 4014 max = zone->lowmem_reserve[j]; 4015 } 4016 4017 /* we treat pages_high as reserved pages. */ 4018 max += zone->pages_high; 4019 4020 if (max > zone->present_pages) 4021 max = zone->present_pages; 4022 reserve_pages += max; 4023 } 4024 } 4025 totalreserve_pages = reserve_pages; 4026} 4027 4028/* 4029 * setup_per_zone_lowmem_reserve - called whenever 4030 * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone 4031 * has a correct pages reserved value, so an adequate number of 4032 * pages are left in the zone after a successful __alloc_pages(). 4033 */ 4034static void setup_per_zone_lowmem_reserve(void) 4035{ 4036 struct pglist_data *pgdat; 4037 enum zone_type j, idx; 4038 4039 for_each_online_pgdat(pgdat) { 4040 for (j = 0; j < MAX_NR_ZONES; j++) { 4041 struct zone *zone = pgdat->node_zones + j; 4042 unsigned long present_pages = zone->present_pages; 4043 4044 zone->lowmem_reserve[j] = 0; 4045 4046 idx = j; 4047 while (idx) { 4048 struct zone *lower_zone; 4049 4050 idx--; 4051 4052 if (sysctl_lowmem_reserve_ratio[idx] < 1) 4053 sysctl_lowmem_reserve_ratio[idx] = 1; 4054 4055 lower_zone = pgdat->node_zones + idx; 4056 lower_zone->lowmem_reserve[j] = present_pages / 4057 sysctl_lowmem_reserve_ratio[idx]; 4058 present_pages += lower_zone->present_pages; 4059 } 4060 } 4061 } 4062 4063 /* update totalreserve_pages */ 4064 calculate_totalreserve_pages(); 4065} 4066 4067/** 4068 * setup_per_zone_pages_min - called when min_free_kbytes changes. 4069 * 4070 * Ensures that the pages_{min,low,high} values for each zone are set correctly 4071 * with respect to min_free_kbytes. 4072 */ 4073void setup_per_zone_pages_min(void) 4074{ 4075 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 4076 unsigned long lowmem_pages = 0; 4077 struct zone *zone; 4078 unsigned long flags; 4079 4080 /* Calculate total number of !ZONE_HIGHMEM pages */ 4081 for_each_zone(zone) { 4082 if (!is_highmem(zone)) 4083 lowmem_pages += zone->present_pages; 4084 } 4085 4086 for_each_zone(zone) { 4087 u64 tmp; 4088 4089 spin_lock_irqsave(&zone->lru_lock, flags); 4090 tmp = (u64)pages_min * zone->present_pages; 4091 do_div(tmp, lowmem_pages); 4092 if (is_highmem(zone)) { 4093 /* 4094 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 4095 * need highmem pages, so cap pages_min to a small 4096 * value here. 4097 * 4098 * The (pages_high-pages_low) and (pages_low-pages_min) 4099 * deltas controls asynch page reclaim, and so should 4100 * not be capped for highmem. 4101 */ 4102 int min_pages; 4103 4104 min_pages = zone->present_pages / 1024; 4105 if (min_pages < SWAP_CLUSTER_MAX) 4106 min_pages = SWAP_CLUSTER_MAX; 4107 if (min_pages > 128) 4108 min_pages = 128; 4109 zone->pages_min = min_pages; 4110 } else { 4111 /* 4112 * If it's a lowmem zone, reserve a number of pages 4113 * proportionate to the zone's size. 4114 */ 4115 zone->pages_min = tmp; 4116 } 4117 4118 zone->pages_low = zone->pages_min + (tmp >> 2); 4119 zone->pages_high = zone->pages_min + (tmp >> 1); 4120 setup_zone_migrate_reserve(zone); 4121 spin_unlock_irqrestore(&zone->lru_lock, flags); 4122 } 4123 4124 /* update totalreserve_pages */ 4125 calculate_totalreserve_pages(); 4126} 4127 4128/* 4129 * Initialise min_free_kbytes. 4130 * 4131 * For small machines we want it small (128k min). For large machines 4132 * we want it large (64MB max). But it is not linear, because network 4133 * bandwidth does not increase linearly with machine size. We use 4134 * 4135 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 4136 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 4137 * 4138 * which yields 4139 * 4140 * 16MB: 512k 4141 * 32MB: 724k 4142 * 64MB: 1024k 4143 * 128MB: 1448k 4144 * 256MB: 2048k 4145 * 512MB: 2896k 4146 * 1024MB: 4096k 4147 * 2048MB: 5792k 4148 * 4096MB: 8192k 4149 * 8192MB: 11584k 4150 * 16384MB: 16384k 4151 */ 4152static int __init init_per_zone_pages_min(void) 4153{ 4154 unsigned long lowmem_kbytes; 4155 4156 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 4157 4158 min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 4159 if (min_free_kbytes < 128) 4160 min_free_kbytes = 128; 4161 if (min_free_kbytes > 65536) 4162 min_free_kbytes = 65536; 4163 setup_per_zone_pages_min(); 4164 setup_per_zone_lowmem_reserve(); 4165 return 0; 4166} 4167module_init(init_per_zone_pages_min) 4168 4169/* 4170 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 4171 * that we can call two helper functions whenever min_free_kbytes 4172 * changes. 4173 */ 4174int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 4175 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4176{ 4177 proc_dointvec(table, write, file, buffer, length, ppos); 4178 if (write) 4179 setup_per_zone_pages_min(); 4180 return 0; 4181} 4182 4183#ifdef CONFIG_NUMA 4184int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, 4185 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4186{ 4187 struct zone *zone; 4188 int rc; 4189 4190 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4191 if (rc) 4192 return rc; 4193 4194 for_each_zone(zone) 4195 zone->min_unmapped_pages = (zone->present_pages * 4196 sysctl_min_unmapped_ratio) / 100; 4197 return 0; 4198} 4199 4200int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write, 4201 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4202{ 4203 struct zone *zone; 4204 int rc; 4205 4206 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4207 if (rc) 4208 return rc; 4209 4210 for_each_zone(zone) 4211 zone->min_slab_pages = (zone->present_pages * 4212 sysctl_min_slab_ratio) / 100; 4213 return 0; 4214} 4215#endif 4216 4217/* 4218 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 4219 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 4220 * whenever sysctl_lowmem_reserve_ratio changes. 4221 * 4222 * The reserve ratio obviously has absolutely no relation with the 4223 * pages_min watermarks. The lowmem reserve ratio can only make sense 4224 * if in function of the boot time zone sizes. 4225 */ 4226int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 4227 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4228{ 4229 proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4230 setup_per_zone_lowmem_reserve(); 4231 return 0; 4232} 4233 4234/* 4235 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 4236 * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist 4237 * can have before it gets flushed back to buddy allocator. 4238 */ 4239 4240int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 4241 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4242{ 4243 struct zone *zone; 4244 unsigned int cpu; 4245 int ret; 4246 4247 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4248 if (!write || (ret == -EINVAL)) 4249 return ret; 4250 for_each_zone(zone) { 4251 for_each_online_cpu(cpu) { 4252 unsigned long high; 4253 high = zone->present_pages / percpu_pagelist_fraction; 4254 setup_pagelist_highmark(zone_pcp(zone, cpu), high); 4255 } 4256 } 4257 return 0; 4258} 4259 4260int hashdist = HASHDIST_DEFAULT; 4261 4262#ifdef CONFIG_NUMA 4263static int __init set_hashdist(char *str) 4264{ 4265 if (!str) 4266 return 0; 4267 hashdist = simple_strtoul(str, &str, 0); 4268 return 1; 4269} 4270__setup("hashdist=", set_hashdist); 4271#endif 4272 4273/* 4274 * allocate a large system hash table from bootmem 4275 * - it is assumed that the hash table must contain an exact power-of-2 4276 * quantity of entries 4277 * - limit is the number of hash buckets, not the total allocation size 4278 */ 4279void *__init alloc_large_system_hash(const char *tablename, 4280 unsigned long bucketsize, 4281 unsigned long numentries, 4282 int scale, 4283 int flags, 4284 unsigned int *_hash_shift, 4285 unsigned int *_hash_mask, 4286 unsigned long limit) 4287{ 4288 unsigned long long max = limit; 4289 unsigned long log2qty, size; 4290 void *table = NULL; 4291 4292 /* allow the kernel cmdline to have a say */ 4293 if (!numentries) { 4294 /* round applicable memory size up to nearest megabyte */ 4295 numentries = nr_kernel_pages; 4296 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 4297 numentries >>= 20 - PAGE_SHIFT; 4298 numentries <<= 20 - PAGE_SHIFT; 4299 4300 /* limit to 1 bucket per 2^scale bytes of low memory */ 4301 if (scale > PAGE_SHIFT) 4302 numentries >>= (scale - PAGE_SHIFT); 4303 else 4304 numentries <<= (PAGE_SHIFT - scale); 4305 4306 /* Make sure we've got at least a 0-order allocation.. */ 4307 if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 4308 numentries = PAGE_SIZE / bucketsize; 4309 } 4310 numentries = roundup_pow_of_two(numentries); 4311 4312 /* limit allocation size to 1/16 total memory by default */ 4313 if (max == 0) { 4314 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 4315 do_div(max, bucketsize); 4316 } 4317 4318 if (numentries > max) 4319 numentries = max; 4320 4321 log2qty = ilog2(numentries); 4322 4323 do { 4324 size = bucketsize << log2qty; 4325 if (flags & HASH_EARLY) 4326 table = alloc_bootmem(size); 4327 else if (hashdist) 4328 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 4329 else { 4330 unsigned long order; 4331 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++) 4332 ; 4333 table = (void*) __get_free_pages(GFP_ATOMIC, order); 4334 /* 4335 * If bucketsize is not a power-of-two, we may free 4336 * some pages at the end of hash table. 4337 */ 4338 if (table) { 4339 unsigned long alloc_end = (unsigned long)table + 4340 (PAGE_SIZE << order); 4341 unsigned long used = (unsigned long)table + 4342 PAGE_ALIGN(size); 4343 split_page(virt_to_page(table), order); 4344 while (used < alloc_end) { 4345 free_page(used); 4346 used += PAGE_SIZE; 4347 } 4348 } 4349 } 4350 } while (!table && size > PAGE_SIZE && --log2qty); 4351 4352 if (!table) 4353 panic("Failed to allocate %s hash table\n", tablename); 4354 4355 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n", 4356 tablename, 4357 (1U << log2qty), 4358 ilog2(size) - PAGE_SHIFT, 4359 size); 4360 4361 if (_hash_shift) 4362 *_hash_shift = log2qty; 4363 if (_hash_mask) 4364 *_hash_mask = (1 << log2qty) - 1; 4365 4366 return table; 4367} 4368 4369#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE 4370struct page *pfn_to_page(unsigned long pfn) 4371{ 4372 return __pfn_to_page(pfn); 4373} 4374unsigned long page_to_pfn(struct page *page) 4375{ 4376 return __page_to_pfn(page); 4377} 4378EXPORT_SYMBOL(pfn_to_page); 4379EXPORT_SYMBOL(page_to_pfn); 4380#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ 4381 4382/* Return a pointer to the bitmap storing bits affecting a block of pages */ 4383static inline unsigned long *get_pageblock_bitmap(struct zone *zone, 4384 unsigned long pfn) 4385{ 4386#ifdef CONFIG_SPARSEMEM 4387 return __pfn_to_section(pfn)->pageblock_flags; 4388#else 4389 return zone->pageblock_flags; 4390#endif /* CONFIG_SPARSEMEM */ 4391} 4392 4393static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) 4394{ 4395#ifdef CONFIG_SPARSEMEM 4396 pfn &= (PAGES_PER_SECTION-1); 4397 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 4398#else 4399 pfn = pfn - zone->zone_start_pfn; 4400 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 4401#endif /* CONFIG_SPARSEMEM */ 4402} 4403 4404/** 4405 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages 4406 * @page: The page within the block of interest 4407 * @start_bitidx: The first bit of interest to retrieve 4408 * @end_bitidx: The last bit of interest 4409 * returns pageblock_bits flags 4410 */ 4411unsigned long get_pageblock_flags_group(struct page *page, 4412 int start_bitidx, int end_bitidx) 4413{ 4414 struct zone *zone; 4415 unsigned long *bitmap; 4416 unsigned long pfn, bitidx; 4417 unsigned long flags = 0; 4418 unsigned long value = 1; 4419 4420 zone = page_zone(page); 4421 pfn = page_to_pfn(page); 4422 bitmap = get_pageblock_bitmap(zone, pfn); 4423 bitidx = pfn_to_bitidx(zone, pfn); 4424 4425 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 4426 if (test_bit(bitidx + start_bitidx, bitmap)) 4427 flags |= value; 4428 4429 return flags; 4430} 4431 4432/** 4433 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages 4434 * @page: The page within the block of interest 4435 * @start_bitidx: The first bit of interest 4436 * @end_bitidx: The last bit of interest 4437 * @flags: The flags to set 4438 */ 4439void set_pageblock_flags_group(struct page *page, unsigned long flags, 4440 int start_bitidx, int end_bitidx) 4441{ 4442 struct zone *zone; 4443 unsigned long *bitmap; 4444 unsigned long pfn, bitidx; 4445 unsigned long value = 1; 4446 4447 zone = page_zone(page); 4448 pfn = page_to_pfn(page); 4449 bitmap = get_pageblock_bitmap(zone, pfn); 4450 bitidx = pfn_to_bitidx(zone, pfn); 4451 4452 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 4453 if (flags & value) 4454 __set_bit(bitidx + start_bitidx, bitmap); 4455 else 4456 __clear_bit(bitidx + start_bitidx, bitmap); 4457} 4458 4459/* 4460 * This is designed as sub function...plz see page_isolation.c also. 4461 * set/clear page block's type to be ISOLATE. 4462 * page allocater never alloc memory from ISOLATE block. 4463 */ 4464 4465int set_migratetype_isolate(struct page *page) 4466{ 4467 struct zone *zone; 4468 unsigned long flags; 4469 int ret = -EBUSY; 4470 4471 zone = page_zone(page); 4472 spin_lock_irqsave(&zone->lock, flags); 4473 /* 4474 * In future, more migrate types will be able to be isolation target. 4475 */ 4476 if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE) 4477 goto out; 4478 set_pageblock_migratetype(page, MIGRATE_ISOLATE); 4479 move_freepages_block(zone, page, MIGRATE_ISOLATE); 4480 ret = 0; 4481out: 4482 spin_unlock_irqrestore(&zone->lock, flags); 4483 if (!ret) 4484 drain_all_pages(); 4485 return ret; 4486} 4487 4488void unset_migratetype_isolate(struct page *page) 4489{ 4490 struct zone *zone; 4491 unsigned long flags; 4492 zone = page_zone(page); 4493 spin_lock_irqsave(&zone->lock, flags); 4494 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) 4495 goto out; 4496 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4497 move_freepages_block(zone, page, MIGRATE_MOVABLE); 4498out: 4499 spin_unlock_irqrestore(&zone->lock, flags); 4500} 4501 4502#ifdef CONFIG_MEMORY_HOTREMOVE 4503/* 4504 * All pages in the range must be isolated before calling this. 4505 */ 4506void 4507__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 4508{ 4509 struct page *page; 4510 struct zone *zone; 4511 int order, i; 4512 unsigned long pfn; 4513 unsigned long flags; 4514 /* find the first valid pfn */ 4515 for (pfn = start_pfn; pfn < end_pfn; pfn++) 4516 if (pfn_valid(pfn)) 4517 break; 4518 if (pfn == end_pfn) 4519 return; 4520 zone = page_zone(pfn_to_page(pfn)); 4521 spin_lock_irqsave(&zone->lock, flags); 4522 pfn = start_pfn; 4523 while (pfn < end_pfn) { 4524 if (!pfn_valid(pfn)) { 4525 pfn++; 4526 continue; 4527 } 4528 page = pfn_to_page(pfn); 4529 BUG_ON(page_count(page)); 4530 BUG_ON(!PageBuddy(page)); 4531 order = page_order(page); 4532#ifdef CONFIG_DEBUG_VM 4533 printk(KERN_INFO "remove from free list %lx %d %lx\n", 4534 pfn, 1 << order, end_pfn); 4535#endif 4536 list_del(&page->lru); 4537 rmv_page_order(page); 4538 zone->free_area[order].nr_free--; 4539 __mod_zone_page_state(zone, NR_FREE_PAGES, 4540 - (1UL << order)); 4541 for (i = 0; i < (1 << order); i++) 4542 SetPageReserved((page+i)); 4543 pfn += (1 << order); 4544 } 4545 spin_unlock_irqrestore(&zone->lock, flags); 4546} 4547#endif 4548