page_alloc.c revision a41f24ea9fd6169b147c53c2392e2887cc1d9247
1/* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17#include <linux/stddef.h> 18#include <linux/mm.h> 19#include <linux/swap.h> 20#include <linux/interrupt.h> 21#include <linux/pagemap.h> 22#include <linux/jiffies.h> 23#include <linux/bootmem.h> 24#include <linux/compiler.h> 25#include <linux/kernel.h> 26#include <linux/module.h> 27#include <linux/suspend.h> 28#include <linux/pagevec.h> 29#include <linux/blkdev.h> 30#include <linux/slab.h> 31#include <linux/oom.h> 32#include <linux/notifier.h> 33#include <linux/topology.h> 34#include <linux/sysctl.h> 35#include <linux/cpu.h> 36#include <linux/cpuset.h> 37#include <linux/memory_hotplug.h> 38#include <linux/nodemask.h> 39#include <linux/vmalloc.h> 40#include <linux/mempolicy.h> 41#include <linux/stop_machine.h> 42#include <linux/sort.h> 43#include <linux/pfn.h> 44#include <linux/backing-dev.h> 45#include <linux/fault-inject.h> 46#include <linux/page-isolation.h> 47#include <linux/memcontrol.h> 48 49#include <asm/tlbflush.h> 50#include <asm/div64.h> 51#include "internal.h" 52 53/* 54 * Array of node states. 55 */ 56nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 57 [N_POSSIBLE] = NODE_MASK_ALL, 58 [N_ONLINE] = { { [0] = 1UL } }, 59#ifndef CONFIG_NUMA 60 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 61#ifdef CONFIG_HIGHMEM 62 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 63#endif 64 [N_CPU] = { { [0] = 1UL } }, 65#endif /* NUMA */ 66}; 67EXPORT_SYMBOL(node_states); 68 69unsigned long totalram_pages __read_mostly; 70unsigned long totalreserve_pages __read_mostly; 71long nr_swap_pages; 72int percpu_pagelist_fraction; 73 74#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 75int pageblock_order __read_mostly; 76#endif 77 78static void __free_pages_ok(struct page *page, unsigned int order); 79 80/* 81 * results with 256, 32 in the lowmem_reserve sysctl: 82 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 83 * 1G machine -> (16M dma, 784M normal, 224M high) 84 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 85 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 86 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA 87 * 88 * TBD: should special case ZONE_DMA32 machines here - in those we normally 89 * don't need any ZONE_NORMAL reservation 90 */ 91int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 92#ifdef CONFIG_ZONE_DMA 93 256, 94#endif 95#ifdef CONFIG_ZONE_DMA32 96 256, 97#endif 98#ifdef CONFIG_HIGHMEM 99 32, 100#endif 101 32, 102}; 103 104EXPORT_SYMBOL(totalram_pages); 105 106static char * const zone_names[MAX_NR_ZONES] = { 107#ifdef CONFIG_ZONE_DMA 108 "DMA", 109#endif 110#ifdef CONFIG_ZONE_DMA32 111 "DMA32", 112#endif 113 "Normal", 114#ifdef CONFIG_HIGHMEM 115 "HighMem", 116#endif 117 "Movable", 118}; 119 120int min_free_kbytes = 1024; 121 122unsigned long __meminitdata nr_kernel_pages; 123unsigned long __meminitdata nr_all_pages; 124static unsigned long __meminitdata dma_reserve; 125 126#ifdef 
CONFIG_ARCH_POPULATES_NODE_MAP 127 /* 128 * MAX_ACTIVE_REGIONS determines the maximum number of distinct 129 * ranges of memory (RAM) that may be registered with add_active_range(). 130 * Ranges passed to add_active_range() will be merged if possible 131 * so the number of times add_active_range() can be called is 132 * related to the number of nodes and the number of holes 133 */ 134 #ifdef CONFIG_MAX_ACTIVE_REGIONS 135 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ 136 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS 137 #else 138 #if MAX_NUMNODES >= 32 139 /* If there can be many nodes, allow up to 50 holes per node */ 140 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) 141 #else 142 /* By default, allow up to 256 distinct regions */ 143 #define MAX_ACTIVE_REGIONS 256 144 #endif 145 #endif 146 147 static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; 148 static int __meminitdata nr_nodemap_entries; 149 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; 150 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; 151#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 152 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES]; 153 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES]; 154#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 155 unsigned long __initdata required_kernelcore; 156 static unsigned long __initdata required_movablecore; 157 unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; 158 159 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 160 int movable_zone; 161 EXPORT_SYMBOL(movable_zone); 162#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 163 164#if MAX_NUMNODES > 1 165int nr_node_ids __read_mostly = MAX_NUMNODES; 166EXPORT_SYMBOL(nr_node_ids); 167#endif 168 169int page_group_by_mobility_disabled __read_mostly; 170 171static void set_pageblock_migratetype(struct page *page, int migratetype) 172{ 173 set_pageblock_flags_group(page, (unsigned long)migratetype, 174 PB_migrate, PB_migrate_end); 175} 176 177#ifdef CONFIG_DEBUG_VM 178static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 179{ 180 int ret = 0; 181 unsigned seq; 182 unsigned long pfn = page_to_pfn(page); 183 184 do { 185 seq = zone_span_seqbegin(zone); 186 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 187 ret = 1; 188 else if (pfn < zone->zone_start_pfn) 189 ret = 1; 190 } while (zone_span_seqretry(zone, seq)); 191 192 return ret; 193} 194 195static int page_is_consistent(struct zone *zone, struct page *page) 196{ 197 if (!pfn_valid_within(page_to_pfn(page))) 198 return 0; 199 if (zone != page_zone(page)) 200 return 0; 201 202 return 1; 203} 204/* 205 * Temporary debugging check for pages not lying within a given zone. 
206 */ 207static int bad_range(struct zone *zone, struct page *page) 208{ 209 if (page_outside_zone_boundaries(zone, page)) 210 return 1; 211 if (!page_is_consistent(zone, page)) 212 return 1; 213 214 return 0; 215} 216#else 217static inline int bad_range(struct zone *zone, struct page *page) 218{ 219 return 0; 220} 221#endif 222 223static void bad_page(struct page *page) 224{ 225 void *pc = page_get_page_cgroup(page); 226 227 printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG 228 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n", 229 current->comm, page, (int)(2*sizeof(unsigned long)), 230 (unsigned long)page->flags, page->mapping, 231 page_mapcount(page), page_count(page)); 232 if (pc) { 233 printk(KERN_EMERG "cgroup:%p\n", pc); 234 page_reset_bad_cgroup(page); 235 } 236 printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n" 237 KERN_EMERG "Backtrace:\n"); 238 dump_stack(); 239 page->flags &= ~(1 << PG_lru | 240 1 << PG_private | 241 1 << PG_locked | 242 1 << PG_active | 243 1 << PG_dirty | 244 1 << PG_reclaim | 245 1 << PG_slab | 246 1 << PG_swapcache | 247 1 << PG_writeback | 248 1 << PG_buddy ); 249 set_page_count(page, 0); 250 reset_page_mapcount(page); 251 page->mapping = NULL; 252 add_taint(TAINT_BAD_PAGE); 253} 254 255/* 256 * Higher-order pages are called "compound pages". They are structured thusly: 257 * 258 * The first PAGE_SIZE page is called the "head page". 259 * 260 * The remaining PAGE_SIZE pages are called "tail pages". 261 * 262 * All pages have PG_compound set. All pages have their ->private pointing at 263 * the head page (even the head page has this). 264 * 265 * The first tail page's ->lru.next holds the address of the compound page's 266 * put_page() function. Its ->lru.prev holds the order of allocation. 267 * This usage means that zero-order pages may not be compound. 268 */ 269 270static void free_compound_page(struct page *page) 271{ 272 __free_pages_ok(page, compound_order(page)); 273} 274 275static void prep_compound_page(struct page *page, unsigned long order) 276{ 277 int i; 278 int nr_pages = 1 << order; 279 280 set_compound_page_dtor(page, free_compound_page); 281 set_compound_order(page, order); 282 __SetPageHead(page); 283 for (i = 1; i < nr_pages; i++) { 284 struct page *p = page + i; 285 286 __SetPageTail(p); 287 p->first_page = page; 288 } 289} 290 291static void destroy_compound_page(struct page *page, unsigned long order) 292{ 293 int i; 294 int nr_pages = 1 << order; 295 296 if (unlikely(compound_order(page) != order)) 297 bad_page(page); 298 299 if (unlikely(!PageHead(page))) 300 bad_page(page); 301 __ClearPageHead(page); 302 for (i = 1; i < nr_pages; i++) { 303 struct page *p = page + i; 304 305 if (unlikely(!PageTail(p) | 306 (p->first_page != page))) 307 bad_page(page); 308 __ClearPageTail(p); 309 } 310} 311 312static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 313{ 314 int i; 315 316 /* 317 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 318 * and __GFP_HIGHMEM from hard or soft interrupt context. 
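 *
 * For example, alloc_page(GFP_HIGHUSER | __GFP_ZERO) from process
 * context is fine, but the same flags from an interrupt handler would
 * trip the VM_BUG_ON() below; interrupt-context callers that need a
 * zeroed page should stick to lowmem, e.g. GFP_ATOMIC | __GFP_ZERO.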
 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}

/*
 * This function checks whether a page is free && is the buddy.
 * A page and its buddy can be coalesced if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with PG_buddy. The page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
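 *
 * For example, if a free order-2 block starts at page_idx 8, its buddy
 * is the order-2 block at 8 ^ (1 << 2) = 12. If that buddy is free as
 * well, the two merge into the order-3 block starting at
 * 8 & ~(1 << 2) = 8, and the same test is repeated one order higher.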
417 * 418 * -- wli 419 */ 420 421static inline void __free_one_page(struct page *page, 422 struct zone *zone, unsigned int order) 423{ 424 unsigned long page_idx; 425 int order_size = 1 << order; 426 int migratetype = get_pageblock_migratetype(page); 427 428 if (unlikely(PageCompound(page))) 429 destroy_compound_page(page, order); 430 431 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 432 433 VM_BUG_ON(page_idx & (order_size - 1)); 434 VM_BUG_ON(bad_range(zone, page)); 435 436 __mod_zone_page_state(zone, NR_FREE_PAGES, order_size); 437 while (order < MAX_ORDER-1) { 438 unsigned long combined_idx; 439 struct page *buddy; 440 441 buddy = __page_find_buddy(page, page_idx, order); 442 if (!page_is_buddy(page, buddy, order)) 443 break; /* Move the buddy up one level. */ 444 445 list_del(&buddy->lru); 446 zone->free_area[order].nr_free--; 447 rmv_page_order(buddy); 448 combined_idx = __find_combined_index(page_idx, order); 449 page = page + (combined_idx - page_idx); 450 page_idx = combined_idx; 451 order++; 452 } 453 set_page_order(page, order); 454 list_add(&page->lru, 455 &zone->free_area[order].free_list[migratetype]); 456 zone->free_area[order].nr_free++; 457} 458 459static inline int free_pages_check(struct page *page) 460{ 461 if (unlikely(page_mapcount(page) | 462 (page->mapping != NULL) | 463 (page_get_page_cgroup(page) != NULL) | 464 (page_count(page) != 0) | 465 (page->flags & ( 466 1 << PG_lru | 467 1 << PG_private | 468 1 << PG_locked | 469 1 << PG_active | 470 1 << PG_slab | 471 1 << PG_swapcache | 472 1 << PG_writeback | 473 1 << PG_reserved | 474 1 << PG_buddy )))) 475 bad_page(page); 476 if (PageDirty(page)) 477 __ClearPageDirty(page); 478 /* 479 * For now, we report if PG_reserved was found set, but do not 480 * clear it, and do not free the page. But we shall soon need 481 * to do more, for when the ZERO_PAGE count wraps negative. 482 */ 483 return PageReserved(page); 484} 485 486/* 487 * Frees a list of pages. 488 * Assumes all pages on list are in same zone, and of same order. 489 * count is the number of pages to free. 490 * 491 * If the zone was previously in an "all pages pinned" state then look to 492 * see if this freeing clears that state. 493 * 494 * And clear the zone's pages_scanned counter, to hold off the "all pages are 495 * pinned" detection logic. 
496 */ 497static void free_pages_bulk(struct zone *zone, int count, 498 struct list_head *list, int order) 499{ 500 spin_lock(&zone->lock); 501 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); 502 zone->pages_scanned = 0; 503 while (count--) { 504 struct page *page; 505 506 VM_BUG_ON(list_empty(list)); 507 page = list_entry(list->prev, struct page, lru); 508 /* have to delete it as __free_one_page list manipulates */ 509 list_del(&page->lru); 510 __free_one_page(page, zone, order); 511 } 512 spin_unlock(&zone->lock); 513} 514 515static void free_one_page(struct zone *zone, struct page *page, int order) 516{ 517 spin_lock(&zone->lock); 518 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); 519 zone->pages_scanned = 0; 520 __free_one_page(page, zone, order); 521 spin_unlock(&zone->lock); 522} 523 524static void __free_pages_ok(struct page *page, unsigned int order) 525{ 526 unsigned long flags; 527 int i; 528 int reserved = 0; 529 530 for (i = 0 ; i < (1 << order) ; ++i) 531 reserved += free_pages_check(page + i); 532 if (reserved) 533 return; 534 535 if (!PageHighMem(page)) 536 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); 537 arch_free_page(page, order); 538 kernel_map_pages(page, 1 << order, 0); 539 540 local_irq_save(flags); 541 __count_vm_events(PGFREE, 1 << order); 542 free_one_page(page_zone(page), page, order); 543 local_irq_restore(flags); 544} 545 546/* 547 * permit the bootmem allocator to evade page validation on high-order frees 548 */ 549void __free_pages_bootmem(struct page *page, unsigned int order) 550{ 551 if (order == 0) { 552 __ClearPageReserved(page); 553 set_page_count(page, 0); 554 set_page_refcounted(page); 555 __free_page(page); 556 } else { 557 int loop; 558 559 prefetchw(page); 560 for (loop = 0; loop < BITS_PER_LONG; loop++) { 561 struct page *p = &page[loop]; 562 563 if (loop + 1 < BITS_PER_LONG) 564 prefetchw(p + 1); 565 __ClearPageReserved(p); 566 set_page_count(p, 0); 567 } 568 569 set_page_refcounted(page); 570 __free_pages(page, order); 571 } 572} 573 574 575/* 576 * The order of subdivision here is critical for the IO subsystem. 577 * Please do not alter this order without good reasons and regression 578 * testing. Specifically, as large blocks of memory are subdivided, 579 * the order in which smaller blocks are delivered depends on the order 580 * they're subdivided in this function. This is the primary factor 581 * influencing the order in which pages are delivered to the IO 582 * subsystem according to empirical testing, and this is also justified 583 * by considering the behavior of a buddy system containing a single 584 * large block of memory acted on by a series of small allocations. 585 * This behavior is a critical factor in sglist merging's success. 
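 *
 * For example, satisfying an order-0 request from an order-3 free block
 * at page p hands the unused halves back largest-first: p[4] goes to the
 * order-2 free list, p[2] to the order-1 list and p[1] to the order-0
 * list, while p itself is returned. Subsequent order-0 allocations then
 * tend to come back in ascending physical order, which is what helps
 * sglist merging.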
586 * 587 * -- wli 588 */ 589static inline void expand(struct zone *zone, struct page *page, 590 int low, int high, struct free_area *area, 591 int migratetype) 592{ 593 unsigned long size = 1 << high; 594 595 while (high > low) { 596 area--; 597 high--; 598 size >>= 1; 599 VM_BUG_ON(bad_range(zone, &page[size])); 600 list_add(&page[size].lru, &area->free_list[migratetype]); 601 area->nr_free++; 602 set_page_order(&page[size], high); 603 } 604} 605 606/* 607 * This page is about to be returned from the page allocator 608 */ 609static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 610{ 611 if (unlikely(page_mapcount(page) | 612 (page->mapping != NULL) | 613 (page_get_page_cgroup(page) != NULL) | 614 (page_count(page) != 0) | 615 (page->flags & ( 616 1 << PG_lru | 617 1 << PG_private | 618 1 << PG_locked | 619 1 << PG_active | 620 1 << PG_dirty | 621 1 << PG_slab | 622 1 << PG_swapcache | 623 1 << PG_writeback | 624 1 << PG_reserved | 625 1 << PG_buddy )))) 626 bad_page(page); 627 628 /* 629 * For now, we report if PG_reserved was found set, but do not 630 * clear it, and do not allocate the page: as a safety net. 631 */ 632 if (PageReserved(page)) 633 return 1; 634 635 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim | 636 1 << PG_referenced | 1 << PG_arch_1 | 637 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk); 638 set_page_private(page, 0); 639 set_page_refcounted(page); 640 641 arch_alloc_page(page, order); 642 kernel_map_pages(page, 1 << order, 1); 643 644 if (gfp_flags & __GFP_ZERO) 645 prep_zero_page(page, order, gfp_flags); 646 647 if (order && (gfp_flags & __GFP_COMP)) 648 prep_compound_page(page, order); 649 650 return 0; 651} 652 653/* 654 * Go through the free lists for the given migratetype and remove 655 * the smallest available page from the freelists 656 */ 657static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 658 int migratetype) 659{ 660 unsigned int current_order; 661 struct free_area * area; 662 struct page *page; 663 664 /* Find a page of the appropriate size in the preferred list */ 665 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 666 area = &(zone->free_area[current_order]); 667 if (list_empty(&area->free_list[migratetype])) 668 continue; 669 670 page = list_entry(area->free_list[migratetype].next, 671 struct page, lru); 672 list_del(&page->lru); 673 rmv_page_order(page); 674 area->nr_free--; 675 __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order)); 676 expand(zone, page, order, current_order, area, migratetype); 677 return page; 678 } 679 680 return NULL; 681} 682 683 684/* 685 * This array describes the order lists are fallen back to when 686 * the free lists for the desirable migrate type are depleted 687 */ 688static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = { 689 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 690 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 691 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE }, 692 [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */ 693}; 694 695/* 696 * Move the free pages in a range to the free lists of the requested type. 697 * Note that start_page and end_pages are not aligned on a pageblock 698 * boundary. 
If alignment is required, use move_freepages_block() 699 */ 700int move_freepages(struct zone *zone, 701 struct page *start_page, struct page *end_page, 702 int migratetype) 703{ 704 struct page *page; 705 unsigned long order; 706 int pages_moved = 0; 707 708#ifndef CONFIG_HOLES_IN_ZONE 709 /* 710 * page_zone is not safe to call in this context when 711 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant 712 * anyway as we check zone boundaries in move_freepages_block(). 713 * Remove at a later date when no bug reports exist related to 714 * grouping pages by mobility 715 */ 716 BUG_ON(page_zone(start_page) != page_zone(end_page)); 717#endif 718 719 for (page = start_page; page <= end_page;) { 720 if (!pfn_valid_within(page_to_pfn(page))) { 721 page++; 722 continue; 723 } 724 725 if (!PageBuddy(page)) { 726 page++; 727 continue; 728 } 729 730 order = page_order(page); 731 list_del(&page->lru); 732 list_add(&page->lru, 733 &zone->free_area[order].free_list[migratetype]); 734 page += 1 << order; 735 pages_moved += 1 << order; 736 } 737 738 return pages_moved; 739} 740 741int move_freepages_block(struct zone *zone, struct page *page, int migratetype) 742{ 743 unsigned long start_pfn, end_pfn; 744 struct page *start_page, *end_page; 745 746 start_pfn = page_to_pfn(page); 747 start_pfn = start_pfn & ~(pageblock_nr_pages-1); 748 start_page = pfn_to_page(start_pfn); 749 end_page = start_page + pageblock_nr_pages - 1; 750 end_pfn = start_pfn + pageblock_nr_pages - 1; 751 752 /* Do not cross zone boundaries */ 753 if (start_pfn < zone->zone_start_pfn) 754 start_page = page; 755 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages) 756 return 0; 757 758 return move_freepages(zone, start_page, end_page, migratetype); 759} 760 761/* Remove an element from the buddy allocator from the fallback list */ 762static struct page *__rmqueue_fallback(struct zone *zone, int order, 763 int start_migratetype) 764{ 765 struct free_area * area; 766 int current_order; 767 struct page *page; 768 int migratetype, i; 769 770 /* Find the largest possible block of pages in the other list */ 771 for (current_order = MAX_ORDER-1; current_order >= order; 772 --current_order) { 773 for (i = 0; i < MIGRATE_TYPES - 1; i++) { 774 migratetype = fallbacks[start_migratetype][i]; 775 776 /* MIGRATE_RESERVE handled later if necessary */ 777 if (migratetype == MIGRATE_RESERVE) 778 continue; 779 780 area = &(zone->free_area[current_order]); 781 if (list_empty(&area->free_list[migratetype])) 782 continue; 783 784 page = list_entry(area->free_list[migratetype].next, 785 struct page, lru); 786 area->nr_free--; 787 788 /* 789 * If breaking a large block of pages, move all free 790 * pages to the preferred allocation list. 
If falling 791 * back for a reclaimable kernel allocation, be more 792 * agressive about taking ownership of free pages 793 */ 794 if (unlikely(current_order >= (pageblock_order >> 1)) || 795 start_migratetype == MIGRATE_RECLAIMABLE) { 796 unsigned long pages; 797 pages = move_freepages_block(zone, page, 798 start_migratetype); 799 800 /* Claim the whole block if over half of it is free */ 801 if (pages >= (1 << (pageblock_order-1))) 802 set_pageblock_migratetype(page, 803 start_migratetype); 804 805 migratetype = start_migratetype; 806 } 807 808 /* Remove the page from the freelists */ 809 list_del(&page->lru); 810 rmv_page_order(page); 811 __mod_zone_page_state(zone, NR_FREE_PAGES, 812 -(1UL << order)); 813 814 if (current_order == pageblock_order) 815 set_pageblock_migratetype(page, 816 start_migratetype); 817 818 expand(zone, page, order, current_order, area, migratetype); 819 return page; 820 } 821 } 822 823 /* Use MIGRATE_RESERVE rather than fail an allocation */ 824 return __rmqueue_smallest(zone, order, MIGRATE_RESERVE); 825} 826 827/* 828 * Do the hard work of removing an element from the buddy allocator. 829 * Call me with the zone->lock already held. 830 */ 831static struct page *__rmqueue(struct zone *zone, unsigned int order, 832 int migratetype) 833{ 834 struct page *page; 835 836 page = __rmqueue_smallest(zone, order, migratetype); 837 838 if (unlikely(!page)) 839 page = __rmqueue_fallback(zone, order, migratetype); 840 841 return page; 842} 843 844/* 845 * Obtain a specified number of elements from the buddy allocator, all under 846 * a single hold of the lock, for efficiency. Add them to the supplied list. 847 * Returns the number of new pages which were placed at *list. 848 */ 849static int rmqueue_bulk(struct zone *zone, unsigned int order, 850 unsigned long count, struct list_head *list, 851 int migratetype) 852{ 853 int i; 854 855 spin_lock(&zone->lock); 856 for (i = 0; i < count; ++i) { 857 struct page *page = __rmqueue(zone, order, migratetype); 858 if (unlikely(page == NULL)) 859 break; 860 861 /* 862 * Split buddy pages returned by expand() are received here 863 * in physical page order. The page is added to the callers and 864 * list and the list head then moves forward. From the callers 865 * perspective, the linked list is ordered by page number in 866 * some conditions. This is useful for IO devices that can 867 * merge IO requests if the physical pages are ordered 868 * properly. 869 */ 870 list_add(&page->lru, list); 871 set_page_private(page, migratetype); 872 list = &page->lru; 873 } 874 spin_unlock(&zone->lock); 875 return i; 876} 877 878#ifdef CONFIG_NUMA 879/* 880 * Called from the vmstat counter updater to drain pagesets of this 881 * currently executing processor on remote nodes after they have 882 * expired. 883 * 884 * Note that this function must be called with the thread pinned to 885 * a single processor. 886 */ 887void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 888{ 889 unsigned long flags; 890 int to_drain; 891 892 local_irq_save(flags); 893 if (pcp->count >= pcp->batch) 894 to_drain = pcp->batch; 895 else 896 to_drain = pcp->count; 897 free_pages_bulk(zone, to_drain, &pcp->list, 0); 898 pcp->count -= to_drain; 899 local_irq_restore(flags); 900} 901#endif 902 903/* 904 * Drain pages of the indicated processor. 905 * 906 * The processor must either be the current processor and the 907 * thread pinned to the current processor or a processor that 908 * is not online. 
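 *
 * The restriction exists because the pcp lists are only protected by
 * disabling interrupts on their owning CPU: draining another CPU's
 * lists while it is online would race with its allocation fast path.
 * An offline CPU can no longer touch its pagesets, so its leftover
 * pages can be freed back safely.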
909 */ 910static void drain_pages(unsigned int cpu) 911{ 912 unsigned long flags; 913 struct zone *zone; 914 915 for_each_zone(zone) { 916 struct per_cpu_pageset *pset; 917 struct per_cpu_pages *pcp; 918 919 if (!populated_zone(zone)) 920 continue; 921 922 pset = zone_pcp(zone, cpu); 923 924 pcp = &pset->pcp; 925 local_irq_save(flags); 926 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 927 pcp->count = 0; 928 local_irq_restore(flags); 929 } 930} 931 932/* 933 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 934 */ 935void drain_local_pages(void *arg) 936{ 937 drain_pages(smp_processor_id()); 938} 939 940/* 941 * Spill all the per-cpu pages from all CPUs back into the buddy allocator 942 */ 943void drain_all_pages(void) 944{ 945 on_each_cpu(drain_local_pages, NULL, 0, 1); 946} 947 948#ifdef CONFIG_HIBERNATION 949 950void mark_free_pages(struct zone *zone) 951{ 952 unsigned long pfn, max_zone_pfn; 953 unsigned long flags; 954 int order, t; 955 struct list_head *curr; 956 957 if (!zone->spanned_pages) 958 return; 959 960 spin_lock_irqsave(&zone->lock, flags); 961 962 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 963 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 964 if (pfn_valid(pfn)) { 965 struct page *page = pfn_to_page(pfn); 966 967 if (!swsusp_page_is_forbidden(page)) 968 swsusp_unset_page_free(page); 969 } 970 971 for_each_migratetype_order(order, t) { 972 list_for_each(curr, &zone->free_area[order].free_list[t]) { 973 unsigned long i; 974 975 pfn = page_to_pfn(list_entry(curr, struct page, lru)); 976 for (i = 0; i < (1UL << order); i++) 977 swsusp_set_page_free(pfn_to_page(pfn + i)); 978 } 979 } 980 spin_unlock_irqrestore(&zone->lock, flags); 981} 982#endif /* CONFIG_PM */ 983 984/* 985 * Free a 0-order page 986 */ 987static void free_hot_cold_page(struct page *page, int cold) 988{ 989 struct zone *zone = page_zone(page); 990 struct per_cpu_pages *pcp; 991 unsigned long flags; 992 993 if (PageAnon(page)) 994 page->mapping = NULL; 995 if (free_pages_check(page)) 996 return; 997 998 if (!PageHighMem(page)) 999 debug_check_no_locks_freed(page_address(page), PAGE_SIZE); 1000 arch_free_page(page, 0); 1001 kernel_map_pages(page, 1, 0); 1002 1003 pcp = &zone_pcp(zone, get_cpu())->pcp; 1004 local_irq_save(flags); 1005 __count_vm_event(PGFREE); 1006 if (cold) 1007 list_add_tail(&page->lru, &pcp->list); 1008 else 1009 list_add(&page->lru, &pcp->list); 1010 set_page_private(page, get_pageblock_migratetype(page)); 1011 pcp->count++; 1012 if (pcp->count >= pcp->high) { 1013 free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 1014 pcp->count -= pcp->batch; 1015 } 1016 local_irq_restore(flags); 1017 put_cpu(); 1018} 1019 1020void free_hot_page(struct page *page) 1021{ 1022 free_hot_cold_page(page, 0); 1023} 1024 1025void free_cold_page(struct page *page) 1026{ 1027 free_hot_cold_page(page, 1); 1028} 1029 1030/* 1031 * split_page takes a non-compound higher-order page, and splits it into 1032 * n (1<<order) sub-pages: page[0..n] 1033 * Each sub-page must be freed individually. 1034 * 1035 * Note: this is probably too low level an operation for use in drivers. 1036 * Please consult with lkml before using this in your driver. 1037 */ 1038void split_page(struct page *page, unsigned int order) 1039{ 1040 int i; 1041 1042 VM_BUG_ON(PageCompound(page)); 1043 VM_BUG_ON(!page_count(page)); 1044 for (i = 1; i < (1 << order); i++) 1045 set_page_refcounted(page + i); 1046} 1047 1048/* 1049 * Really, prep_compound_page() should be called from __rmqueue_bulk(). 
But 1050 * we cheat by calling it from here, in the order > 0 path. Saves a branch 1051 * or two. 1052 */ 1053static struct page *buffered_rmqueue(struct zone *preferred_zone, 1054 struct zone *zone, int order, gfp_t gfp_flags) 1055{ 1056 unsigned long flags; 1057 struct page *page; 1058 int cold = !!(gfp_flags & __GFP_COLD); 1059 int cpu; 1060 int migratetype = allocflags_to_migratetype(gfp_flags); 1061 1062again: 1063 cpu = get_cpu(); 1064 if (likely(order == 0)) { 1065 struct per_cpu_pages *pcp; 1066 1067 pcp = &zone_pcp(zone, cpu)->pcp; 1068 local_irq_save(flags); 1069 if (!pcp->count) { 1070 pcp->count = rmqueue_bulk(zone, 0, 1071 pcp->batch, &pcp->list, migratetype); 1072 if (unlikely(!pcp->count)) 1073 goto failed; 1074 } 1075 1076 /* Find a page of the appropriate migrate type */ 1077 if (cold) { 1078 list_for_each_entry_reverse(page, &pcp->list, lru) 1079 if (page_private(page) == migratetype) 1080 break; 1081 } else { 1082 list_for_each_entry(page, &pcp->list, lru) 1083 if (page_private(page) == migratetype) 1084 break; 1085 } 1086 1087 /* Allocate more to the pcp list if necessary */ 1088 if (unlikely(&page->lru == &pcp->list)) { 1089 pcp->count += rmqueue_bulk(zone, 0, 1090 pcp->batch, &pcp->list, migratetype); 1091 page = list_entry(pcp->list.next, struct page, lru); 1092 } 1093 1094 list_del(&page->lru); 1095 pcp->count--; 1096 } else { 1097 spin_lock_irqsave(&zone->lock, flags); 1098 page = __rmqueue(zone, order, migratetype); 1099 spin_unlock(&zone->lock); 1100 if (!page) 1101 goto failed; 1102 } 1103 1104 __count_zone_vm_events(PGALLOC, zone, 1 << order); 1105 zone_statistics(preferred_zone, zone); 1106 local_irq_restore(flags); 1107 put_cpu(); 1108 1109 VM_BUG_ON(bad_range(zone, page)); 1110 if (prep_new_page(page, order, gfp_flags)) 1111 goto again; 1112 return page; 1113 1114failed: 1115 local_irq_restore(flags); 1116 put_cpu(); 1117 return NULL; 1118} 1119 1120#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 1121#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ 1122#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ 1123#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ 1124#define ALLOC_HARDER 0x10 /* try to alloc harder */ 1125#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 1126#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 1127 1128#ifdef CONFIG_FAIL_PAGE_ALLOC 1129 1130static struct fail_page_alloc_attr { 1131 struct fault_attr attr; 1132 1133 u32 ignore_gfp_highmem; 1134 u32 ignore_gfp_wait; 1135 u32 min_order; 1136 1137#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 1138 1139 struct dentry *ignore_gfp_highmem_file; 1140 struct dentry *ignore_gfp_wait_file; 1141 struct dentry *min_order_file; 1142 1143#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 1144 1145} fail_page_alloc = { 1146 .attr = FAULT_ATTR_INITIALIZER, 1147 .ignore_gfp_wait = 1, 1148 .ignore_gfp_highmem = 1, 1149 .min_order = 1, 1150}; 1151 1152static int __init setup_fail_page_alloc(char *str) 1153{ 1154 return setup_fault_attr(&fail_page_alloc.attr, str); 1155} 1156__setup("fail_page_alloc=", setup_fail_page_alloc); 1157 1158static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 1159{ 1160 if (order < fail_page_alloc.min_order) 1161 return 0; 1162 if (gfp_mask & __GFP_NOFAIL) 1163 return 0; 1164 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 1165 return 0; 1166 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) 1167 return 0; 1168 1169 return should_fail(&fail_page_alloc.attr, 1 << order); 1170} 1171 
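/*
 * Example usage (a sketch, assuming CONFIG_FAIL_PAGE_ALLOC and
 * CONFIG_FAULT_INJECTION_DEBUG_FS are enabled): failures can be armed
 * from the kernel command line, e.g.
 *
 *	fail_page_alloc=<interval>,<probability>,<space>,<times>
 *
 * and tuned at run time through the debugfs files created below, e.g.
 *
 *	echo 0 > /sys/kernel/debug/fail_page_alloc/ignore-gfp-wait
 *	echo 0 > /sys/kernel/debug/fail_page_alloc/min-order
 *
 * to let __GFP_WAIT and order-0 allocations fail as well. See
 * Documentation/fault-injection/fault-injection.txt for the precise
 * syntax and the attributes shared with the other fault capabilities.
 */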
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
	int err;

	err = init_fault_attr_dentries(&fail_page_alloc.attr,
				       "fail_page_alloc");
	if (err)
		return err;
	dir = fail_page_alloc.attr.dentries.dir;

	fail_page_alloc.ignore_gfp_wait_file =
		debugfs_create_bool("ignore-gfp-wait", mode, dir,
				      &fail_page_alloc.ignore_gfp_wait);

	fail_page_alloc.ignore_gfp_highmem_file =
		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				      &fail_page_alloc.ignore_gfp_highmem);
	fail_page_alloc.min_order_file =
		debugfs_create_u32("min-order", mode, dir,
				   &fail_page_alloc.min_order);

	if (!fail_page_alloc.ignore_gfp_wait_file ||
	    !fail_page_alloc.ignore_gfp_highmem_file ||
	    !fail_page_alloc.min_order_file) {
		err = -ENOMEM;
		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
		debugfs_remove(fail_page_alloc.min_order_file);
		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
	}

	return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}

#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in the last second) found to be nearly full. See further
 * comments in mmzone.h. Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed in zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
1277 */ 1278static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1279{ 1280 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1281 nodemask_t *allowednodes; /* zonelist_cache approximation */ 1282 1283 zlc = zonelist->zlcache_ptr; 1284 if (!zlc) 1285 return NULL; 1286 1287 if (time_after(jiffies, zlc->last_full_zap + HZ)) { 1288 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 1289 zlc->last_full_zap = jiffies; 1290 } 1291 1292 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ? 1293 &cpuset_current_mems_allowed : 1294 &node_states[N_HIGH_MEMORY]; 1295 return allowednodes; 1296} 1297 1298/* 1299 * Given 'z' scanning a zonelist, run a couple of quick checks to see 1300 * if it is worth looking at further for free memory: 1301 * 1) Check that the zone isn't thought to be full (doesn't have its 1302 * bit set in the zonelist_cache fullzones BITMAP). 1303 * 2) Check that the zones node (obtained from the zonelist_cache 1304 * z_to_n[] mapping) is allowed in the passed in allowednodes mask. 1305 * Return true (non-zero) if zone is worth looking at further, or 1306 * else return false (zero) if it is not. 1307 * 1308 * This check -ignores- the distinction between various watermarks, 1309 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is 1310 * found to be full for any variation of these watermarks, it will 1311 * be considered full for up to one second by all requests, unless 1312 * we are so low on memory on all allowed nodes that we are forced 1313 * into the second scan of the zonelist. 1314 * 1315 * In the second scan we ignore this zonelist cache and exactly 1316 * apply the watermarks to all zones, even it is slower to do so. 1317 * We are low on memory in the second scan, and should leave no stone 1318 * unturned looking for a free page. 1319 */ 1320static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, 1321 nodemask_t *allowednodes) 1322{ 1323 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1324 int i; /* index of *z in zonelist zones */ 1325 int n; /* node that zone *z is on */ 1326 1327 zlc = zonelist->zlcache_ptr; 1328 if (!zlc) 1329 return 1; 1330 1331 i = z - zonelist->_zonerefs; 1332 n = zlc->z_to_n[i]; 1333 1334 /* This zone is worth trying if it is allowed but not full */ 1335 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones); 1336} 1337 1338/* 1339 * Given 'z' scanning a zonelist, set the corresponding bit in 1340 * zlc->fullzones, so that subsequent attempts to allocate a page 1341 * from that zone don't waste time re-examining it. 1342 */ 1343static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 1344{ 1345 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1346 int i; /* index of *z in zonelist zones */ 1347 1348 zlc = zonelist->zlcache_ptr; 1349 if (!zlc) 1350 return; 1351 1352 i = z - zonelist->_zonerefs; 1353 1354 set_bit(i, zlc->fullzones); 1355} 1356 1357#else /* CONFIG_NUMA */ 1358 1359static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1360{ 1361 return NULL; 1362} 1363 1364static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, 1365 nodemask_t *allowednodes) 1366{ 1367 return 1; 1368} 1369 1370static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 1371{ 1372} 1373#endif /* CONFIG_NUMA */ 1374 1375/* 1376 * get_page_from_freelist goes through the zonelist trying to allocate 1377 * a page. 
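 *
 * Which watermark is enforced depends on alloc_flags: the fast-path
 * call from __alloc_pages_internal() passes ALLOC_WMARK_LOW|ALLOC_CPUSET,
 * the retry after kswapd has been kicked uses ALLOC_WMARK_MIN (plus
 * ALLOC_HARDER/ALLOC_HIGH for atomic or __GFP_HIGH requests), and
 * PF_MEMALLOC callers skip the check entirely via ALLOC_NO_WATERMARKS.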
1378 */ 1379static struct page * 1380get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, 1381 struct zonelist *zonelist, int high_zoneidx, int alloc_flags) 1382{ 1383 struct zoneref *z; 1384 struct page *page = NULL; 1385 int classzone_idx; 1386 struct zone *zone, *preferred_zone; 1387 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ 1388 int zlc_active = 0; /* set if using zonelist_cache */ 1389 int did_zlc_setup = 0; /* just call zlc_setup() one time */ 1390 1391 (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask, 1392 &preferred_zone); 1393 classzone_idx = zone_idx(preferred_zone); 1394 1395zonelist_scan: 1396 /* 1397 * Scan zonelist, looking for a zone with enough free. 1398 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1399 */ 1400 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1401 high_zoneidx, nodemask) { 1402 if (NUMA_BUILD && zlc_active && 1403 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 1404 continue; 1405 if ((alloc_flags & ALLOC_CPUSET) && 1406 !cpuset_zone_allowed_softwall(zone, gfp_mask)) 1407 goto try_next_zone; 1408 1409 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 1410 unsigned long mark; 1411 if (alloc_flags & ALLOC_WMARK_MIN) 1412 mark = zone->pages_min; 1413 else if (alloc_flags & ALLOC_WMARK_LOW) 1414 mark = zone->pages_low; 1415 else 1416 mark = zone->pages_high; 1417 if (!zone_watermark_ok(zone, order, mark, 1418 classzone_idx, alloc_flags)) { 1419 if (!zone_reclaim_mode || 1420 !zone_reclaim(zone, gfp_mask, order)) 1421 goto this_zone_full; 1422 } 1423 } 1424 1425 page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask); 1426 if (page) 1427 break; 1428this_zone_full: 1429 if (NUMA_BUILD) 1430 zlc_mark_zone_full(zonelist, z); 1431try_next_zone: 1432 if (NUMA_BUILD && !did_zlc_setup) { 1433 /* we do zlc_setup after the first zone is tried */ 1434 allowednodes = zlc_setup(zonelist, alloc_flags); 1435 zlc_active = 1; 1436 did_zlc_setup = 1; 1437 } 1438 } 1439 1440 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) { 1441 /* Disable zlc cache for second zonelist scan */ 1442 zlc_active = 0; 1443 goto zonelist_scan; 1444 } 1445 return page; 1446} 1447 1448/* 1449 * This is the 'heart' of the zoned buddy allocator. 1450 */ 1451static struct page * 1452__alloc_pages_internal(gfp_t gfp_mask, unsigned int order, 1453 struct zonelist *zonelist, nodemask_t *nodemask) 1454{ 1455 const gfp_t wait = gfp_mask & __GFP_WAIT; 1456 enum zone_type high_zoneidx = gfp_zone(gfp_mask); 1457 struct zoneref *z; 1458 struct zone *zone; 1459 struct page *page; 1460 struct reclaim_state reclaim_state; 1461 struct task_struct *p = current; 1462 int do_retry; 1463 int alloc_flags; 1464 unsigned long did_some_progress; 1465 unsigned long pages_reclaimed = 0; 1466 1467 might_sleep_if(wait); 1468 1469 if (should_fail_alloc_page(gfp_mask, order)) 1470 return NULL; 1471 1472restart: 1473 z = zonelist->_zonerefs; /* the list of zones suitable for gfp_mask */ 1474 1475 if (unlikely(!z->zone)) { 1476 /* 1477 * Happens if we have an empty zonelist as a result of 1478 * GFP_THISNODE being used on a memoryless node 1479 */ 1480 return NULL; 1481 } 1482 1483 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, 1484 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET); 1485 if (page) 1486 goto got_pg; 1487 1488 /* 1489 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and 1490 * __GFP_NOWARN set) should not cause reclaim since the subsystem 1491 * (f.e. 
slab) using GFP_THISNODE may choose to trigger reclaim 1492 * using a larger set of nodes after it has established that the 1493 * allowed per node queues are empty and that nodes are 1494 * over allocated. 1495 */ 1496 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 1497 goto nopage; 1498 1499 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) 1500 wakeup_kswapd(zone, order); 1501 1502 /* 1503 * OK, we're below the kswapd watermark and have kicked background 1504 * reclaim. Now things get more complex, so set up alloc_flags according 1505 * to how we want to proceed. 1506 * 1507 * The caller may dip into page reserves a bit more if the caller 1508 * cannot run direct reclaim, or if the caller has realtime scheduling 1509 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 1510 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 1511 */ 1512 alloc_flags = ALLOC_WMARK_MIN; 1513 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) 1514 alloc_flags |= ALLOC_HARDER; 1515 if (gfp_mask & __GFP_HIGH) 1516 alloc_flags |= ALLOC_HIGH; 1517 if (wait) 1518 alloc_flags |= ALLOC_CPUSET; 1519 1520 /* 1521 * Go through the zonelist again. Let __GFP_HIGH and allocations 1522 * coming from realtime tasks go deeper into reserves. 1523 * 1524 * This is the last chance, in general, before the goto nopage. 1525 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 1526 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1527 */ 1528 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, 1529 high_zoneidx, alloc_flags); 1530 if (page) 1531 goto got_pg; 1532 1533 /* This allocation should allow future memory freeing. */ 1534 1535rebalance: 1536 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) 1537 && !in_interrupt()) { 1538 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 1539nofail_alloc: 1540 /* go through the zonelist yet again, ignoring mins */ 1541 page = get_page_from_freelist(gfp_mask, nodemask, order, 1542 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS); 1543 if (page) 1544 goto got_pg; 1545 if (gfp_mask & __GFP_NOFAIL) { 1546 congestion_wait(WRITE, HZ/50); 1547 goto nofail_alloc; 1548 } 1549 } 1550 goto nopage; 1551 } 1552 1553 /* Atomic allocations - we can't balance anything */ 1554 if (!wait) 1555 goto nopage; 1556 1557 cond_resched(); 1558 1559 /* We now go into synchronous reclaim */ 1560 cpuset_memory_pressure_bump(); 1561 p->flags |= PF_MEMALLOC; 1562 reclaim_state.reclaimed_slab = 0; 1563 p->reclaim_state = &reclaim_state; 1564 1565 did_some_progress = try_to_free_pages(zonelist, order, gfp_mask); 1566 1567 p->reclaim_state = NULL; 1568 p->flags &= ~PF_MEMALLOC; 1569 1570 cond_resched(); 1571 1572 if (order != 0) 1573 drain_all_pages(); 1574 1575 if (likely(did_some_progress)) { 1576 page = get_page_from_freelist(gfp_mask, nodemask, order, 1577 zonelist, high_zoneidx, alloc_flags); 1578 if (page) 1579 goto got_pg; 1580 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 1581 if (!try_set_zone_oom(zonelist, gfp_mask)) { 1582 schedule_timeout_uninterruptible(1); 1583 goto restart; 1584 } 1585 1586 /* 1587 * Go through the zonelist yet one more time, keep 1588 * very high watermark here, this is only to catch 1589 * a parallel oom killing, we must fail if we're still 1590 * under heavy pressure. 
1591 */ 1592 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, 1593 order, zonelist, high_zoneidx, 1594 ALLOC_WMARK_HIGH|ALLOC_CPUSET); 1595 if (page) { 1596 clear_zonelist_oom(zonelist, gfp_mask); 1597 goto got_pg; 1598 } 1599 1600 /* The OOM killer will not help higher order allocs so fail */ 1601 if (order > PAGE_ALLOC_COSTLY_ORDER) { 1602 clear_zonelist_oom(zonelist, gfp_mask); 1603 goto nopage; 1604 } 1605 1606 out_of_memory(zonelist, gfp_mask, order); 1607 clear_zonelist_oom(zonelist, gfp_mask); 1608 goto restart; 1609 } 1610 1611 /* 1612 * Don't let big-order allocations loop unless the caller explicitly 1613 * requests that. Wait for some write requests to complete then retry. 1614 * 1615 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER 1616 * means __GFP_NOFAIL, but that may not be true in other 1617 * implementations. 1618 * 1619 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is 1620 * specified, then we retry until we no longer reclaim any pages 1621 * (above), or we've reclaimed an order of pages at least as 1622 * large as the allocation's order. In both cases, if the 1623 * allocation still fails, we stop retrying. 1624 */ 1625 pages_reclaimed += did_some_progress; 1626 do_retry = 0; 1627 if (!(gfp_mask & __GFP_NORETRY)) { 1628 if (order <= PAGE_ALLOC_COSTLY_ORDER) { 1629 do_retry = 1; 1630 } else { 1631 if (gfp_mask & __GFP_REPEAT && 1632 pages_reclaimed < (1 << order)) 1633 do_retry = 1; 1634 } 1635 if (gfp_mask & __GFP_NOFAIL) 1636 do_retry = 1; 1637 } 1638 if (do_retry) { 1639 congestion_wait(WRITE, HZ/50); 1640 goto rebalance; 1641 } 1642 1643nopage: 1644 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 1645 printk(KERN_WARNING "%s: page allocation failure." 1646 " order:%d, mode:0x%x\n", 1647 p->comm, order, gfp_mask); 1648 dump_stack(); 1649 show_mem(); 1650 } 1651got_pg: 1652 return page; 1653} 1654 1655struct page * 1656__alloc_pages(gfp_t gfp_mask, unsigned int order, 1657 struct zonelist *zonelist) 1658{ 1659 return __alloc_pages_internal(gfp_mask, order, zonelist, NULL); 1660} 1661 1662struct page * 1663__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 1664 struct zonelist *zonelist, nodemask_t *nodemask) 1665{ 1666 return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask); 1667} 1668 1669EXPORT_SYMBOL(__alloc_pages); 1670 1671/* 1672 * Common helper functions. 
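 *
 * For example, a caller needing four physically contiguous zeroed
 * lowmem pages might use
 *
 *	addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *	...
 *	free_pages(addr, 2);
 *
 * while get_zeroed_page(GFP_KERNEL) is the single-page equivalent.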
1673 */ 1674unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 1675{ 1676 struct page * page; 1677 page = alloc_pages(gfp_mask, order); 1678 if (!page) 1679 return 0; 1680 return (unsigned long) page_address(page); 1681} 1682 1683EXPORT_SYMBOL(__get_free_pages); 1684 1685unsigned long get_zeroed_page(gfp_t gfp_mask) 1686{ 1687 struct page * page; 1688 1689 /* 1690 * get_zeroed_page() returns a 32-bit address, which cannot represent 1691 * a highmem page 1692 */ 1693 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1694 1695 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1696 if (page) 1697 return (unsigned long) page_address(page); 1698 return 0; 1699} 1700 1701EXPORT_SYMBOL(get_zeroed_page); 1702 1703void __pagevec_free(struct pagevec *pvec) 1704{ 1705 int i = pagevec_count(pvec); 1706 1707 while (--i >= 0) 1708 free_hot_cold_page(pvec->pages[i], pvec->cold); 1709} 1710 1711void __free_pages(struct page *page, unsigned int order) 1712{ 1713 if (put_page_testzero(page)) { 1714 if (order == 0) 1715 free_hot_page(page); 1716 else 1717 __free_pages_ok(page, order); 1718 } 1719} 1720 1721EXPORT_SYMBOL(__free_pages); 1722 1723void free_pages(unsigned long addr, unsigned int order) 1724{ 1725 if (addr != 0) { 1726 VM_BUG_ON(!virt_addr_valid((void *)addr)); 1727 __free_pages(virt_to_page((void *)addr), order); 1728 } 1729} 1730 1731EXPORT_SYMBOL(free_pages); 1732 1733static unsigned int nr_free_zone_pages(int offset) 1734{ 1735 struct zoneref *z; 1736 struct zone *zone; 1737 1738 /* Just pick one node, since fallback list is circular */ 1739 unsigned int sum = 0; 1740 1741 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 1742 1743 for_each_zone_zonelist(zone, z, zonelist, offset) { 1744 unsigned long size = zone->present_pages; 1745 unsigned long high = zone->pages_high; 1746 if (size > high) 1747 sum += size - high; 1748 } 1749 1750 return sum; 1751} 1752 1753/* 1754 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 1755 */ 1756unsigned int nr_free_buffer_pages(void) 1757{ 1758 return nr_free_zone_pages(gfp_zone(GFP_USER)); 1759} 1760EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 1761 1762/* 1763 * Amount of free RAM allocatable within all zones 1764 */ 1765unsigned int nr_free_pagecache_pages(void) 1766{ 1767 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 1768} 1769 1770static inline void show_node(struct zone *zone) 1771{ 1772 if (NUMA_BUILD) 1773 printk("Node %d ", zone_to_nid(zone)); 1774} 1775 1776void si_meminfo(struct sysinfo *val) 1777{ 1778 val->totalram = totalram_pages; 1779 val->sharedram = 0; 1780 val->freeram = global_page_state(NR_FREE_PAGES); 1781 val->bufferram = nr_blockdev_pages(); 1782 val->totalhigh = totalhigh_pages; 1783 val->freehigh = nr_free_highpages(); 1784 val->mem_unit = PAGE_SIZE; 1785} 1786 1787EXPORT_SYMBOL(si_meminfo); 1788 1789#ifdef CONFIG_NUMA 1790void si_meminfo_node(struct sysinfo *val, int nid) 1791{ 1792 pg_data_t *pgdat = NODE_DATA(nid); 1793 1794 val->totalram = pgdat->node_present_pages; 1795 val->freeram = node_page_state(nid, NR_FREE_PAGES); 1796#ifdef CONFIG_HIGHMEM 1797 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 1798 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 1799 NR_FREE_PAGES); 1800#else 1801 val->totalhigh = 0; 1802 val->freehigh = 0; 1803#endif 1804 val->mem_unit = PAGE_SIZE; 1805} 1806#endif 1807 1808#define K(x) ((x) << (PAGE_SHIFT-10)) 1809 1810/* 1811 * Show free area list (used inside shift_scroll-lock stuff) 1812 * We also calculate the 
percentage fragmentation. We do this by counting the 1813 * memory on each free list with the exception of the first item on the list. 1814 */ 1815void show_free_areas(void) 1816{ 1817 int cpu; 1818 struct zone *zone; 1819 1820 for_each_zone(zone) { 1821 if (!populated_zone(zone)) 1822 continue; 1823 1824 show_node(zone); 1825 printk("%s per-cpu:\n", zone->name); 1826 1827 for_each_online_cpu(cpu) { 1828 struct per_cpu_pageset *pageset; 1829 1830 pageset = zone_pcp(zone, cpu); 1831 1832 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n", 1833 cpu, pageset->pcp.high, 1834 pageset->pcp.batch, pageset->pcp.count); 1835 } 1836 } 1837 1838 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n" 1839 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n", 1840 global_page_state(NR_ACTIVE), 1841 global_page_state(NR_INACTIVE), 1842 global_page_state(NR_FILE_DIRTY), 1843 global_page_state(NR_WRITEBACK), 1844 global_page_state(NR_UNSTABLE_NFS), 1845 global_page_state(NR_FREE_PAGES), 1846 global_page_state(NR_SLAB_RECLAIMABLE) + 1847 global_page_state(NR_SLAB_UNRECLAIMABLE), 1848 global_page_state(NR_FILE_MAPPED), 1849 global_page_state(NR_PAGETABLE), 1850 global_page_state(NR_BOUNCE)); 1851 1852 for_each_zone(zone) { 1853 int i; 1854 1855 if (!populated_zone(zone)) 1856 continue; 1857 1858 show_node(zone); 1859 printk("%s" 1860 " free:%lukB" 1861 " min:%lukB" 1862 " low:%lukB" 1863 " high:%lukB" 1864 " active:%lukB" 1865 " inactive:%lukB" 1866 " present:%lukB" 1867 " pages_scanned:%lu" 1868 " all_unreclaimable? %s" 1869 "\n", 1870 zone->name, 1871 K(zone_page_state(zone, NR_FREE_PAGES)), 1872 K(zone->pages_min), 1873 K(zone->pages_low), 1874 K(zone->pages_high), 1875 K(zone_page_state(zone, NR_ACTIVE)), 1876 K(zone_page_state(zone, NR_INACTIVE)), 1877 K(zone->present_pages), 1878 zone->pages_scanned, 1879 (zone_is_all_unreclaimable(zone) ? "yes" : "no") 1880 ); 1881 printk("lowmem_reserve[]:"); 1882 for (i = 0; i < MAX_NR_ZONES; i++) 1883 printk(" %lu", zone->lowmem_reserve[i]); 1884 printk("\n"); 1885 } 1886 1887 for_each_zone(zone) { 1888 unsigned long nr[MAX_ORDER], flags, order, total = 0; 1889 1890 if (!populated_zone(zone)) 1891 continue; 1892 1893 show_node(zone); 1894 printk("%s: ", zone->name); 1895 1896 spin_lock_irqsave(&zone->lock, flags); 1897 for (order = 0; order < MAX_ORDER; order++) { 1898 nr[order] = zone->free_area[order].nr_free; 1899 total += nr[order] << order; 1900 } 1901 spin_unlock_irqrestore(&zone->lock, flags); 1902 for (order = 0; order < MAX_ORDER; order++) 1903 printk("%lu*%lukB ", nr[order], K(1UL) << order); 1904 printk("= %lukB\n", K(total)); 1905 } 1906 1907 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); 1908 1909 show_swap_cache_info(); 1910} 1911 1912static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 1913{ 1914 zoneref->zone = zone; 1915 zoneref->zone_idx = zone_idx(zone); 1916} 1917 1918/* 1919 * Builds allocation fallback zone lists. 1920 * 1921 * Add all populated zones of a node to the zonelist. 
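 *
 * Zones are added from the requested zone type downwards. For example,
 * on a node with populated DMA, Normal and HighMem zones, a call with
 * zone_type == ZONE_HIGHMEM appends zonerefs for HighMem, then Normal,
 * then DMA, so fallback proceeds towards the lower, more precious zones.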
1922 */ 1923static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, 1924 int nr_zones, enum zone_type zone_type) 1925{ 1926 struct zone *zone; 1927 1928 BUG_ON(zone_type >= MAX_NR_ZONES); 1929 zone_type++; 1930 1931 do { 1932 zone_type--; 1933 zone = pgdat->node_zones + zone_type; 1934 if (populated_zone(zone)) { 1935 zoneref_set_zone(zone, 1936 &zonelist->_zonerefs[nr_zones++]); 1937 check_highest_zone(zone_type); 1938 } 1939 1940 } while (zone_type); 1941 return nr_zones; 1942} 1943 1944 1945/* 1946 * zonelist_order: 1947 * 0 = automatic detection of better ordering. 1948 * 1 = order by ([node] distance, -zonetype) 1949 * 2 = order by (-zonetype, [node] distance) 1950 * 1951 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create 1952 * the same zonelist. So only NUMA can configure this param. 1953 */ 1954#define ZONELIST_ORDER_DEFAULT 0 1955#define ZONELIST_ORDER_NODE 1 1956#define ZONELIST_ORDER_ZONE 2 1957 1958/* zonelist order in the kernel. 1959 * set_zonelist_order() will set this to NODE or ZONE. 1960 */ 1961static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; 1962static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; 1963 1964 1965#ifdef CONFIG_NUMA 1966/* The value user specified ....changed by config */ 1967static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; 1968/* string for sysctl */ 1969#define NUMA_ZONELIST_ORDER_LEN 16 1970char numa_zonelist_order[16] = "default"; 1971 1972/* 1973 * interface for configure zonelist ordering. 1974 * command line option "numa_zonelist_order" 1975 * = "[dD]efault - default, automatic configuration. 1976 * = "[nN]ode - order by node locality, then by zone within node 1977 * = "[zZ]one - order by zone, then by locality within zone 1978 */ 1979 1980static int __parse_numa_zonelist_order(char *s) 1981{ 1982 if (*s == 'd' || *s == 'D') { 1983 user_zonelist_order = ZONELIST_ORDER_DEFAULT; 1984 } else if (*s == 'n' || *s == 'N') { 1985 user_zonelist_order = ZONELIST_ORDER_NODE; 1986 } else if (*s == 'z' || *s == 'Z') { 1987 user_zonelist_order = ZONELIST_ORDER_ZONE; 1988 } else { 1989 printk(KERN_WARNING 1990 "Ignoring invalid numa_zonelist_order value: " 1991 "%s\n", s); 1992 return -EINVAL; 1993 } 1994 return 0; 1995} 1996 1997static __init int setup_numa_zonelist_order(char *s) 1998{ 1999 if (s) 2000 return __parse_numa_zonelist_order(s); 2001 return 0; 2002} 2003early_param("numa_zonelist_order", setup_numa_zonelist_order); 2004 2005/* 2006 * sysctl handler for numa_zonelist_order 2007 */ 2008int numa_zonelist_order_handler(ctl_table *table, int write, 2009 struct file *file, void __user *buffer, size_t *length, 2010 loff_t *ppos) 2011{ 2012 char saved_string[NUMA_ZONELIST_ORDER_LEN]; 2013 int ret; 2014 2015 if (write) 2016 strncpy(saved_string, (char*)table->data, 2017 NUMA_ZONELIST_ORDER_LEN); 2018 ret = proc_dostring(table, write, file, buffer, length, ppos); 2019 if (ret) 2020 return ret; 2021 if (write) { 2022 int oldval = user_zonelist_order; 2023 if (__parse_numa_zonelist_order((char*)table->data)) { 2024 /* 2025 * bogus value. 
restore saved string 2026 */ 2027 strncpy((char*)table->data, saved_string, 2028 NUMA_ZONELIST_ORDER_LEN); 2029 user_zonelist_order = oldval; 2030 } else if (oldval != user_zonelist_order) 2031 build_all_zonelists(); 2032 } 2033 return 0; 2034} 2035 2036 2037#define MAX_NODE_LOAD (num_online_nodes()) 2038static int node_load[MAX_NUMNODES]; 2039 2040/** 2041 * find_next_best_node - find the next node that should appear in a given node's fallback list 2042 * @node: node whose fallback list we're appending 2043 * @used_node_mask: nodemask_t of already used nodes 2044 * 2045 * We use a number of factors to determine which is the next node that should 2046 * appear on a given node's fallback list. The node should not have appeared 2047 * already in @node's fallback list, and it should be the next closest node 2048 * according to the distance array (which contains arbitrary distance values 2049 * from each node to each node in the system), and should also prefer nodes 2050 * with no CPUs, since presumably they'll have very little allocation pressure 2051 * on them otherwise. 2052 * It returns -1 if no node is found. 2053 */ 2054static int find_next_best_node(int node, nodemask_t *used_node_mask) 2055{ 2056 int n, val; 2057 int min_val = INT_MAX; 2058 int best_node = -1; 2059 node_to_cpumask_ptr(tmp, 0); 2060 2061 /* Use the local node if we haven't already */ 2062 if (!node_isset(node, *used_node_mask)) { 2063 node_set(node, *used_node_mask); 2064 return node; 2065 } 2066 2067 for_each_node_state(n, N_HIGH_MEMORY) { 2068 2069 /* Don't want a node to appear more than once */ 2070 if (node_isset(n, *used_node_mask)) 2071 continue; 2072 2073 /* Use the distance array to find the distance */ 2074 val = node_distance(node, n); 2075 2076 /* Penalize nodes under us ("prefer the next node") */ 2077 val += (n < node); 2078 2079 /* Give preference to headless and unused nodes */ 2080 node_to_cpumask_ptr_next(tmp, n); 2081 if (!cpus_empty(*tmp)) 2082 val += PENALTY_FOR_NODE_WITH_CPUS; 2083 2084 /* Slight preference for less loaded node */ 2085 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 2086 val += node_load[n]; 2087 2088 if (val < min_val) { 2089 min_val = val; 2090 best_node = n; 2091 } 2092 } 2093 2094 if (best_node >= 0) 2095 node_set(best_node, *used_node_mask); 2096 2097 return best_node; 2098} 2099 2100 2101/* 2102 * Build zonelists ordered by node and zones within node. 2103 * This results in maximum locality--normal zone overflows into local 2104 * DMA zone, if any--but risks exhausting DMA zone. 2105 */ 2106static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 2107{ 2108 int j; 2109 struct zonelist *zonelist; 2110 2111 zonelist = &pgdat->node_zonelists[0]; 2112 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) 2113 ; 2114 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2115 MAX_NR_ZONES - 1); 2116 zonelist->_zonerefs[j].zone = NULL; 2117 zonelist->_zonerefs[j].zone_idx = 0; 2118} 2119 2120/* 2121 * Build gfp_thisnode zonelists 2122 */ 2123static void build_thisnode_zonelists(pg_data_t *pgdat) 2124{ 2125 int j; 2126 struct zonelist *zonelist; 2127 2128 zonelist = &pgdat->node_zonelists[1]; 2129 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); 2130 zonelist->_zonerefs[j].zone = NULL; 2131 zonelist->_zonerefs[j].zone_idx = 0; 2132} 2133 2134/* 2135 * Build zonelists ordered by zone and nodes within zones. 
2136 * This results in conserving DMA zone[s] until all Normal memory is 2137 * exhausted, but results in overflowing to remote node while memory 2138 * may still exist in local DMA zone. 2139 */ 2140static int node_order[MAX_NUMNODES]; 2141 2142static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes) 2143{ 2144 int pos, j, node; 2145 int zone_type; /* needs to be signed */ 2146 struct zone *z; 2147 struct zonelist *zonelist; 2148 2149 zonelist = &pgdat->node_zonelists[0]; 2150 pos = 0; 2151 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) { 2152 for (j = 0; j < nr_nodes; j++) { 2153 node = node_order[j]; 2154 z = &NODE_DATA(node)->node_zones[zone_type]; 2155 if (populated_zone(z)) { 2156 zoneref_set_zone(z, 2157 &zonelist->_zonerefs[pos++]); 2158 check_highest_zone(zone_type); 2159 } 2160 } 2161 } 2162 zonelist->_zonerefs[pos].zone = NULL; 2163 zonelist->_zonerefs[pos].zone_idx = 0; 2164} 2165 2166static int default_zonelist_order(void) 2167{ 2168 int nid, zone_type; 2169 unsigned long low_kmem_size, total_size; 2170 struct zone *z; 2171 int average_size; 2172 /* 2173 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system. 2174 * If they are really small and used heavily, the system can fall 2175 * into OOM very easily. 2176 * This function detects ZONE_DMA/DMA32 size and configures zone order. 2177 */ 2178 /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */ 2179 low_kmem_size = 0; 2180 total_size = 0; 2181 for_each_online_node(nid) { 2182 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 2183 z = &NODE_DATA(nid)->node_zones[zone_type]; 2184 if (populated_zone(z)) { 2185 if (zone_type < ZONE_NORMAL) 2186 low_kmem_size += z->present_pages; 2187 total_size += z->present_pages; 2188 } 2189 } 2190 } 2191 if (!low_kmem_size || /* there is no DMA area. */ 2192 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */ 2193 return ZONELIST_ORDER_NODE; 2194 /* 2195 * Look into each node's config. 2196 * If there is a node whose DMA/DMA32 memory is a very large share of 2197 * its local memory, NODE order may be suitable.
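 * (Illustrative worked example, numbers invented: a node with 512MB of
 * memory of which 400MB sits in ZONE_DMA/DMA32 has low_kmem_size at about
 * 78% of total_size; that exceeds the 70% threshold tested below, so,
 * provided the node is not smaller than average, NODE ordering is chosen.)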
2198 */ 2199 average_size = total_size / 2200 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1); 2201 for_each_online_node(nid) { 2202 low_kmem_size = 0; 2203 total_size = 0; 2204 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 2205 z = &NODE_DATA(nid)->node_zones[zone_type]; 2206 if (populated_zone(z)) { 2207 if (zone_type < ZONE_NORMAL) 2208 low_kmem_size += z->present_pages; 2209 total_size += z->present_pages; 2210 } 2211 } 2212 if (low_kmem_size && 2213 total_size > average_size && /* ignore small node */ 2214 low_kmem_size > total_size * 70/100) 2215 return ZONELIST_ORDER_NODE; 2216 } 2217 return ZONELIST_ORDER_ZONE; 2218} 2219 2220static void set_zonelist_order(void) 2221{ 2222 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) 2223 current_zonelist_order = default_zonelist_order(); 2224 else 2225 current_zonelist_order = user_zonelist_order; 2226} 2227 2228static void build_zonelists(pg_data_t *pgdat) 2229{ 2230 int j, node, load; 2231 enum zone_type i; 2232 nodemask_t used_mask; 2233 int local_node, prev_node; 2234 struct zonelist *zonelist; 2235 int order = current_zonelist_order; 2236 2237 /* initialize zonelists */ 2238 for (i = 0; i < MAX_ZONELISTS; i++) { 2239 zonelist = pgdat->node_zonelists + i; 2240 zonelist->_zonerefs[0].zone = NULL; 2241 zonelist->_zonerefs[0].zone_idx = 0; 2242 } 2243 2244 /* NUMA-aware ordering of nodes */ 2245 local_node = pgdat->node_id; 2246 load = num_online_nodes(); 2247 prev_node = local_node; 2248 nodes_clear(used_mask); 2249 2250 memset(node_load, 0, sizeof(node_load)); 2251 memset(node_order, 0, sizeof(node_order)); 2252 j = 0; 2253 2254 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 2255 int distance = node_distance(local_node, node); 2256 2257 /* 2258 * If another node is sufficiently far away then it is better 2259 * to reclaim pages in a zone before going off node. 2260 */ 2261 if (distance > RECLAIM_DISTANCE) 2262 zone_reclaim_mode = 1; 2263 2264 /* 2265 * We don't want to pressure a particular node. 2266 * So adding penalty to the first node in same 2267 * distance group to make it round-robin. 2268 */ 2269 if (distance != node_distance(local_node, prev_node)) 2270 node_load[node] = load; 2271 2272 prev_node = node; 2273 load--; 2274 if (order == ZONELIST_ORDER_NODE) 2275 build_zonelists_in_node_order(pgdat, node); 2276 else 2277 node_order[j++] = node; /* remember order */ 2278 } 2279 2280 if (order == ZONELIST_ORDER_ZONE) { 2281 /* calculate node order -- i.e., DMA last! 
*/ 2282 build_zonelists_in_zone_order(pgdat, j); 2283 } 2284 2285 build_thisnode_zonelists(pgdat); 2286} 2287 2288/* Construct the zonelist performance cache - see further mmzone.h */ 2289static void build_zonelist_cache(pg_data_t *pgdat) 2290{ 2291 struct zonelist *zonelist; 2292 struct zonelist_cache *zlc; 2293 struct zoneref *z; 2294 2295 zonelist = &pgdat->node_zonelists[0]; 2296 zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 2297 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 2298 for (z = zonelist->_zonerefs; z->zone; z++) 2299 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z); 2300} 2301 2302 2303#else /* CONFIG_NUMA */ 2304 2305static void set_zonelist_order(void) 2306{ 2307 current_zonelist_order = ZONELIST_ORDER_ZONE; 2308} 2309 2310static void build_zonelists(pg_data_t *pgdat) 2311{ 2312 int node, local_node; 2313 enum zone_type j; 2314 struct zonelist *zonelist; 2315 2316 local_node = pgdat->node_id; 2317 2318 zonelist = &pgdat->node_zonelists[0]; 2319 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); 2320 2321 /* 2322 * Now we build the zonelist so that it contains the zones 2323 * of all the other nodes. 2324 * We don't want to pressure a particular node, so when 2325 * building the zones for node N, we make sure that the 2326 * zones coming right after the local ones are those from 2327 * node N+1 (modulo N) 2328 */ 2329 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 2330 if (!node_online(node)) 2331 continue; 2332 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2333 MAX_NR_ZONES - 1); 2334 } 2335 for (node = 0; node < local_node; node++) { 2336 if (!node_online(node)) 2337 continue; 2338 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2339 MAX_NR_ZONES - 1); 2340 } 2341 2342 zonelist->_zonerefs[j].zone = NULL; 2343 zonelist->_zonerefs[j].zone_idx = 0; 2344} 2345 2346/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ 2347static void build_zonelist_cache(pg_data_t *pgdat) 2348{ 2349 pgdat->node_zonelists[0].zlcache_ptr = NULL; 2350 pgdat->node_zonelists[1].zlcache_ptr = NULL; 2351} 2352 2353#endif /* CONFIG_NUMA */ 2354 2355/* return values int ....just for stop_machine_run() */ 2356static int __build_all_zonelists(void *dummy) 2357{ 2358 int nid; 2359 2360 for_each_online_node(nid) { 2361 pg_data_t *pgdat = NODE_DATA(nid); 2362 2363 build_zonelists(pgdat); 2364 build_zonelist_cache(pgdat); 2365 } 2366 return 0; 2367} 2368 2369void build_all_zonelists(void) 2370{ 2371 set_zonelist_order(); 2372 2373 if (system_state == SYSTEM_BOOTING) { 2374 __build_all_zonelists(NULL); 2375 cpuset_init_current_mems_allowed(); 2376 } else { 2377 /* we have to stop all cpus to guarantee there is no user 2378 of zonelist */ 2379 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); 2380 /* cpuset refresh routine should be here */ 2381 } 2382 vm_total_pages = nr_free_pagecache_pages(); 2383 /* 2384 * Disable grouping by mobility if the number of pages in the 2385 * system is too low to allow the mechanism to work. It would be 2386 * more accurate, but expensive to check per-zone. This check is 2387 * made on memory-hotadd so a system can start with mobility 2388 * disabled and enable it later 2389 */ 2390 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 2391 page_group_by_mobility_disabled = 1; 2392 else 2393 page_group_by_mobility_disabled = 0; 2394 2395 printk("Built %i zonelists in %s order, mobility grouping %s. 
" 2396 "Total pages: %ld\n", 2397 num_online_nodes(), 2398 zonelist_order_name[current_zonelist_order], 2399 page_group_by_mobility_disabled ? "off" : "on", 2400 vm_total_pages); 2401#ifdef CONFIG_NUMA 2402 printk("Policy zone: %s\n", zone_names[policy_zone]); 2403#endif 2404} 2405 2406/* 2407 * Helper functions to size the waitqueue hash table. 2408 * Essentially these want to choose hash table sizes sufficiently 2409 * large so that collisions trying to wait on pages are rare. 2410 * But in fact, the number of active page waitqueues on typical 2411 * systems is ridiculously low, less than 200. So this is even 2412 * conservative, even though it seems large. 2413 * 2414 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 2415 * waitqueues, i.e. the size of the waitq table given the number of pages. 2416 */ 2417#define PAGES_PER_WAITQUEUE 256 2418 2419#ifndef CONFIG_MEMORY_HOTPLUG 2420static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 2421{ 2422 unsigned long size = 1; 2423 2424 pages /= PAGES_PER_WAITQUEUE; 2425 2426 while (size < pages) 2427 size <<= 1; 2428 2429 /* 2430 * Once we have dozens or even hundreds of threads sleeping 2431 * on IO we've got bigger problems than wait queue collision. 2432 * Limit the size of the wait table to a reasonable size. 2433 */ 2434 size = min(size, 4096UL); 2435 2436 return max(size, 4UL); 2437} 2438#else 2439/* 2440 * A zone's size might be changed by hot-add, so it is not possible to determine 2441 * a suitable size for its wait_table. So we use the maximum size now. 2442 * 2443 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 2444 * 2445 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 2446 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 2447 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 2448 * 2449 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 2450 * or more by the traditional way. (See above). It equals: 2451 * 2452 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 2453 * ia64(16K page size) : = ( 8G + 4M)byte. 2454 * powerpc (64K page size) : = (32G +16M)byte. 2455 */ 2456static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 2457{ 2458 return 4096UL; 2459} 2460#endif 2461 2462/* 2463 * This is an integer logarithm so that shifts can be used later 2464 * to extract the more random high bits from the multiplicative 2465 * hash function before the remainder is taken. 2466 */ 2467static inline unsigned long wait_table_bits(unsigned long size) 2468{ 2469 return ffz(~size); 2470} 2471 2472#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 2473 2474/* 2475 * Mark a number of pageblocks as MIGRATE_RESERVE. The number 2476 * of blocks reserved is based on zone->pages_min. The memory within the 2477 * reserve will tend to store contiguous free pages. 
Setting min_free_kbytes 2478 * higher will lead to a bigger reserve which will get freed as contiguous 2479 * blocks as reclaim kicks in 2480 */ 2481static void setup_zone_migrate_reserve(struct zone *zone) 2482{ 2483 unsigned long start_pfn, pfn, end_pfn; 2484 struct page *page; 2485 unsigned long reserve, block_migratetype; 2486 2487 /* Get the start pfn, end pfn and the number of blocks to reserve */ 2488 start_pfn = zone->zone_start_pfn; 2489 end_pfn = start_pfn + zone->spanned_pages; 2490 reserve = roundup(zone->pages_min, pageblock_nr_pages) >> 2491 pageblock_order; 2492 2493 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 2494 if (!pfn_valid(pfn)) 2495 continue; 2496 page = pfn_to_page(pfn); 2497 2498 /* Blocks with reserved pages will never free, skip them. */ 2499 if (PageReserved(page)) 2500 continue; 2501 2502 block_migratetype = get_pageblock_migratetype(page); 2503 2504 /* If this block is reserved, account for it */ 2505 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) { 2506 reserve--; 2507 continue; 2508 } 2509 2510 /* Suitable for reserving if this block is movable */ 2511 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) { 2512 set_pageblock_migratetype(page, MIGRATE_RESERVE); 2513 move_freepages_block(zone, page, MIGRATE_RESERVE); 2514 reserve--; 2515 continue; 2516 } 2517 2518 /* 2519 * If the reserve is met and this is a previous reserved block, 2520 * take it back 2521 */ 2522 if (block_migratetype == MIGRATE_RESERVE) { 2523 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 2524 move_freepages_block(zone, page, MIGRATE_MOVABLE); 2525 } 2526 } 2527} 2528 2529/* 2530 * Initially all pages are reserved - free ones are freed 2531 * up by free_all_bootmem() once the early boot process is 2532 * done. Non-atomic initialization, single-pass. 2533 */ 2534void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 2535 unsigned long start_pfn, enum memmap_context context) 2536{ 2537 struct page *page; 2538 unsigned long end_pfn = start_pfn + size; 2539 unsigned long pfn; 2540 struct zone *z; 2541 2542 z = &NODE_DATA(nid)->node_zones[zone]; 2543 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 2544 /* 2545 * There can be holes in boot-time mem_map[]s 2546 * handed to this function. They do not 2547 * exist on hotplugged memory. 2548 */ 2549 if (context == MEMMAP_EARLY) { 2550 if (!early_pfn_valid(pfn)) 2551 continue; 2552 if (!early_pfn_in_nid(pfn, nid)) 2553 continue; 2554 } 2555 page = pfn_to_page(pfn); 2556 set_page_links(page, zone, nid, pfn); 2557 init_page_count(page); 2558 reset_page_mapcount(page); 2559 SetPageReserved(page); 2560 /* 2561 * Mark the block movable so that blocks are reserved for 2562 * movable at startup. This will force kernel allocations 2563 * to reserve their blocks rather than leaking throughout 2564 * the address space during boot when many long-lived 2565 * kernel allocations are made. Later some blocks near 2566 * the start are marked MIGRATE_RESERVE by 2567 * setup_zone_migrate_reserve() 2568 * 2569 * bitmap is created for zone's valid pfn range. but memmap 2570 * can be created for invalid pages (for alignment) 2571 * check here not to call set_pageblock_migratetype() against 2572 * pfn out of zone. 
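 * As an illustration (the pageblock size depends on the configuration):
 * with pageblock_nr_pages of 512, only pfns that are multiples of 512 and
 * that lie inside [zone_start_pfn, zone_start_pfn + spanned_pages) get
 * their pageblock marked MIGRATE_MOVABLE by the check below.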
2573 */ 2574 if ((z->zone_start_pfn <= pfn) 2575 && (pfn < z->zone_start_pfn + z->spanned_pages) 2576 && !(pfn & (pageblock_nr_pages - 1))) 2577 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 2578 2579 INIT_LIST_HEAD(&page->lru); 2580#ifdef WANT_PAGE_VIRTUAL 2581 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 2582 if (!is_highmem_idx(zone)) 2583 set_page_address(page, __va(pfn << PAGE_SHIFT)); 2584#endif 2585 } 2586} 2587 2588static void __meminit zone_init_free_lists(struct zone *zone) 2589{ 2590 int order, t; 2591 for_each_migratetype_order(order, t) { 2592 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 2593 zone->free_area[order].nr_free = 0; 2594 } 2595} 2596 2597#ifndef __HAVE_ARCH_MEMMAP_INIT 2598#define memmap_init(size, nid, zone, start_pfn) \ 2599 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 2600#endif 2601 2602static int zone_batchsize(struct zone *zone) 2603{ 2604 int batch; 2605 2606 /* 2607 * The per-cpu-pages pools are set to around 1000th of the 2608 * size of the zone. But no more than 1/2 of a meg. 2609 * 2610 * OK, so we don't know how big the cache is. So guess. 2611 */ 2612 batch = zone->present_pages / 1024; 2613 if (batch * PAGE_SIZE > 512 * 1024) 2614 batch = (512 * 1024) / PAGE_SIZE; 2615 batch /= 4; /* We effectively *= 4 below */ 2616 if (batch < 1) 2617 batch = 1; 2618 2619 /* 2620 * Clamp the batch to a 2^n - 1 value. Having a power 2621 * of 2 value was found to be more likely to have 2622 * suboptimal cache aliasing properties in some cases. 2623 * 2624 * For example if 2 tasks are alternately allocating 2625 * batches of pages, one task can end up with a lot 2626 * of pages of one half of the possible page colors 2627 * and the other with pages of the other colors. 2628 */ 2629 batch = (1 << (fls(batch + batch/2)-1)) - 1; 2630 2631 return batch; 2632} 2633 2634inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 2635{ 2636 struct per_cpu_pages *pcp; 2637 2638 memset(p, 0, sizeof(*p)); 2639 2640 pcp = &p->pcp; 2641 pcp->count = 0; 2642 pcp->high = 6 * batch; 2643 pcp->batch = max(1UL, 1 * batch); 2644 INIT_LIST_HEAD(&pcp->list); 2645} 2646 2647/* 2648 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist 2649 * to the value high for the pageset p. 2650 */ 2651 2652static void setup_pagelist_highmark(struct per_cpu_pageset *p, 2653 unsigned long high) 2654{ 2655 struct per_cpu_pages *pcp; 2656 2657 pcp = &p->pcp; 2658 pcp->high = high; 2659 pcp->batch = max(1UL, high/4); 2660 if ((high/4) > (PAGE_SHIFT * 8)) 2661 pcp->batch = PAGE_SHIFT * 8; 2662} 2663 2664 2665#ifdef CONFIG_NUMA 2666/* 2667 * Boot pageset table. One per cpu which is going to be used for all 2668 * zones and all nodes. The parameters will be set in such a way 2669 * that an item put on a list will immediately be handed over to 2670 * the buddy list. This is safe since pageset manipulation is done 2671 * with interrupts disabled. 2672 * 2673 * Some NUMA counter updates may also be caught by the boot pagesets. 2674 * 2675 * The boot_pagesets must be kept even after bootup is complete for 2676 * unused processors and/or zones. They do play a role for bootstrapping 2677 * hotplugged processors. 2678 * 2679 * zoneinfo_show() and maybe other functions do 2680 * not check if the processor is online before following the pageset pointer. 2681 * Other parts of the kernel may not check if the zone is available. 
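 * For a concrete sense of the "handed over immediately" behaviour: the
 * boot pagesets are initialised with setup_pageset(&boot_pageset[cpu], 0)
 * in zone_pcp_init(), which gives pcp->high == 0 and pcp->batch == 1, so
 * every page freed to a boot pageset drains straight back to the buddy
 * lists.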
2682 */ 2683static struct per_cpu_pageset boot_pageset[NR_CPUS]; 2684 2685/* 2686 * Dynamically allocate memory for the 2687 * per cpu pageset array in struct zone. 2688 */ 2689static int __cpuinit process_zones(int cpu) 2690{ 2691 struct zone *zone, *dzone; 2692 int node = cpu_to_node(cpu); 2693 2694 node_set_state(node, N_CPU); /* this node has a cpu */ 2695 2696 for_each_zone(zone) { 2697 2698 if (!populated_zone(zone)) 2699 continue; 2700 2701 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 2702 GFP_KERNEL, node); 2703 if (!zone_pcp(zone, cpu)) 2704 goto bad; 2705 2706 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); 2707 2708 if (percpu_pagelist_fraction) 2709 setup_pagelist_highmark(zone_pcp(zone, cpu), 2710 (zone->present_pages / percpu_pagelist_fraction)); 2711 } 2712 2713 return 0; 2714bad: 2715 for_each_zone(dzone) { 2716 if (!populated_zone(dzone)) 2717 continue; 2718 if (dzone == zone) 2719 break; 2720 kfree(zone_pcp(dzone, cpu)); 2721 zone_pcp(dzone, cpu) = NULL; 2722 } 2723 return -ENOMEM; 2724} 2725 2726static inline void free_zone_pagesets(int cpu) 2727{ 2728 struct zone *zone; 2729 2730 for_each_zone(zone) { 2731 struct per_cpu_pageset *pset = zone_pcp(zone, cpu); 2732 2733 /* Free per_cpu_pageset if it is slab allocated */ 2734 if (pset != &boot_pageset[cpu]) 2735 kfree(pset); 2736 zone_pcp(zone, cpu) = NULL; 2737 } 2738} 2739 2740static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, 2741 unsigned long action, 2742 void *hcpu) 2743{ 2744 int cpu = (long)hcpu; 2745 int ret = NOTIFY_OK; 2746 2747 switch (action) { 2748 case CPU_UP_PREPARE: 2749 case CPU_UP_PREPARE_FROZEN: 2750 if (process_zones(cpu)) 2751 ret = NOTIFY_BAD; 2752 break; 2753 case CPU_UP_CANCELED: 2754 case CPU_UP_CANCELED_FROZEN: 2755 case CPU_DEAD: 2756 case CPU_DEAD_FROZEN: 2757 free_zone_pagesets(cpu); 2758 break; 2759 default: 2760 break; 2761 } 2762 return ret; 2763} 2764 2765static struct notifier_block __cpuinitdata pageset_notifier = 2766 { &pageset_cpuup_callback, NULL, 0 }; 2767 2768void __init setup_per_cpu_pageset(void) 2769{ 2770 int err; 2771 2772 /* Initialize per_cpu_pageset for cpu 0. 2773 * A cpuup callback will do this for every cpu 2774 * as it comes online 2775 */ 2776 err = process_zones(smp_processor_id()); 2777 BUG_ON(err); 2778 register_cpu_notifier(&pageset_notifier); 2779} 2780 2781#endif 2782 2783static noinline __init_refok 2784int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 2785{ 2786 int i; 2787 struct pglist_data *pgdat = zone->zone_pgdat; 2788 size_t alloc_size; 2789 2790 /* 2791 * The per-page waitqueue mechanism uses hashed waitqueues 2792 * per zone. 2793 */ 2794 zone->wait_table_hash_nr_entries = 2795 wait_table_hash_nr_entries(zone_size_pages); 2796 zone->wait_table_bits = 2797 wait_table_bits(zone->wait_table_hash_nr_entries); 2798 alloc_size = zone->wait_table_hash_nr_entries 2799 * sizeof(wait_queue_head_t); 2800 2801 if (system_state == SYSTEM_BOOTING) { 2802 zone->wait_table = (wait_queue_head_t *) 2803 alloc_bootmem_node(pgdat, alloc_size); 2804 } else { 2805 /* 2806 * This case means that a zone whose size was 0 gets new memory 2807 * via memory hot-add. 2808 * But it may be the case that a new node was hot-added. In 2809 * this case vmalloc() will not be able to use this new node's 2810 * memory - this wait_table must be initialized to use this new 2811 * node itself as well. 2812 * To use this new node's memory, further consideration will be 2813 * necessary. 
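 * (Note, for orientation only: with CONFIG_MEMORY_HOTPLUG set,
 * wait_table_hash_nr_entries() above always returns 4096, so alloc_size
 * here is the fixed 4096 * sizeof(wait_queue_head_t).)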
2814 */ 2815 zone->wait_table = vmalloc(alloc_size); 2816 } 2817 if (!zone->wait_table) 2818 return -ENOMEM; 2819 2820 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) 2821 init_waitqueue_head(zone->wait_table + i); 2822 2823 return 0; 2824} 2825 2826static __meminit void zone_pcp_init(struct zone *zone) 2827{ 2828 int cpu; 2829 unsigned long batch = zone_batchsize(zone); 2830 2831 for (cpu = 0; cpu < NR_CPUS; cpu++) { 2832#ifdef CONFIG_NUMA 2833 /* Early boot. Slab allocator not functional yet */ 2834 zone_pcp(zone, cpu) = &boot_pageset[cpu]; 2835 setup_pageset(&boot_pageset[cpu],0); 2836#else 2837 setup_pageset(zone_pcp(zone,cpu), batch); 2838#endif 2839 } 2840 if (zone->present_pages) 2841 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 2842 zone->name, zone->present_pages, batch); 2843} 2844 2845__meminit int init_currently_empty_zone(struct zone *zone, 2846 unsigned long zone_start_pfn, 2847 unsigned long size, 2848 enum memmap_context context) 2849{ 2850 struct pglist_data *pgdat = zone->zone_pgdat; 2851 int ret; 2852 ret = zone_wait_table_init(zone, size); 2853 if (ret) 2854 return ret; 2855 pgdat->nr_zones = zone_idx(zone) + 1; 2856 2857 zone->zone_start_pfn = zone_start_pfn; 2858 2859 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); 2860 2861 zone_init_free_lists(zone); 2862 2863 return 0; 2864} 2865 2866#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2867/* 2868 * Basic iterator support. Return the first range of PFNs for a node 2869 * Note: nid == MAX_NUMNODES returns first region regardless of node 2870 */ 2871static int __meminit first_active_region_index_in_nid(int nid) 2872{ 2873 int i; 2874 2875 for (i = 0; i < nr_nodemap_entries; i++) 2876 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) 2877 return i; 2878 2879 return -1; 2880} 2881 2882/* 2883 * Basic iterator support. Return the next active range of PFNs for a node 2884 * Note: nid == MAX_NUMNODES returns next region regardless of node 2885 */ 2886static int __meminit next_active_region_index_in_nid(int index, int nid) 2887{ 2888 for (index = index + 1; index < nr_nodemap_entries; index++) 2889 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) 2890 return index; 2891 2892 return -1; 2893} 2894 2895#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 2896/* 2897 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 2898 * Architectures may implement their own version but if add_active_range() 2899 * was used and there are no special requirements, this is a convenient 2900 * alternative 2901 */ 2902int __meminit early_pfn_to_nid(unsigned long pfn) 2903{ 2904 int i; 2905 2906 for (i = 0; i < nr_nodemap_entries; i++) { 2907 unsigned long start_pfn = early_node_map[i].start_pfn; 2908 unsigned long end_pfn = early_node_map[i].end_pfn; 2909 2910 if (start_pfn <= pfn && pfn < end_pfn) 2911 return early_node_map[i].nid; 2912 } 2913 2914 return 0; 2915} 2916#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 2917 2918/* Basic iterator support to walk early_node_map[] */ 2919#define for_each_active_range_index_in_nid(i, nid) \ 2920 for (i = first_active_region_index_in_nid(nid); i != -1; \ 2921 i = next_active_region_index_in_nid(i, nid)) 2922 2923/** 2924 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range 2925 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 
2926 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node 2927 * 2928 * If an architecture guarantees that all ranges registered with 2929 * add_active_ranges() contain no holes and may be freed, this 2930 * function may be used instead of calling free_bootmem() manually. 2931 */ 2932void __init free_bootmem_with_active_regions(int nid, 2933 unsigned long max_low_pfn) 2934{ 2935 int i; 2936 2937 for_each_active_range_index_in_nid(i, nid) { 2938 unsigned long size_pages = 0; 2939 unsigned long end_pfn = early_node_map[i].end_pfn; 2940 2941 if (early_node_map[i].start_pfn >= max_low_pfn) 2942 continue; 2943 2944 if (end_pfn > max_low_pfn) 2945 end_pfn = max_low_pfn; 2946 2947 size_pages = end_pfn - early_node_map[i].start_pfn; 2948 free_bootmem_node(NODE_DATA(early_node_map[i].nid), 2949 PFN_PHYS(early_node_map[i].start_pfn), 2950 size_pages << PAGE_SHIFT); 2951 } 2952} 2953 2954/** 2955 * sparse_memory_present_with_active_regions - Call memory_present for each active range 2956 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 2957 * 2958 * If an architecture guarantees that all ranges registered with 2959 * add_active_ranges() contain no holes and may be freed, this 2960 * function may be used instead of calling memory_present() manually. 2961 */ 2962void __init sparse_memory_present_with_active_regions(int nid) 2963{ 2964 int i; 2965 2966 for_each_active_range_index_in_nid(i, nid) 2967 memory_present(early_node_map[i].nid, 2968 early_node_map[i].start_pfn, 2969 early_node_map[i].end_pfn); 2970} 2971 2972/** 2973 * push_node_boundaries - Push node boundaries to at least the requested boundary 2974 * @nid: The nid of the node to push the boundary for 2975 * @start_pfn: The start pfn of the node 2976 * @end_pfn: The end pfn of the node 2977 * 2978 * In reserve-based hot-add, mem_map is allocated that is unused until hotadd 2979 * time. Specifically, on x86_64, SRAT will report ranges that can potentially 2980 * be hotplugged even though no physical memory exists. This function allows 2981 * an arch to push out the node boundaries so mem_map is allocated that can 2982 * be used later.
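 * Hypothetical example (pfn values invented): if SRAT reports that node 1
 * may eventually grow to pfn 0x200000 while only pfns below 0x100000 are
 * present at boot, the arch can call
 * push_node_boundaries(1, node_start_pfn, 0x200000) so that the node's
 * mem_map already covers the hot-addable range.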
2983 */ 2984#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 2985void __init push_node_boundaries(unsigned int nid, 2986 unsigned long start_pfn, unsigned long end_pfn) 2987{ 2988 printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n", 2989 nid, start_pfn, end_pfn); 2990 2991 /* Initialise the boundary for this node if necessary */ 2992 if (node_boundary_end_pfn[nid] == 0) 2993 node_boundary_start_pfn[nid] = -1UL; 2994 2995 /* Update the boundaries */ 2996 if (node_boundary_start_pfn[nid] > start_pfn) 2997 node_boundary_start_pfn[nid] = start_pfn; 2998 if (node_boundary_end_pfn[nid] < end_pfn) 2999 node_boundary_end_pfn[nid] = end_pfn; 3000} 3001 3002/* If necessary, push the node boundary out for reserve hotadd */ 3003static void __meminit account_node_boundary(unsigned int nid, 3004 unsigned long *start_pfn, unsigned long *end_pfn) 3005{ 3006 printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n", 3007 nid, *start_pfn, *end_pfn); 3008 3009 /* Return if boundary information has not been provided */ 3010 if (node_boundary_end_pfn[nid] == 0) 3011 return; 3012 3013 /* Check the boundaries and update if necessary */ 3014 if (node_boundary_start_pfn[nid] < *start_pfn) 3015 *start_pfn = node_boundary_start_pfn[nid]; 3016 if (node_boundary_end_pfn[nid] > *end_pfn) 3017 *end_pfn = node_boundary_end_pfn[nid]; 3018} 3019#else 3020void __init push_node_boundaries(unsigned int nid, 3021 unsigned long start_pfn, unsigned long end_pfn) {} 3022 3023static void __meminit account_node_boundary(unsigned int nid, 3024 unsigned long *start_pfn, unsigned long *end_pfn) {} 3025#endif 3026 3027 3028/** 3029 * get_pfn_range_for_nid - Return the start and end page frames for a node 3030 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 3031 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 3032 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 3033 * 3034 * It returns the start and end page frame of a node based on information 3035 * provided by an arch calling add_active_range(). If called for a node 3036 * with no available memory, a warning is printed and the start and end 3037 * PFNs will be 0. 3038 */ 3039void __meminit get_pfn_range_for_nid(unsigned int nid, 3040 unsigned long *start_pfn, unsigned long *end_pfn) 3041{ 3042 int i; 3043 *start_pfn = -1UL; 3044 *end_pfn = 0; 3045 3046 for_each_active_range_index_in_nid(i, nid) { 3047 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn); 3048 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn); 3049 } 3050 3051 if (*start_pfn == -1UL) 3052 *start_pfn = 0; 3053 3054 /* Push the node boundaries out if requested */ 3055 account_node_boundary(nid, start_pfn, end_pfn); 3056} 3057 3058/* 3059 * This finds a zone that can be used for ZONE_MOVABLE pages. 
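 * (Typical outcomes, for illustration: on an x86_64 box with memory above
 * 4GB the scan below settles on ZONE_NORMAL; on a 32-bit machine with
 * highmem populated it settles on ZONE_HIGHMEM.)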
The 3060 * assumption is made that zones within a node are ordered in monotonically 3061 * increasing memory addresses so that the "highest" populated zone is used 3062 */ 3063void __init find_usable_zone_for_movable(void) 3064{ 3065 int zone_index; 3066 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 3067 if (zone_index == ZONE_MOVABLE) 3068 continue; 3069 3070 if (arch_zone_highest_possible_pfn[zone_index] > 3071 arch_zone_lowest_possible_pfn[zone_index]) 3072 break; 3073 } 3074 3075 VM_BUG_ON(zone_index == -1); 3076 movable_zone = zone_index; 3077} 3078 3079/* 3080 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 3081 * because it is sized independently of the architecture. Unlike the other zones, 3082 * the starting point for ZONE_MOVABLE is not fixed. It may be different 3083 * in each node depending on the size of each node and how evenly kernelcore 3084 * is distributed. This helper function adjusts the zone ranges 3085 * provided by the architecture for a given node by using the end of the 3086 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that 3087 * zones within a node are in order of monotonically increasing memory addresses. 3088 */ 3089void __meminit adjust_zone_range_for_zone_movable(int nid, 3090 unsigned long zone_type, 3091 unsigned long node_start_pfn, 3092 unsigned long node_end_pfn, 3093 unsigned long *zone_start_pfn, 3094 unsigned long *zone_end_pfn) 3095{ 3096 /* Only adjust if ZONE_MOVABLE is on this node */ 3097 if (zone_movable_pfn[nid]) { 3098 /* Size ZONE_MOVABLE */ 3099 if (zone_type == ZONE_MOVABLE) { 3100 *zone_start_pfn = zone_movable_pfn[nid]; 3101 *zone_end_pfn = min(node_end_pfn, 3102 arch_zone_highest_possible_pfn[movable_zone]); 3103 3104 /* Adjust for ZONE_MOVABLE starting within this range */ 3105 } else if (*zone_start_pfn < zone_movable_pfn[nid] && 3106 *zone_end_pfn > zone_movable_pfn[nid]) { 3107 *zone_end_pfn = zone_movable_pfn[nid]; 3108 3109 /* Check if this whole range is within ZONE_MOVABLE */ 3110 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 3111 *zone_start_pfn = *zone_end_pfn; 3112 } 3113} 3114 3115/* 3116 * Return the number of pages a zone spans in a node, including holes 3117 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 3118 */ 3119static unsigned long __meminit zone_spanned_pages_in_node(int nid, 3120 unsigned long zone_type, 3121 unsigned long *ignored) 3122{ 3123 unsigned long node_start_pfn, node_end_pfn; 3124 unsigned long zone_start_pfn, zone_end_pfn; 3125 3126 /* Get the start and end of the node and zone */ 3127 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 3128 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 3129 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 3130 adjust_zone_range_for_zone_movable(nid, zone_type, 3131 node_start_pfn, node_end_pfn, 3132 &zone_start_pfn, &zone_end_pfn); 3133 3134 /* Check that this node has pages within the zone's required range */ 3135 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) 3136 return 0; 3137 3138 /* Move the zone boundaries inside the node if necessary */ 3139 zone_end_pfn = min(zone_end_pfn, node_end_pfn); 3140 zone_start_pfn = max(zone_start_pfn, node_start_pfn); 3141 3142 /* Return the spanned pages */ 3143 return zone_end_pfn - zone_start_pfn; 3144} 3145 3146/* 3147 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 3148 * then all holes in the requested range will be accounted for.
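 * Worked example (pfns invented): if the node's registered active ranges
 * are [0, 0x100) and [0x200, 0x300), then
 * __absent_pages_in_range(nid, 0, 0x300) counts the gap [0x100, 0x200)
 * and returns 0x100 pages.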
3149 */ 3150unsigned long __meminit __absent_pages_in_range(int nid, 3151 unsigned long range_start_pfn, 3152 unsigned long range_end_pfn) 3153{ 3154 int i = 0; 3155 unsigned long prev_end_pfn = 0, hole_pages = 0; 3156 unsigned long start_pfn; 3157 3158 /* Find the end_pfn of the first active range of pfns in the node */ 3159 i = first_active_region_index_in_nid(nid); 3160 if (i == -1) 3161 return 0; 3162 3163 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn); 3164 3165 /* Account for ranges before physical memory on this node */ 3166 if (early_node_map[i].start_pfn > range_start_pfn) 3167 hole_pages = prev_end_pfn - range_start_pfn; 3168 3169 /* Find all holes for the zone within the node */ 3170 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) { 3171 3172 /* No need to continue if prev_end_pfn is outside the zone */ 3173 if (prev_end_pfn >= range_end_pfn) 3174 break; 3175 3176 /* Make sure the end of the zone is not within the hole */ 3177 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn); 3178 prev_end_pfn = max(prev_end_pfn, range_start_pfn); 3179 3180 /* Update the hole size count and move on */ 3181 if (start_pfn > range_start_pfn) { 3182 BUG_ON(prev_end_pfn > start_pfn); 3183 hole_pages += start_pfn - prev_end_pfn; 3184 } 3185 prev_end_pfn = early_node_map[i].end_pfn; 3186 } 3187 3188 /* Account for ranges past physical memory on this node */ 3189 if (range_end_pfn > prev_end_pfn) 3190 hole_pages += range_end_pfn - 3191 max(range_start_pfn, prev_end_pfn); 3192 3193 return hole_pages; 3194} 3195 3196/** 3197 * absent_pages_in_range - Return number of page frames in holes within a range 3198 * @start_pfn: The start PFN to start searching for holes 3199 * @end_pfn: The end PFN to stop searching for holes 3200 * 3201 * It returns the number of page frames in memory holes within a range.
3202 */ 3203unsigned long __init absent_pages_in_range(unsigned long start_pfn, 3204 unsigned long end_pfn) 3205{ 3206 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 3207} 3208 3209/* Return the number of page frames in holes in a zone on a node */ 3210static unsigned long __meminit zone_absent_pages_in_node(int nid, 3211 unsigned long zone_type, 3212 unsigned long *ignored) 3213{ 3214 unsigned long node_start_pfn, node_end_pfn; 3215 unsigned long zone_start_pfn, zone_end_pfn; 3216 3217 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 3218 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type], 3219 node_start_pfn); 3220 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type], 3221 node_end_pfn); 3222 3223 adjust_zone_range_for_zone_movable(nid, zone_type, 3224 node_start_pfn, node_end_pfn, 3225 &zone_start_pfn, &zone_end_pfn); 3226 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 3227} 3228 3229#else 3230static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, 3231 unsigned long zone_type, 3232 unsigned long *zones_size) 3233{ 3234 return zones_size[zone_type]; 3235} 3236 3237static inline unsigned long __meminit zone_absent_pages_in_node(int nid, 3238 unsigned long zone_type, 3239 unsigned long *zholes_size) 3240{ 3241 if (!zholes_size) 3242 return 0; 3243 3244 return zholes_size[zone_type]; 3245} 3246 3247#endif 3248 3249static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 3250 unsigned long *zones_size, unsigned long *zholes_size) 3251{ 3252 unsigned long realtotalpages, totalpages = 0; 3253 enum zone_type i; 3254 3255 for (i = 0; i < MAX_NR_ZONES; i++) 3256 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, 3257 zones_size); 3258 pgdat->node_spanned_pages = totalpages; 3259 3260 realtotalpages = totalpages; 3261 for (i = 0; i < MAX_NR_ZONES; i++) 3262 realtotalpages -= 3263 zone_absent_pages_in_node(pgdat->node_id, i, 3264 zholes_size); 3265 pgdat->node_present_pages = realtotalpages; 3266 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 3267 realtotalpages); 3268} 3269 3270#ifndef CONFIG_SPARSEMEM 3271/* 3272 * Calculate the size of the zone->blockflags rounded to an unsigned long 3273 * Start by making sure zonesize is a multiple of pageblock_order by rounding 3274 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 3275 * round what is now in bits to nearest long in bits, then return it in 3276 * bytes. 3277 */ 3278static unsigned long __init usemap_size(unsigned long zonesize) 3279{ 3280 unsigned long usemapsize; 3281 3282 usemapsize = roundup(zonesize, pageblock_nr_pages); 3283 usemapsize = usemapsize >> pageblock_order; 3284 usemapsize *= NR_PAGEBLOCK_BITS; 3285 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 3286 3287 return usemapsize / 8; 3288} 3289 3290static void __init setup_usemap(struct pglist_data *pgdat, 3291 struct zone *zone, unsigned long zonesize) 3292{ 3293 unsigned long usemapsize = usemap_size(zonesize); 3294 zone->pageblock_flags = NULL; 3295 if (usemapsize) { 3296 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); 3297 memset(zone->pageblock_flags, 0, usemapsize); 3298 } 3299} 3300#else 3301static void inline setup_usemap(struct pglist_data *pgdat, 3302 struct zone *zone, unsigned long zonesize) {} 3303#endif /* CONFIG_SPARSEMEM */ 3304 3305#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 3306 3307/* Return a sensible default order for the pageblock size. 
*/ 3308static inline int pageblock_default_order(void) 3309{ 3310 if (HPAGE_SHIFT > PAGE_SHIFT) 3311 return HUGETLB_PAGE_ORDER; 3312 3313 return MAX_ORDER-1; 3314} 3315 3316/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 3317static inline void __init set_pageblock_order(unsigned int order) 3318{ 3319 /* Check that pageblock_nr_pages has not already been setup */ 3320 if (pageblock_order) 3321 return; 3322 3323 /* 3324 * Assume the largest contiguous order of interest is a huge page. 3325 * This value may be variable depending on boot parameters on IA64 3326 */ 3327 pageblock_order = order; 3328} 3329#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 3330 3331/* 3332 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 3333 * and pageblock_default_order() are unused as pageblock_order is set 3334 * at compile-time. See include/linux/pageblock-flags.h for the values of 3335 * pageblock_order based on the kernel config 3336 */ 3337static inline int pageblock_default_order(unsigned int order) 3338{ 3339 return MAX_ORDER-1; 3340} 3341#define set_pageblock_order(x) do {} while (0) 3342 3343#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 3344 3345/* 3346 * Set up the zone data structures: 3347 * - mark all pages reserved 3348 * - mark all memory queues empty 3349 * - clear the memory bitmaps 3350 */ 3351static void __paginginit free_area_init_core(struct pglist_data *pgdat, 3352 unsigned long *zones_size, unsigned long *zholes_size) 3353{ 3354 enum zone_type j; 3355 int nid = pgdat->node_id; 3356 unsigned long zone_start_pfn = pgdat->node_start_pfn; 3357 int ret; 3358 3359 pgdat_resize_init(pgdat); 3360 pgdat->nr_zones = 0; 3361 init_waitqueue_head(&pgdat->kswapd_wait); 3362 pgdat->kswapd_max_order = 0; 3363 3364 for (j = 0; j < MAX_NR_ZONES; j++) { 3365 struct zone *zone = pgdat->node_zones + j; 3366 unsigned long size, realsize, memmap_pages; 3367 3368 size = zone_spanned_pages_in_node(nid, j, zones_size); 3369 realsize = size - zone_absent_pages_in_node(nid, j, 3370 zholes_size); 3371 3372 /* 3373 * Adjust realsize so that it accounts for how much memory 3374 * is used by this zone for memmap. 
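 * (For a feel of the magnitude, with assumed numbers: a zone of 262144
 * 4K pages, i.e. 1GB, and a 56-byte struct page gives
 * memmap_pages = (262144 * 56) >> 12 = 3584, so realsize shrinks by about
 * 14MB; the real struct page size depends on the configuration.)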
This affects the watermark 3375 * and per-cpu initialisations 3376 */ 3377 memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT; 3378 if (realsize >= memmap_pages) { 3379 realsize -= memmap_pages; 3380 printk(KERN_DEBUG 3381 " %s zone: %lu pages used for memmap\n", 3382 zone_names[j], memmap_pages); 3383 } else 3384 printk(KERN_WARNING 3385 " %s zone: %lu pages exceeds realsize %lu\n", 3386 zone_names[j], memmap_pages, realsize); 3387 3388 /* Account for reserved pages */ 3389 if (j == 0 && realsize > dma_reserve) { 3390 realsize -= dma_reserve; 3391 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 3392 zone_names[0], dma_reserve); 3393 } 3394 3395 if (!is_highmem_idx(j)) 3396 nr_kernel_pages += realsize; 3397 nr_all_pages += realsize; 3398 3399 zone->spanned_pages = size; 3400 zone->present_pages = realsize; 3401#ifdef CONFIG_NUMA 3402 zone->node = nid; 3403 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio) 3404 / 100; 3405 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100; 3406#endif 3407 zone->name = zone_names[j]; 3408 spin_lock_init(&zone->lock); 3409 spin_lock_init(&zone->lru_lock); 3410 zone_seqlock_init(zone); 3411 zone->zone_pgdat = pgdat; 3412 3413 zone->prev_priority = DEF_PRIORITY; 3414 3415 zone_pcp_init(zone); 3416 INIT_LIST_HEAD(&zone->active_list); 3417 INIT_LIST_HEAD(&zone->inactive_list); 3418 zone->nr_scan_active = 0; 3419 zone->nr_scan_inactive = 0; 3420 zap_zone_vm_stats(zone); 3421 zone->flags = 0; 3422 if (!size) 3423 continue; 3424 3425 set_pageblock_order(pageblock_default_order()); 3426 setup_usemap(pgdat, zone, size); 3427 ret = init_currently_empty_zone(zone, zone_start_pfn, 3428 size, MEMMAP_EARLY); 3429 BUG_ON(ret); 3430 zone_start_pfn += size; 3431 } 3432} 3433 3434static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) 3435{ 3436 /* Skip empty nodes */ 3437 if (!pgdat->node_spanned_pages) 3438 return; 3439 3440#ifdef CONFIG_FLAT_NODE_MEM_MAP 3441 /* ia64 gets its own node_mem_map, before this, without bootmem */ 3442 if (!pgdat->node_mem_map) { 3443 unsigned long size, start, end; 3444 struct page *map; 3445 3446 /* 3447 * The zone's endpoints aren't required to be MAX_ORDER 3448 * aligned but the node_mem_map endpoints must be in order 3449 * for the buddy allocator to function correctly. 
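 * Illustrative case (numbers assumed): with MAX_ORDER_NR_PAGES == 1024, a
 * node whose node_start_pfn is 3000 gets start rounded down to 2048 below,
 * and end rounded up, so node_mem_map always covers whole MAX_ORDER blocks.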
3450 */ 3451 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 3452 end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 3453 end = ALIGN(end, MAX_ORDER_NR_PAGES); 3454 size = (end - start) * sizeof(struct page); 3455 map = alloc_remap(pgdat->node_id, size); 3456 if (!map) 3457 map = alloc_bootmem_node(pgdat, size); 3458 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 3459 } 3460#ifndef CONFIG_NEED_MULTIPLE_NODES 3461 /* 3462 * With no DISCONTIG, the global mem_map is just set as node 0's 3463 */ 3464 if (pgdat == NODE_DATA(0)) { 3465 mem_map = NODE_DATA(0)->node_mem_map; 3466#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 3467 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 3468 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET); 3469#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 3470 } 3471#endif 3472#endif /* CONFIG_FLAT_NODE_MEM_MAP */ 3473} 3474 3475void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat, 3476 unsigned long *zones_size, unsigned long node_start_pfn, 3477 unsigned long *zholes_size) 3478{ 3479 pgdat->node_id = nid; 3480 pgdat->node_start_pfn = node_start_pfn; 3481 calculate_node_totalpages(pgdat, zones_size, zholes_size); 3482 3483 alloc_node_mem_map(pgdat); 3484 3485 free_area_init_core(pgdat, zones_size, zholes_size); 3486} 3487 3488#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 3489 3490#if MAX_NUMNODES > 1 3491/* 3492 * Figure out the number of possible node ids. 3493 */ 3494static void __init setup_nr_node_ids(void) 3495{ 3496 unsigned int node; 3497 unsigned int highest = 0; 3498 3499 for_each_node_mask(node, node_possible_map) 3500 highest = node; 3501 nr_node_ids = highest + 1; 3502} 3503#else 3504static inline void setup_nr_node_ids(void) 3505{ 3506} 3507#endif 3508 3509/** 3510 * add_active_range - Register a range of PFNs backed by physical memory 3511 * @nid: The node ID the range resides on 3512 * @start_pfn: The start PFN of the available physical memory 3513 * @end_pfn: The end PFN of the available physical memory 3514 * 3515 * These ranges are stored in an early_node_map[] and later used by 3516 * free_area_init_nodes() to calculate zone sizes and holes. If the 3517 * range spans a memory hole, it is up to the architecture to ensure 3518 * the memory is not freed by the bootmem allocator. If possible 3519 * the range being registered will be merged with existing ranges. 
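 * Merging example (pfns invented): if [0x100, 0x200) is already registered
 * for a nid and add_active_range() is then called with [0x180, 0x280) for
 * the same nid, the existing entry is extended forward to [0x100, 0x280)
 * instead of a new entry being added.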
3520 */ 3521void __init add_active_range(unsigned int nid, unsigned long start_pfn, 3522 unsigned long end_pfn) 3523{ 3524 int i; 3525 3526 printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) " 3527 "%d entries of %d used\n", 3528 nid, start_pfn, end_pfn, 3529 nr_nodemap_entries, MAX_ACTIVE_REGIONS); 3530 3531 /* Merge with existing active regions if possible */ 3532 for (i = 0; i < nr_nodemap_entries; i++) { 3533 if (early_node_map[i].nid != nid) 3534 continue; 3535 3536 /* Skip if an existing region covers this new one */ 3537 if (start_pfn >= early_node_map[i].start_pfn && 3538 end_pfn <= early_node_map[i].end_pfn) 3539 return; 3540 3541 /* Merge forward if suitable */ 3542 if (start_pfn <= early_node_map[i].end_pfn && 3543 end_pfn > early_node_map[i].end_pfn) { 3544 early_node_map[i].end_pfn = end_pfn; 3545 return; 3546 } 3547 3548 /* Merge backward if suitable */ 3549 if (start_pfn < early_node_map[i].end_pfn && 3550 end_pfn >= early_node_map[i].start_pfn) { 3551 early_node_map[i].start_pfn = start_pfn; 3552 return; 3553 } 3554 } 3555 3556 /* Check that early_node_map is large enough */ 3557 if (i >= MAX_ACTIVE_REGIONS) { 3558 printk(KERN_CRIT "More than %d memory regions, truncating\n", 3559 MAX_ACTIVE_REGIONS); 3560 return; 3561 } 3562 3563 early_node_map[i].nid = nid; 3564 early_node_map[i].start_pfn = start_pfn; 3565 early_node_map[i].end_pfn = end_pfn; 3566 nr_nodemap_entries = i + 1; 3567} 3568 3569/** 3570 * shrink_active_range - Shrink an existing registered range of PFNs 3571 * @nid: The node id the range is on that should be shrunk 3572 * @old_end_pfn: The old end PFN of the range 3573 * @new_end_pfn: The new end PFN of the range 3574 * 3575 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node. 3576 * The map is kept at the end of the physical page range that has already been 3577 * registered with add_active_range(). This function allows an arch to shrink 3578 * an existing registered range. 3579 */ 3580void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn, 3581 unsigned long new_end_pfn) 3582{ 3583 int i; 3584 3585 /* Find the old active region end and shrink */ 3586 for_each_active_range_index_in_nid(i, nid) 3587 if (early_node_map[i].end_pfn == old_end_pfn) { 3588 early_node_map[i].end_pfn = new_end_pfn; 3589 break; 3590 } 3591} 3592 3593/** 3594 * remove_all_active_ranges - Remove all currently registered regions 3595 * 3596 * During discovery, it may be found that a table like SRAT is invalid 3597 * and an alternative discovery method must be used. This function removes 3598 * all currently registered regions.
3599 */ 3600void __init remove_all_active_ranges(void) 3601{ 3602 memset(early_node_map, 0, sizeof(early_node_map)); 3603 nr_nodemap_entries = 0; 3604#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 3605 memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn)); 3606 memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn)); 3607#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 3608} 3609 3610/* Compare two active node_active_regions */ 3611static int __init cmp_node_active_region(const void *a, const void *b) 3612{ 3613 struct node_active_region *arange = (struct node_active_region *)a; 3614 struct node_active_region *brange = (struct node_active_region *)b; 3615 3616 /* Done this way to avoid overflows */ 3617 if (arange->start_pfn > brange->start_pfn) 3618 return 1; 3619 if (arange->start_pfn < brange->start_pfn) 3620 return -1; 3621 3622 return 0; 3623} 3624 3625/* sort the node_map by start_pfn */ 3626static void __init sort_node_map(void) 3627{ 3628 sort(early_node_map, (size_t)nr_nodemap_entries, 3629 sizeof(struct node_active_region), 3630 cmp_node_active_region, NULL); 3631} 3632 3633/* Find the lowest pfn for a node */ 3634unsigned long __init find_min_pfn_for_node(unsigned long nid) 3635{ 3636 int i; 3637 unsigned long min_pfn = ULONG_MAX; 3638 3639 /* Assuming a sorted map, the first range found has the starting pfn */ 3640 for_each_active_range_index_in_nid(i, nid) 3641 min_pfn = min(min_pfn, early_node_map[i].start_pfn); 3642 3643 if (min_pfn == ULONG_MAX) { 3644 printk(KERN_WARNING 3645 "Could not find start_pfn for node %lu\n", nid); 3646 return 0; 3647 } 3648 3649 return min_pfn; 3650} 3651 3652/** 3653 * find_min_pfn_with_active_regions - Find the minimum PFN registered 3654 * 3655 * It returns the minimum PFN based on information provided via 3656 * add_active_range(). 3657 */ 3658unsigned long __init find_min_pfn_with_active_regions(void) 3659{ 3660 return find_min_pfn_for_node(MAX_NUMNODES); 3661} 3662 3663/** 3664 * find_max_pfn_with_active_regions - Find the maximum PFN registered 3665 * 3666 * It returns the maximum PFN based on information provided via 3667 * add_active_range(). 3668 */ 3669unsigned long __init find_max_pfn_with_active_regions(void) 3670{ 3671 int i; 3672 unsigned long max_pfn = 0; 3673 3674 for (i = 0; i < nr_nodemap_entries; i++) 3675 max_pfn = max(max_pfn, early_node_map[i].end_pfn); 3676 3677 return max_pfn; 3678} 3679 3680/* 3681 * early_calculate_totalpages() 3682 * Sum pages in active regions for movable zone. 3683 * Populate N_HIGH_MEMORY for calculating usable_nodes. 3684 */ 3685static unsigned long __init early_calculate_totalpages(void) 3686{ 3687 int i; 3688 unsigned long totalpages = 0; 3689 3690 for (i = 0; i < nr_nodemap_entries; i++) { 3691 unsigned long pages = early_node_map[i].end_pfn - 3692 early_node_map[i].start_pfn; 3693 totalpages += pages; 3694 if (pages) 3695 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY); 3696 } 3697 return totalpages; 3698} 3699 3700/* 3701 * Find the PFN the Movable zone begins in each node. Kernel memory 3702 * is spread evenly between nodes as long as the nodes have enough 3703 * memory. 
When they don't, some nodes will have more kernelcore than 3704 * others 3705 */ 3706void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) 3707{ 3708 int i, nid; 3709 unsigned long usable_startpfn; 3710 unsigned long kernelcore_node, kernelcore_remaining; 3711 unsigned long totalpages = early_calculate_totalpages(); 3712 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); 3713 3714 /* 3715 * If movablecore was specified, calculate what size of 3716 * kernelcore that corresponds so that memory usable for 3717 * any allocation type is evenly spread. If both kernelcore 3718 * and movablecore are specified, then the value of kernelcore 3719 * will be used for required_kernelcore if it's greater than 3720 * what movablecore would have allowed. 3721 */ 3722 if (required_movablecore) { 3723 unsigned long corepages; 3724 3725 /* 3726 * Round-up so that ZONE_MOVABLE is at least as large as what 3727 * was requested by the user 3728 */ 3729 required_movablecore = 3730 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 3731 corepages = totalpages - required_movablecore; 3732 3733 required_kernelcore = max(required_kernelcore, corepages); 3734 } 3735 3736 /* If kernelcore was not specified, there is no ZONE_MOVABLE */ 3737 if (!required_kernelcore) 3738 return; 3739 3740 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 3741 find_usable_zone_for_movable(); 3742 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 3743 3744restart: 3745 /* Spread kernelcore memory as evenly as possible throughout nodes */ 3746 kernelcore_node = required_kernelcore / usable_nodes; 3747 for_each_node_state(nid, N_HIGH_MEMORY) { 3748 /* 3749 * Recalculate kernelcore_node if the division per node 3750 * now exceeds what is necessary to satisfy the requested 3751 * amount of memory for the kernel 3752 */ 3753 if (required_kernelcore < kernelcore_node) 3754 kernelcore_node = required_kernelcore / usable_nodes; 3755 3756 /* 3757 * As the map is walked, we track how much memory is usable 3758 * by the kernel using kernelcore_remaining. When it is 3759 * 0, the rest of the node is usable by ZONE_MOVABLE 3760 */ 3761 kernelcore_remaining = kernelcore_node; 3762 3763 /* Go through each range of PFNs within this node */ 3764 for_each_active_range_index_in_nid(i, nid) { 3765 unsigned long start_pfn, end_pfn; 3766 unsigned long size_pages; 3767 3768 start_pfn = max(early_node_map[i].start_pfn, 3769 zone_movable_pfn[nid]); 3770 end_pfn = early_node_map[i].end_pfn; 3771 if (start_pfn >= end_pfn) 3772 continue; 3773 3774 /* Account for what is only usable for kernelcore */ 3775 if (start_pfn < usable_startpfn) { 3776 unsigned long kernel_pages; 3777 kernel_pages = min(end_pfn, usable_startpfn) 3778 - start_pfn; 3779 3780 kernelcore_remaining -= min(kernel_pages, 3781 kernelcore_remaining); 3782 required_kernelcore -= min(kernel_pages, 3783 required_kernelcore); 3784 3785 /* Continue if range is now fully accounted */ 3786 if (end_pfn <= usable_startpfn) { 3787 3788 /* 3789 * Push zone_movable_pfn to the end so 3790 * that if we have to rebalance 3791 * kernelcore across nodes, we will 3792 * not double account here 3793 */ 3794 zone_movable_pfn[nid] = end_pfn; 3795 continue; 3796 } 3797 start_pfn = usable_startpfn; 3798 } 3799 3800 /* 3801 * The usable PFN range for ZONE_MOVABLE is from 3802 * start_pfn->end_pfn. 
Calculate size_pages as the 3803 * number of pages used as kernelcore 3804 */ 3805 size_pages = end_pfn - start_pfn; 3806 if (size_pages > kernelcore_remaining) 3807 size_pages = kernelcore_remaining; 3808 zone_movable_pfn[nid] = start_pfn + size_pages; 3809 3810 /* 3811 * Some kernelcore has been met, update counts and 3812 * break if the kernelcore for this node has been 3813 * satisfied 3814 */ 3815 required_kernelcore -= min(required_kernelcore, 3816 size_pages); 3817 kernelcore_remaining -= size_pages; 3818 if (!kernelcore_remaining) 3819 break; 3820 } 3821 } 3822 3823 /* 3824 * If there is still required_kernelcore, we do another pass with one 3825 * less node in the count. This will push zone_movable_pfn[nid] further 3826 * along on the nodes that still have memory until kernelcore is 3827 * satisfied 3828 */ 3829 usable_nodes--; 3830 if (usable_nodes && required_kernelcore > usable_nodes) 3831 goto restart; 3832 3833 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 3834 for (nid = 0; nid < MAX_NUMNODES; nid++) 3835 zone_movable_pfn[nid] = 3836 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 3837} 3838 3839/* Any regular memory on that node? */ 3840static void check_for_regular_memory(pg_data_t *pgdat) 3841{ 3842#ifdef CONFIG_HIGHMEM 3843 enum zone_type zone_type; 3844 3845 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) { 3846 struct zone *zone = &pgdat->node_zones[zone_type]; 3847 if (zone->present_pages) 3848 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY); 3849 } 3850#endif 3851} 3852 3853/** 3854 * free_area_init_nodes - Initialise all pg_data_t and zone data 3855 * @max_zone_pfn: an array of max PFNs for each zone 3856 * 3857 * This will call free_area_init_node() for each active node in the system. 3858 * Using the page ranges provided by add_active_range(), the size of each 3859 * zone in each node and of its holes is calculated. If the maximum PFNs 3860 * of two adjacent zones match, it is assumed that the higher zone is empty. 3861 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 3862 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 3863 * starts where the previous one ended. For example, ZONE_DMA32 starts 3864 * at arch_max_dma_pfn.
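 *
 * A minimal sketch of the expected call from arch code (MAX_DMA_PFN and
 * max_low_pfn stand in for whatever the architecture actually tracks and
 * are assumptions here):
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES];
 *
 *	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 *	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
 *	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 *	free_area_init_nodes(max_zone_pfns);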
3865 */ 3866void __init free_area_init_nodes(unsigned long *max_zone_pfn) 3867{ 3868 unsigned long nid; 3869 enum zone_type i; 3870 3871 /* Sort early_node_map as initialisation assumes it is sorted */ 3872 sort_node_map(); 3873 3874 /* Record where the zone boundaries are */ 3875 memset(arch_zone_lowest_possible_pfn, 0, 3876 sizeof(arch_zone_lowest_possible_pfn)); 3877 memset(arch_zone_highest_possible_pfn, 0, 3878 sizeof(arch_zone_highest_possible_pfn)); 3879 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 3880 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 3881 for (i = 1; i < MAX_NR_ZONES; i++) { 3882 if (i == ZONE_MOVABLE) 3883 continue; 3884 arch_zone_lowest_possible_pfn[i] = 3885 arch_zone_highest_possible_pfn[i-1]; 3886 arch_zone_highest_possible_pfn[i] = 3887 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 3888 } 3889 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; 3890 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; 3891 3892 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 3893 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 3894 find_zone_movable_pfns_for_nodes(zone_movable_pfn); 3895 3896 /* Print out the zone ranges */ 3897 printk("Zone PFN ranges:\n"); 3898 for (i = 0; i < MAX_NR_ZONES; i++) { 3899 if (i == ZONE_MOVABLE) 3900 continue; 3901 printk(" %-8s %8lu -> %8lu\n", 3902 zone_names[i], 3903 arch_zone_lowest_possible_pfn[i], 3904 arch_zone_highest_possible_pfn[i]); 3905 } 3906 3907 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 3908 printk("Movable zone start PFN for each node\n"); 3909 for (i = 0; i < MAX_NUMNODES; i++) { 3910 if (zone_movable_pfn[i]) 3911 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]); 3912 } 3913 3914 /* Print out the early_node_map[] */ 3915 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); 3916 for (i = 0; i < nr_nodemap_entries; i++) 3917 printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid, 3918 early_node_map[i].start_pfn, 3919 early_node_map[i].end_pfn); 3920 3921 /* Initialise every node */ 3922 setup_nr_node_ids(); 3923 for_each_online_node(nid) { 3924 pg_data_t *pgdat = NODE_DATA(nid); 3925 free_area_init_node(nid, pgdat, NULL, 3926 find_min_pfn_for_node(nid), NULL); 3927 3928 /* Any memory on that node */ 3929 if (pgdat->node_present_pages) 3930 node_set_state(nid, N_HIGH_MEMORY); 3931 check_for_regular_memory(pgdat); 3932 } 3933} 3934 3935static int __init cmdline_parse_core(char *p, unsigned long *core) 3936{ 3937 unsigned long long coremem; 3938 if (!p) 3939 return -EINVAL; 3940 3941 coremem = memparse(p, &p); 3942 *core = coremem >> PAGE_SHIFT; 3943 3944 /* Paranoid check that UL is enough for the coremem value */ 3945 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 3946 3947 return 0; 3948} 3949 3950/* 3951 * kernelcore=size sets the amount of memory for use for allocations that 3952 * cannot be reclaimed or migrated. 3953 */ 3954static int __init cmdline_parse_kernelcore(char *p) 3955{ 3956 return cmdline_parse_core(p, &required_kernelcore); 3957} 3958 3959/* 3960 * movablecore=size sets the amount of memory for use for allocations that 3961 * can be reclaimed or migrated. 
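 *
 * For example, booting with "movablecore=2G" requests that roughly 2G of
 * memory be kept usable for movable (reclaimable/migratable) allocations;
 * memparse() accepts the usual K/M/G suffixes, so "movablecore=2048M" is
 * equivalent.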
3962 */ 3963static int __init cmdline_parse_movablecore(char *p) 3964{ 3965 return cmdline_parse_core(p, &required_movablecore); 3966} 3967 3968early_param("kernelcore", cmdline_parse_kernelcore); 3969early_param("movablecore", cmdline_parse_movablecore); 3970 3971#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 3972 3973/** 3974 * set_dma_reserve - set the specified number of pages reserved in the first zone 3975 * @new_dma_reserve: The number of pages to mark reserved 3976 * 3977 * The per-cpu batchsize and zone watermarks are determined by present_pages. 3978 * In the DMA zone, a significant percentage may be consumed by kernel image 3979 * and other unfreeable allocations which can skew the watermarks badly. This 3980 * function may optionally be used to account for unfreeable pages in the 3981 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 3982 * smaller per-cpu batchsize. 3983 */ 3984void __init set_dma_reserve(unsigned long new_dma_reserve) 3985{ 3986 dma_reserve = new_dma_reserve; 3987} 3988 3989#ifndef CONFIG_NEED_MULTIPLE_NODES 3990static bootmem_data_t contig_bootmem_data; 3991struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data }; 3992 3993EXPORT_SYMBOL(contig_page_data); 3994#endif 3995 3996void __init free_area_init(unsigned long *zones_size) 3997{ 3998 free_area_init_node(0, NODE_DATA(0), zones_size, 3999 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 4000} 4001 4002static int page_alloc_cpu_notify(struct notifier_block *self, 4003 unsigned long action, void *hcpu) 4004{ 4005 int cpu = (unsigned long)hcpu; 4006 4007 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 4008 drain_pages(cpu); 4009 4010 /* 4011 * Spill the event counters of the dead processor 4012 * into the current processor's event counters. 4013 * This artificially elevates the count of the current 4014 * processor. 4015 */ 4016 vm_events_fold_cpu(cpu); 4017 4018 /* 4019 * Zero the differential counters of the dead processor 4020 * so that the vm statistics are consistent. 4021 * 4022 * This is only okay since the processor is dead and cannot 4023 * race with what we are doing. 4024 */ 4025 refresh_cpu_vm_stats(cpu); 4026 } 4027 return NOTIFY_OK; 4028} 4029 4030void __init page_alloc_init(void) 4031{ 4032 hotcpu_notifier(page_alloc_cpu_notify, 0); 4033} 4034 4035/* 4036 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 4037 * or min_free_kbytes changes. 4038 */ 4039static void calculate_totalreserve_pages(void) 4040{ 4041 struct pglist_data *pgdat; 4042 unsigned long reserve_pages = 0; 4043 enum zone_type i, j; 4044 4045 for_each_online_pgdat(pgdat) { 4046 for (i = 0; i < MAX_NR_ZONES; i++) { 4047 struct zone *zone = pgdat->node_zones + i; 4048 unsigned long max = 0; 4049 4050 /* Find valid and maximum lowmem_reserve in the zone */ 4051 for (j = i; j < MAX_NR_ZONES; j++) { 4052 if (zone->lowmem_reserve[j] > max) 4053 max = zone->lowmem_reserve[j]; 4054 } 4055 4056 /* we treat pages_high as reserved pages. */ 4057 max += zone->pages_high; 4058 4059 if (max > zone->present_pages) 4060 max = zone->present_pages; 4061 reserve_pages += max; 4062 } 4063 } 4064 totalreserve_pages = reserve_pages; 4065} 4066 4067/* 4068 * setup_per_zone_lowmem_reserve - called whenever 4069 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 4070 * has a correct pages reserved value, so an adequate number of 4071 * pages are left in the zone after a successful __alloc_pages().
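 *
 * A worked example with assumed sizes: if ZONE_NORMAL has 262144 present
 * pages (1GB with 4KB pages) and sysctl_lowmem_reserve_ratio[ZONE_DMA] is
 * 256, the loop below sets the DMA zone's lowmem_reserve[ZONE_NORMAL] to
 * 262144 / 256 = 1024 pages, i.e. an allocation that could also have been
 * satisfied from ZONE_NORMAL must leave an extra 1024 free pages in
 * ZONE_DMA before falling back to it.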
4072 */ 4073static void setup_per_zone_lowmem_reserve(void) 4074{ 4075 struct pglist_data *pgdat; 4076 enum zone_type j, idx; 4077 4078 for_each_online_pgdat(pgdat) { 4079 for (j = 0; j < MAX_NR_ZONES; j++) { 4080 struct zone *zone = pgdat->node_zones + j; 4081 unsigned long present_pages = zone->present_pages; 4082 4083 zone->lowmem_reserve[j] = 0; 4084 4085 idx = j; 4086 while (idx) { 4087 struct zone *lower_zone; 4088 4089 idx--; 4090 4091 if (sysctl_lowmem_reserve_ratio[idx] < 1) 4092 sysctl_lowmem_reserve_ratio[idx] = 1; 4093 4094 lower_zone = pgdat->node_zones + idx; 4095 lower_zone->lowmem_reserve[j] = present_pages / 4096 sysctl_lowmem_reserve_ratio[idx]; 4097 present_pages += lower_zone->present_pages; 4098 } 4099 } 4100 } 4101 4102 /* update totalreserve_pages */ 4103 calculate_totalreserve_pages(); 4104} 4105 4106/** 4107 * setup_per_zone_pages_min - called when min_free_kbytes changes. 4108 * 4109 * Ensures that the pages_{min,low,high} values for each zone are set correctly 4110 * with respect to min_free_kbytes. 4111 */ 4112void setup_per_zone_pages_min(void) 4113{ 4114 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 4115 unsigned long lowmem_pages = 0; 4116 struct zone *zone; 4117 unsigned long flags; 4118 4119 /* Calculate total number of !ZONE_HIGHMEM pages */ 4120 for_each_zone(zone) { 4121 if (!is_highmem(zone)) 4122 lowmem_pages += zone->present_pages; 4123 } 4124 4125 for_each_zone(zone) { 4126 u64 tmp; 4127 4128 spin_lock_irqsave(&zone->lru_lock, flags); 4129 tmp = (u64)pages_min * zone->present_pages; 4130 do_div(tmp, lowmem_pages); 4131 if (is_highmem(zone)) { 4132 /* 4133 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 4134 * need highmem pages, so cap pages_min to a small 4135 * value here. 4136 * 4137 * The (pages_high-pages_low) and (pages_low-pages_min) 4138 * deltas control async page reclaim, and so should 4139 * not be capped for highmem. 4140 */ 4141 int min_pages; 4142 4143 min_pages = zone->present_pages / 1024; 4144 if (min_pages < SWAP_CLUSTER_MAX) 4145 min_pages = SWAP_CLUSTER_MAX; 4146 if (min_pages > 128) 4147 min_pages = 128; 4148 zone->pages_min = min_pages; 4149 } else { 4150 /* 4151 * If it's a lowmem zone, reserve a number of pages 4152 * proportionate to the zone's size. 4153 */ 4154 zone->pages_min = tmp; 4155 } 4156 4157 zone->pages_low = zone->pages_min + (tmp >> 2); 4158 zone->pages_high = zone->pages_min + (tmp >> 1); 4159 setup_zone_migrate_reserve(zone); 4160 spin_unlock_irqrestore(&zone->lru_lock, flags); 4161 } 4162 4163 /* update totalreserve_pages */ 4164 calculate_totalreserve_pages(); 4165} 4166 4167/* 4168 * Initialise min_free_kbytes. 4169 * 4170 * For small machines we want it small (128k min). For large machines 4171 * we want it large (64MB max). But it is not linear, because network 4172 * bandwidth does not increase linearly with machine size.
We use 4173 * 4174 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 4175 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 4176 * 4177 * which yields 4178 * 4179 * 16MB: 512k 4180 * 32MB: 724k 4181 * 64MB: 1024k 4182 * 128MB: 1448k 4183 * 256MB: 2048k 4184 * 512MB: 2896k 4185 * 1024MB: 4096k 4186 * 2048MB: 5792k 4187 * 4096MB: 8192k 4188 * 8192MB: 11584k 4189 * 16384MB: 16384k 4190 */ 4191static int __init init_per_zone_pages_min(void) 4192{ 4193 unsigned long lowmem_kbytes; 4194 4195 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 4196 4197 min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 4198 if (min_free_kbytes < 128) 4199 min_free_kbytes = 128; 4200 if (min_free_kbytes > 65536) 4201 min_free_kbytes = 65536; 4202 setup_per_zone_pages_min(); 4203 setup_per_zone_lowmem_reserve(); 4204 return 0; 4205} 4206module_init(init_per_zone_pages_min) 4207 4208/* 4209 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 4210 * that we can call a helper function whenever min_free_kbytes 4211 * changes. 4212 */ 4213int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 4214 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4215{ 4216 proc_dointvec(table, write, file, buffer, length, ppos); 4217 if (write) 4218 setup_per_zone_pages_min(); 4219 return 0; 4220} 4221 4222#ifdef CONFIG_NUMA 4223int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, 4224 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4225{ 4226 struct zone *zone; 4227 int rc; 4228 4229 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4230 if (rc) 4231 return rc; 4232 4233 for_each_zone(zone) 4234 zone->min_unmapped_pages = (zone->present_pages * 4235 sysctl_min_unmapped_ratio) / 100; 4236 return 0; 4237} 4238 4239int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write, 4240 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4241{ 4242 struct zone *zone; 4243 int rc; 4244 4245 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4246 if (rc) 4247 return rc; 4248 4249 for_each_zone(zone) 4250 zone->min_slab_pages = (zone->present_pages * 4251 sysctl_min_slab_ratio) / 100; 4252 return 0; 4253} 4254#endif 4255 4256/* 4257 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 4258 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 4259 * whenever sysctl_lowmem_reserve_ratio changes. 4260 * 4261 * The reserve ratio has absolutely no relation to the 4262 * pages_min watermarks. The lowmem reserve ratio is only meaningful 4263 * as a function of the boot time zone sizes. 4264 */ 4265int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 4266 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4267{ 4268 proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4269 setup_per_zone_lowmem_reserve(); 4270 return 0; 4271} 4272 4273/* 4274 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 4275 * cpu. It is the fraction of total pages in each zone that a hot per-cpu pagelist 4276 * can have before it gets flushed back to the buddy allocator.
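 *
 * For example (an assumed value), "echo 8 > /proc/sys/vm/percpu_pagelist_fraction"
 * allows each per-cpu pagelist to hold up to present_pages/8 pages of its
 * zone before the excess is drained back to the buddy lists.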
4277 */ 4278 4279int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 4280 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4281{ 4282 struct zone *zone; 4283 unsigned int cpu; 4284 int ret; 4285 4286 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4287 if (!write || (ret == -EINVAL)) 4288 return ret; 4289 for_each_zone(zone) { 4290 for_each_online_cpu(cpu) { 4291 unsigned long high; 4292 high = zone->present_pages / percpu_pagelist_fraction; 4293 setup_pagelist_highmark(zone_pcp(zone, cpu), high); 4294 } 4295 } 4296 return 0; 4297} 4298 4299int hashdist = HASHDIST_DEFAULT; 4300 4301#ifdef CONFIG_NUMA 4302static int __init set_hashdist(char *str) 4303{ 4304 if (!str) 4305 return 0; 4306 hashdist = simple_strtoul(str, &str, 0); 4307 return 1; 4308} 4309__setup("hashdist=", set_hashdist); 4310#endif 4311 4312/* 4313 * allocate a large system hash table from bootmem 4314 * - it is assumed that the hash table must contain an exact power-of-2 4315 * quantity of entries 4316 * - limit is the number of hash buckets, not the total allocation size 4317 */ 4318void *__init alloc_large_system_hash(const char *tablename, 4319 unsigned long bucketsize, 4320 unsigned long numentries, 4321 int scale, 4322 int flags, 4323 unsigned int *_hash_shift, 4324 unsigned int *_hash_mask, 4325 unsigned long limit) 4326{ 4327 unsigned long long max = limit; 4328 unsigned long log2qty, size; 4329 void *table = NULL; 4330 4331 /* allow the kernel cmdline to have a say */ 4332 if (!numentries) { 4333 /* round applicable memory size up to nearest megabyte */ 4334 numentries = nr_kernel_pages; 4335 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 4336 numentries >>= 20 - PAGE_SHIFT; 4337 numentries <<= 20 - PAGE_SHIFT; 4338 4339 /* limit to 1 bucket per 2^scale bytes of low memory */ 4340 if (scale > PAGE_SHIFT) 4341 numentries >>= (scale - PAGE_SHIFT); 4342 else 4343 numentries <<= (PAGE_SHIFT - scale); 4344 4345 /* Make sure we've got at least a 0-order allocation.. */ 4346 if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 4347 numentries = PAGE_SIZE / bucketsize; 4348 } 4349 numentries = roundup_pow_of_two(numentries); 4350 4351 /* limit allocation size to 1/16 total memory by default */ 4352 if (max == 0) { 4353 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 4354 do_div(max, bucketsize); 4355 } 4356 4357 if (numentries > max) 4358 numentries = max; 4359 4360 log2qty = ilog2(numentries); 4361 4362 do { 4363 size = bucketsize << log2qty; 4364 if (flags & HASH_EARLY) 4365 table = alloc_bootmem(size); 4366 else if (hashdist) 4367 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 4368 else { 4369 unsigned long order = get_order(size); 4370 table = (void*) __get_free_pages(GFP_ATOMIC, order); 4371 /* 4372 * If bucketsize is not a power-of-two, we may free 4373 * some pages at the end of hash table. 
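			 * For example (figures assumed): a 96-byte bucket
			 * with 2^16 entries needs 6MB, but get_order()
			 * rounds the request up to an order-11 (8MB) block
			 * on 4KB pages, so the trailing 2MB worth of pages
			 * is split off and handed back one page at a time.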
4374 */ 4375 if (table) { 4376 unsigned long alloc_end = (unsigned long)table + 4377 (PAGE_SIZE << order); 4378 unsigned long used = (unsigned long)table + 4379 PAGE_ALIGN(size); 4380 split_page(virt_to_page(table), order); 4381 while (used < alloc_end) { 4382 free_page(used); 4383 used += PAGE_SIZE; 4384 } 4385 } 4386 } 4387 } while (!table && size > PAGE_SIZE && --log2qty); 4388 4389 if (!table) 4390 panic("Failed to allocate %s hash table\n", tablename); 4391 4392 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n", 4393 tablename, 4394 (1U << log2qty), 4395 ilog2(size) - PAGE_SHIFT, 4396 size); 4397 4398 if (_hash_shift) 4399 *_hash_shift = log2qty; 4400 if (_hash_mask) 4401 *_hash_mask = (1 << log2qty) - 1; 4402 4403 return table; 4404} 4405 4406#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE 4407struct page *pfn_to_page(unsigned long pfn) 4408{ 4409 return __pfn_to_page(pfn); 4410} 4411unsigned long page_to_pfn(struct page *page) 4412{ 4413 return __page_to_pfn(page); 4414} 4415EXPORT_SYMBOL(pfn_to_page); 4416EXPORT_SYMBOL(page_to_pfn); 4417#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ 4418 4419/* Return a pointer to the bitmap storing bits affecting a block of pages */ 4420static inline unsigned long *get_pageblock_bitmap(struct zone *zone, 4421 unsigned long pfn) 4422{ 4423#ifdef CONFIG_SPARSEMEM 4424 return __pfn_to_section(pfn)->pageblock_flags; 4425#else 4426 return zone->pageblock_flags; 4427#endif /* CONFIG_SPARSEMEM */ 4428} 4429 4430static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) 4431{ 4432#ifdef CONFIG_SPARSEMEM 4433 pfn &= (PAGES_PER_SECTION-1); 4434 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 4435#else 4436 pfn = pfn - zone->zone_start_pfn; 4437 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 4438#endif /* CONFIG_SPARSEMEM */ 4439} 4440 4441/** 4442 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages 4443 * @page: The page within the block of interest 4444 * @start_bitidx: The first bit of interest to retrieve 4445 * @end_bitidx: The last bit of interest 4446 * returns pageblock_bits flags 4447 */ 4448unsigned long get_pageblock_flags_group(struct page *page, 4449 int start_bitidx, int end_bitidx) 4450{ 4451 struct zone *zone; 4452 unsigned long *bitmap; 4453 unsigned long pfn, bitidx; 4454 unsigned long flags = 0; 4455 unsigned long value = 1; 4456 4457 zone = page_zone(page); 4458 pfn = page_to_pfn(page); 4459 bitmap = get_pageblock_bitmap(zone, pfn); 4460 bitidx = pfn_to_bitidx(zone, pfn); 4461 4462 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 4463 if (test_bit(bitidx + start_bitidx, bitmap)) 4464 flags |= value; 4465 4466 return flags; 4467} 4468 4469/** 4470 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages 4471 * @page: The page within the block of interest 4472 * @start_bitidx: The first bit of interest 4473 * @end_bitidx: The last bit of interest 4474 * @flags: The flags to set 4475 */ 4476void set_pageblock_flags_group(struct page *page, unsigned long flags, 4477 int start_bitidx, int end_bitidx) 4478{ 4479 struct zone *zone; 4480 unsigned long *bitmap; 4481 unsigned long pfn, bitidx; 4482 unsigned long value = 1; 4483 4484 zone = page_zone(page); 4485 pfn = page_to_pfn(page); 4486 bitmap = get_pageblock_bitmap(zone, pfn); 4487 bitidx = pfn_to_bitidx(zone, pfn); 4488 VM_BUG_ON(pfn < zone->zone_start_pfn); 4489 VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages); 4490 4491 for (; 
start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 4492 if (flags & value) 4493 __set_bit(bitidx + start_bitidx, bitmap); 4494 else 4495 __clear_bit(bitidx + start_bitidx, bitmap); 4496} 4497 4498/* 4499 * These are designed as helper functions; please see page_isolation.c as well. 4500 * They set/clear a pageblock's migrate type to ISOLATE. 4501 * The page allocator never allocates memory from an ISOLATE pageblock. 4502 */ 4503 4504int set_migratetype_isolate(struct page *page) 4505{ 4506 struct zone *zone; 4507 unsigned long flags; 4508 int ret = -EBUSY; 4509 4510 zone = page_zone(page); 4511 spin_lock_irqsave(&zone->lock, flags); 4512 /* 4513 * In the future, more migrate types will be able to be isolation targets. 4514 */ 4515 if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE) 4516 goto out; 4517 set_pageblock_migratetype(page, MIGRATE_ISOLATE); 4518 move_freepages_block(zone, page, MIGRATE_ISOLATE); 4519 ret = 0; 4520out: 4521 spin_unlock_irqrestore(&zone->lock, flags); 4522 if (!ret) 4523 drain_all_pages(); 4524 return ret; 4525} 4526 4527void unset_migratetype_isolate(struct page *page) 4528{ 4529 struct zone *zone; 4530 unsigned long flags; 4531 zone = page_zone(page); 4532 spin_lock_irqsave(&zone->lock, flags); 4533 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) 4534 goto out; 4535 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4536 move_freepages_block(zone, page, MIGRATE_MOVABLE); 4537out: 4538 spin_unlock_irqrestore(&zone->lock, flags); 4539} 4540 4541#ifdef CONFIG_MEMORY_HOTREMOVE 4542/* 4543 * All pages in the range must be isolated before calling this. 4544 */ 4545void 4546__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 4547{ 4548 struct page *page; 4549 struct zone *zone; 4550 int order, i; 4551 unsigned long pfn; 4552 unsigned long flags; 4553 /* find the first valid pfn */ 4554 for (pfn = start_pfn; pfn < end_pfn; pfn++) 4555 if (pfn_valid(pfn)) 4556 break; 4557 if (pfn == end_pfn) 4558 return; 4559 zone = page_zone(pfn_to_page(pfn)); 4560 spin_lock_irqsave(&zone->lock, flags); 4561 pfn = start_pfn; 4562 while (pfn < end_pfn) { 4563 if (!pfn_valid(pfn)) { 4564 pfn++; 4565 continue; 4566 } 4567 page = pfn_to_page(pfn); 4568 BUG_ON(page_count(page)); 4569 BUG_ON(!PageBuddy(page)); 4570 order = page_order(page); 4571#ifdef CONFIG_DEBUG_VM 4572 printk(KERN_INFO "remove from free list %lx %d %lx\n", 4573 pfn, 1 << order, end_pfn); 4574#endif 4575 list_del(&page->lru); 4576 rmv_page_order(page); 4577 zone->free_area[order].nr_free--; 4578 __mod_zone_page_state(zone, NR_FREE_PAGES, 4579 - (1UL << order)); 4580 for (i = 0; i < (1 << order); i++) 4581 SetPageReserved((page+i)); 4582 pfn += (1 << order); 4583 } 4584 spin_unlock_irqrestore(&zone->lock, flags); 4585} 4586#endif 4587
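
/*
 * Illustrative sketch of how the isolation helpers above are meant to be
 * combined for memory hot-remove (the real callers live in
 * mm/page_isolation.c and mm/memory_hotplug.c; start_pfn, end_pfn and the
 * migration step are assumptions here):
 *
 *	if (set_migratetype_isolate(pfn_to_page(start_pfn)))
 *		return -EBUSY;		(pageblock was not MIGRATE_MOVABLE)
 *	... migrate or reclaim every allocated page in the range ...
 *	__offline_isolated_pages(start_pfn, end_pfn);
 *
 * or, if offlining has to be abandoned,
 *
 *	unset_migratetype_isolate(pfn_to_page(start_pfn));
 */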