page_alloc.c revision 82553a937f12352c26fe457510ebab3f512cd3fa
1/* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17#include <linux/stddef.h> 18#include <linux/mm.h> 19#include <linux/swap.h> 20#include <linux/interrupt.h> 21#include <linux/pagemap.h> 22#include <linux/jiffies.h> 23#include <linux/bootmem.h> 24#include <linux/compiler.h> 25#include <linux/kernel.h> 26#include <linux/module.h> 27#include <linux/suspend.h> 28#include <linux/pagevec.h> 29#include <linux/blkdev.h> 30#include <linux/slab.h> 31#include <linux/oom.h> 32#include <linux/notifier.h> 33#include <linux/topology.h> 34#include <linux/sysctl.h> 35#include <linux/cpu.h> 36#include <linux/cpuset.h> 37#include <linux/memory_hotplug.h> 38#include <linux/nodemask.h> 39#include <linux/vmalloc.h> 40#include <linux/mempolicy.h> 41#include <linux/stop_machine.h> 42#include <linux/sort.h> 43#include <linux/pfn.h> 44#include <linux/backing-dev.h> 45#include <linux/fault-inject.h> 46#include <linux/page-isolation.h> 47#include <linux/page_cgroup.h> 48#include <linux/debugobjects.h> 49#include <linux/kmemleak.h> 50 51#include <asm/tlbflush.h> 52#include <asm/div64.h> 53#include "internal.h" 54 55/* 56 * Array of node states. 57 */ 58nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 59 [N_POSSIBLE] = NODE_MASK_ALL, 60 [N_ONLINE] = { { [0] = 1UL } }, 61#ifndef CONFIG_NUMA 62 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 63#ifdef CONFIG_HIGHMEM 64 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 65#endif 66 [N_CPU] = { { [0] = 1UL } }, 67#endif /* NUMA */ 68}; 69EXPORT_SYMBOL(node_states); 70 71unsigned long totalram_pages __read_mostly; 72unsigned long totalreserve_pages __read_mostly; 73unsigned long highest_memmap_pfn __read_mostly; 74int percpu_pagelist_fraction; 75 76#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 77int pageblock_order __read_mostly; 78#endif 79 80static void __free_pages_ok(struct page *page, unsigned int order); 81 82/* 83 * results with 256, 32 in the lowmem_reserve sysctl: 84 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 85 * 1G machine -> (16M dma, 784M normal, 224M high) 86 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 87 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 88 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA 89 * 90 * TBD: should special case ZONE_DMA32 machines here - in those we normally 91 * don't need any ZONE_NORMAL reservation 92 */ 93int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 94#ifdef CONFIG_ZONE_DMA 95 256, 96#endif 97#ifdef CONFIG_ZONE_DMA32 98 256, 99#endif 100#ifdef CONFIG_HIGHMEM 101 32, 102#endif 103 32, 104}; 105 106EXPORT_SYMBOL(totalram_pages); 107 108static char * const zone_names[MAX_NR_ZONES] = { 109#ifdef CONFIG_ZONE_DMA 110 "DMA", 111#endif 112#ifdef CONFIG_ZONE_DMA32 113 "DMA32", 114#endif 115 "Normal", 116#ifdef CONFIG_HIGHMEM 117 "HighMem", 118#endif 119 "Movable", 120}; 121 122int min_free_kbytes = 1024; 123 124unsigned long __meminitdata nr_kernel_pages; 125unsigned long 
__meminitdata nr_all_pages; 126static unsigned long __meminitdata dma_reserve; 127 128#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 129 /* 130 * MAX_ACTIVE_REGIONS determines the maximum number of distinct 131 * ranges of memory (RAM) that may be registered with add_active_range(). 132 * Ranges passed to add_active_range() will be merged if possible 133 * so the number of times add_active_range() can be called is 134 * related to the number of nodes and the number of holes 135 */ 136 #ifdef CONFIG_MAX_ACTIVE_REGIONS 137 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ 138 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS 139 #else 140 #if MAX_NUMNODES >= 32 141 /* If there can be many nodes, allow up to 50 holes per node */ 142 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) 143 #else 144 /* By default, allow up to 256 distinct regions */ 145 #define MAX_ACTIVE_REGIONS 256 146 #endif 147 #endif 148 149 static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; 150 static int __meminitdata nr_nodemap_entries; 151 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; 152 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; 153 static unsigned long __initdata required_kernelcore; 154 static unsigned long __initdata required_movablecore; 155 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; 156 157 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 158 int movable_zone; 159 EXPORT_SYMBOL(movable_zone); 160#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 161 162#if MAX_NUMNODES > 1 163int nr_node_ids __read_mostly = MAX_NUMNODES; 164int nr_online_nodes __read_mostly = 1; 165EXPORT_SYMBOL(nr_node_ids); 166EXPORT_SYMBOL(nr_online_nodes); 167#endif 168 169int page_group_by_mobility_disabled __read_mostly; 170 171static void set_pageblock_migratetype(struct page *page, int migratetype) 172{ 173 174 if (unlikely(page_group_by_mobility_disabled)) 175 migratetype = MIGRATE_UNMOVABLE; 176 177 set_pageblock_flags_group(page, (unsigned long)migratetype, 178 PB_migrate, PB_migrate_end); 179} 180 181bool oom_killer_disabled __read_mostly; 182 183#ifdef CONFIG_DEBUG_VM 184static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 185{ 186 int ret = 0; 187 unsigned seq; 188 unsigned long pfn = page_to_pfn(page); 189 190 do { 191 seq = zone_span_seqbegin(zone); 192 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 193 ret = 1; 194 else if (pfn < zone->zone_start_pfn) 195 ret = 1; 196 } while (zone_span_seqretry(zone, seq)); 197 198 return ret; 199} 200 201static int page_is_consistent(struct zone *zone, struct page *page) 202{ 203 if (!pfn_valid_within(page_to_pfn(page))) 204 return 0; 205 if (zone != page_zone(page)) 206 return 0; 207 208 return 1; 209} 210/* 211 * Temporary debugging check for pages not lying within a given zone. 212 */ 213static int bad_range(struct zone *zone, struct page *page) 214{ 215 if (page_outside_zone_boundaries(zone, page)) 216 return 1; 217 if (!page_is_consistent(zone, page)) 218 return 1; 219 220 return 0; 221} 222#else 223static inline int bad_range(struct zone *zone, struct page *page) 224{ 225 return 0; 226} 227#endif 228 229static void bad_page(struct page *page) 230{ 231 static unsigned long resume; 232 static unsigned long nr_shown; 233 static unsigned long nr_unshown; 234 235 /* 236 * Allow a burst of 60 reports, then keep quiet for that minute; 237 * or allow a steady drip of one report per second. 
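	 *
	 * Concretely: the first report of a burst stamps 'resume' one minute
	 * ahead; once 60 reports have been shown, anything arriving before
	 * that stamp is only counted, and the suppressed total is printed
	 * when reporting resumes.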
238 */ 239 if (nr_shown == 60) { 240 if (time_before(jiffies, resume)) { 241 nr_unshown++; 242 goto out; 243 } 244 if (nr_unshown) { 245 printk(KERN_ALERT 246 "BUG: Bad page state: %lu messages suppressed\n", 247 nr_unshown); 248 nr_unshown = 0; 249 } 250 nr_shown = 0; 251 } 252 if (nr_shown++ == 0) 253 resume = jiffies + 60 * HZ; 254 255 printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", 256 current->comm, page_to_pfn(page)); 257 printk(KERN_ALERT 258 "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n", 259 page, (void *)page->flags, page_count(page), 260 page_mapcount(page), page->mapping, page->index); 261 262 dump_stack(); 263out: 264 /* Leave bad fields for debug, except PageBuddy could make trouble */ 265 __ClearPageBuddy(page); 266 add_taint(TAINT_BAD_PAGE); 267} 268 269/* 270 * Higher-order pages are called "compound pages". They are structured thusly: 271 * 272 * The first PAGE_SIZE page is called the "head page". 273 * 274 * The remaining PAGE_SIZE pages are called "tail pages". 275 * 276 * All pages have PG_compound set. All pages have their ->private pointing at 277 * the head page (even the head page has this). 278 * 279 * The first tail page's ->lru.next holds the address of the compound page's 280 * put_page() function. Its ->lru.prev holds the order of allocation. 281 * This usage means that zero-order pages may not be compound. 282 */ 283 284static void free_compound_page(struct page *page) 285{ 286 __free_pages_ok(page, compound_order(page)); 287} 288 289void prep_compound_page(struct page *page, unsigned long order) 290{ 291 int i; 292 int nr_pages = 1 << order; 293 294 set_compound_page_dtor(page, free_compound_page); 295 set_compound_order(page, order); 296 __SetPageHead(page); 297 for (i = 1; i < nr_pages; i++) { 298 struct page *p = page + i; 299 300 __SetPageTail(p); 301 p->first_page = page; 302 } 303} 304 305static int destroy_compound_page(struct page *page, unsigned long order) 306{ 307 int i; 308 int nr_pages = 1 << order; 309 int bad = 0; 310 311 if (unlikely(compound_order(page) != order) || 312 unlikely(!PageHead(page))) { 313 bad_page(page); 314 bad++; 315 } 316 317 __ClearPageHead(page); 318 319 for (i = 1; i < nr_pages; i++) { 320 struct page *p = page + i; 321 322 if (unlikely(!PageTail(p) || (p->first_page != page))) { 323 bad_page(page); 324 bad++; 325 } 326 __ClearPageTail(p); 327 } 328 329 return bad; 330} 331 332static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 333{ 334 int i; 335 336 /* 337 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 338 * and __GFP_HIGHMEM from hard or soft interrupt context. 339 */ 340 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt()); 341 for (i = 0; i < (1 << order); i++) 342 clear_highpage(page + i); 343} 344 345static inline void set_page_order(struct page *page, int order) 346{ 347 set_page_private(page, order); 348 __SetPageBuddy(page); 349} 350 351static inline void rmv_page_order(struct page *page) 352{ 353 __ClearPageBuddy(page); 354 set_page_private(page, 0); 355} 356 357/* 358 * Locate the struct page for both the matching buddy in our 359 * pair (buddy1) and the combined O(n+1) page they form (page). 
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with PG_buddy. A page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
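 *
 * Worked example of the index arithmetic above: freeing order-0 page 12
 * checks buddy 12 ^ (1 << 0) = 13; if 13 is free, the pair merges into
 * the order-1 block at 12 & ~1 = 12, whose own buddy is 12 ^ (1 << 1) = 14,
 * and the walk repeats until a buddy test fails or order MAX_ORDER-1 is
 * reached.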
437 * 438 * -- wli 439 */ 440 441static inline void __free_one_page(struct page *page, 442 struct zone *zone, unsigned int order, 443 int migratetype) 444{ 445 unsigned long page_idx; 446 447 if (unlikely(PageCompound(page))) 448 if (unlikely(destroy_compound_page(page, order))) 449 return; 450 451 VM_BUG_ON(migratetype == -1); 452 453 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 454 455 VM_BUG_ON(page_idx & ((1 << order) - 1)); 456 VM_BUG_ON(bad_range(zone, page)); 457 458 while (order < MAX_ORDER-1) { 459 unsigned long combined_idx; 460 struct page *buddy; 461 462 buddy = __page_find_buddy(page, page_idx, order); 463 if (!page_is_buddy(page, buddy, order)) 464 break; 465 466 /* Our buddy is free, merge with it and move up one order. */ 467 list_del(&buddy->lru); 468 zone->free_area[order].nr_free--; 469 rmv_page_order(buddy); 470 combined_idx = __find_combined_index(page_idx, order); 471 page = page + (combined_idx - page_idx); 472 page_idx = combined_idx; 473 order++; 474 } 475 set_page_order(page, order); 476 list_add(&page->lru, 477 &zone->free_area[order].free_list[migratetype]); 478 zone->free_area[order].nr_free++; 479} 480 481#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT 482/* 483 * free_page_mlock() -- clean up attempts to free and mlocked() page. 484 * Page should not be on lru, so no need to fix that up. 485 * free_pages_check() will verify... 486 */ 487static inline void free_page_mlock(struct page *page) 488{ 489 __ClearPageMlocked(page); 490 __dec_zone_page_state(page, NR_MLOCK); 491 __count_vm_event(UNEVICTABLE_MLOCKFREED); 492} 493#else 494static void free_page_mlock(struct page *page) { } 495#endif 496 497static inline int free_pages_check(struct page *page) 498{ 499 if (unlikely(page_mapcount(page) | 500 (page->mapping != NULL) | 501 (atomic_read(&page->_count) != 0) | 502 (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) { 503 bad_page(page); 504 return 1; 505 } 506 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP) 507 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 508 return 0; 509} 510 511/* 512 * Frees a list of pages. 513 * Assumes all pages on list are in same zone, and of same order. 514 * count is the number of pages to free. 515 * 516 * If the zone was previously in an "all pages pinned" state then look to 517 * see if this freeing clears that state. 518 * 519 * And clear the zone's pages_scanned counter, to hold off the "all pages are 520 * pinned" detection logic. 
521 */ 522static void free_pages_bulk(struct zone *zone, int count, 523 struct list_head *list, int order) 524{ 525 spin_lock(&zone->lock); 526 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); 527 zone->pages_scanned = 0; 528 529 __mod_zone_page_state(zone, NR_FREE_PAGES, count << order); 530 while (count--) { 531 struct page *page; 532 533 VM_BUG_ON(list_empty(list)); 534 page = list_entry(list->prev, struct page, lru); 535 /* have to delete it as __free_one_page list manipulates */ 536 list_del(&page->lru); 537 __free_one_page(page, zone, order, page_private(page)); 538 } 539 spin_unlock(&zone->lock); 540} 541 542static void free_one_page(struct zone *zone, struct page *page, int order, 543 int migratetype) 544{ 545 spin_lock(&zone->lock); 546 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); 547 zone->pages_scanned = 0; 548 549 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); 550 __free_one_page(page, zone, order, migratetype); 551 spin_unlock(&zone->lock); 552} 553 554static void __free_pages_ok(struct page *page, unsigned int order) 555{ 556 unsigned long flags; 557 int i; 558 int bad = 0; 559 int clearMlocked = PageMlocked(page); 560 561 for (i = 0 ; i < (1 << order) ; ++i) 562 bad += free_pages_check(page + i); 563 if (bad) 564 return; 565 566 if (!PageHighMem(page)) { 567 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); 568 debug_check_no_obj_freed(page_address(page), 569 PAGE_SIZE << order); 570 } 571 arch_free_page(page, order); 572 kernel_map_pages(page, 1 << order, 0); 573 574 local_irq_save(flags); 575 if (unlikely(clearMlocked)) 576 free_page_mlock(page); 577 __count_vm_events(PGFREE, 1 << order); 578 free_one_page(page_zone(page), page, order, 579 get_pageblock_migratetype(page)); 580 local_irq_restore(flags); 581} 582 583/* 584 * permit the bootmem allocator to evade page validation on high-order frees 585 */ 586void __meminit __free_pages_bootmem(struct page *page, unsigned int order) 587{ 588 if (order == 0) { 589 __ClearPageReserved(page); 590 set_page_count(page, 0); 591 set_page_refcounted(page); 592 __free_page(page); 593 } else { 594 int loop; 595 596 prefetchw(page); 597 for (loop = 0; loop < BITS_PER_LONG; loop++) { 598 struct page *p = &page[loop]; 599 600 if (loop + 1 < BITS_PER_LONG) 601 prefetchw(p + 1); 602 __ClearPageReserved(p); 603 set_page_count(p, 0); 604 } 605 606 set_page_refcounted(page); 607 __free_pages(page, order); 608 } 609} 610 611 612/* 613 * The order of subdivision here is critical for the IO subsystem. 614 * Please do not alter this order without good reasons and regression 615 * testing. Specifically, as large blocks of memory are subdivided, 616 * the order in which smaller blocks are delivered depends on the order 617 * they're subdivided in this function. This is the primary factor 618 * influencing the order in which pages are delivered to the IO 619 * subsystem according to empirical testing, and this is also justified 620 * by considering the behavior of a buddy system containing a single 621 * large block of memory acted on by a series of small allocations. 622 * This behavior is a critical factor in sglist merging's success. 
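 *
 * A concrete split, for illustration: serving an order-0 request from a
 * free order-3 block, expand() hands back the first page and queues the
 * trailing halves on successively lower free lists: an order-2 block at
 * offset 4, an order-1 block at offset 2 and an order-0 page at offset 1.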
623 * 624 * -- wli 625 */ 626static inline void expand(struct zone *zone, struct page *page, 627 int low, int high, struct free_area *area, 628 int migratetype) 629{ 630 unsigned long size = 1 << high; 631 632 while (high > low) { 633 area--; 634 high--; 635 size >>= 1; 636 VM_BUG_ON(bad_range(zone, &page[size])); 637 list_add(&page[size].lru, &area->free_list[migratetype]); 638 area->nr_free++; 639 set_page_order(&page[size], high); 640 } 641} 642 643/* 644 * This page is about to be returned from the page allocator 645 */ 646static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 647{ 648 if (unlikely(page_mapcount(page) | 649 (page->mapping != NULL) | 650 (atomic_read(&page->_count) != 0) | 651 (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) { 652 bad_page(page); 653 return 1; 654 } 655 656 set_page_private(page, 0); 657 set_page_refcounted(page); 658 659 arch_alloc_page(page, order); 660 kernel_map_pages(page, 1 << order, 1); 661 662 if (gfp_flags & __GFP_ZERO) 663 prep_zero_page(page, order, gfp_flags); 664 665 if (order && (gfp_flags & __GFP_COMP)) 666 prep_compound_page(page, order); 667 668 return 0; 669} 670 671/* 672 * Go through the free lists for the given migratetype and remove 673 * the smallest available page from the freelists 674 */ 675static inline 676struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 677 int migratetype) 678{ 679 unsigned int current_order; 680 struct free_area * area; 681 struct page *page; 682 683 /* Find a page of the appropriate size in the preferred list */ 684 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 685 area = &(zone->free_area[current_order]); 686 if (list_empty(&area->free_list[migratetype])) 687 continue; 688 689 page = list_entry(area->free_list[migratetype].next, 690 struct page, lru); 691 list_del(&page->lru); 692 rmv_page_order(page); 693 area->nr_free--; 694 expand(zone, page, order, current_order, area, migratetype); 695 return page; 696 } 697 698 return NULL; 699} 700 701 702/* 703 * This array describes the order lists are fallen back to when 704 * the free lists for the desirable migrate type are depleted 705 */ 706static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = { 707 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 708 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 709 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE }, 710 [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */ 711}; 712 713/* 714 * Move the free pages in a range to the free lists of the requested type. 715 * Note that start_page and end_pages are not aligned on a pageblock 716 * boundary. If alignment is required, use move_freepages_block() 717 */ 718static int move_freepages(struct zone *zone, 719 struct page *start_page, struct page *end_page, 720 int migratetype) 721{ 722 struct page *page; 723 unsigned long order; 724 int pages_moved = 0; 725 726#ifndef CONFIG_HOLES_IN_ZONE 727 /* 728 * page_zone is not safe to call in this context when 729 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant 730 * anyway as we check zone boundaries in move_freepages_block(). 
731 * Remove at a later date when no bug reports exist related to 732 * grouping pages by mobility 733 */ 734 BUG_ON(page_zone(start_page) != page_zone(end_page)); 735#endif 736 737 for (page = start_page; page <= end_page;) { 738 /* Make sure we are not inadvertently changing nodes */ 739 VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone)); 740 741 if (!pfn_valid_within(page_to_pfn(page))) { 742 page++; 743 continue; 744 } 745 746 if (!PageBuddy(page)) { 747 page++; 748 continue; 749 } 750 751 order = page_order(page); 752 list_del(&page->lru); 753 list_add(&page->lru, 754 &zone->free_area[order].free_list[migratetype]); 755 page += 1 << order; 756 pages_moved += 1 << order; 757 } 758 759 return pages_moved; 760} 761 762static int move_freepages_block(struct zone *zone, struct page *page, 763 int migratetype) 764{ 765 unsigned long start_pfn, end_pfn; 766 struct page *start_page, *end_page; 767 768 start_pfn = page_to_pfn(page); 769 start_pfn = start_pfn & ~(pageblock_nr_pages-1); 770 start_page = pfn_to_page(start_pfn); 771 end_page = start_page + pageblock_nr_pages - 1; 772 end_pfn = start_pfn + pageblock_nr_pages - 1; 773 774 /* Do not cross zone boundaries */ 775 if (start_pfn < zone->zone_start_pfn) 776 start_page = page; 777 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages) 778 return 0; 779 780 return move_freepages(zone, start_page, end_page, migratetype); 781} 782 783/* Remove an element from the buddy allocator from the fallback list */ 784static inline struct page * 785__rmqueue_fallback(struct zone *zone, int order, int start_migratetype) 786{ 787 struct free_area * area; 788 int current_order; 789 struct page *page; 790 int migratetype, i; 791 792 /* Find the largest possible block of pages in the other list */ 793 for (current_order = MAX_ORDER-1; current_order >= order; 794 --current_order) { 795 for (i = 0; i < MIGRATE_TYPES - 1; i++) { 796 migratetype = fallbacks[start_migratetype][i]; 797 798 /* MIGRATE_RESERVE handled later if necessary */ 799 if (migratetype == MIGRATE_RESERVE) 800 continue; 801 802 area = &(zone->free_area[current_order]); 803 if (list_empty(&area->free_list[migratetype])) 804 continue; 805 806 page = list_entry(area->free_list[migratetype].next, 807 struct page, lru); 808 area->nr_free--; 809 810 /* 811 * If breaking a large block of pages, move all free 812 * pages to the preferred allocation list. If falling 813 * back for a reclaimable kernel allocation, be more 814 * agressive about taking ownership of free pages 815 */ 816 if (unlikely(current_order >= (pageblock_order >> 1)) || 817 start_migratetype == MIGRATE_RECLAIMABLE) { 818 unsigned long pages; 819 pages = move_freepages_block(zone, page, 820 start_migratetype); 821 822 /* Claim the whole block if over half of it is free */ 823 if (pages >= (1 << (pageblock_order-1))) 824 set_pageblock_migratetype(page, 825 start_migratetype); 826 827 migratetype = start_migratetype; 828 } 829 830 /* Remove the page from the freelists */ 831 list_del(&page->lru); 832 rmv_page_order(page); 833 834 if (current_order == pageblock_order) 835 set_pageblock_migratetype(page, 836 start_migratetype); 837 838 expand(zone, page, order, current_order, area, migratetype); 839 return page; 840 } 841 } 842 843 return NULL; 844} 845 846/* 847 * Do the hard work of removing an element from the buddy allocator. 848 * Call me with the zone->lock already held. 
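 *
 * For example, an order-0 MIGRATE_MOVABLE request whose own free lists are
 * empty falls back to MIGRATE_RECLAIMABLE, then MIGRATE_UNMOVABLE, per the
 * fallbacks[] table above; MIGRATE_RESERVE is skipped there and is only
 * used via the retry in __rmqueue() below.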
849 */ 850static struct page *__rmqueue(struct zone *zone, unsigned int order, 851 int migratetype) 852{ 853 struct page *page; 854 855retry_reserve: 856 page = __rmqueue_smallest(zone, order, migratetype); 857 858 if (unlikely(!page) && migratetype != MIGRATE_RESERVE) { 859 page = __rmqueue_fallback(zone, order, migratetype); 860 861 /* 862 * Use MIGRATE_RESERVE rather than fail an allocation. goto 863 * is used because __rmqueue_smallest is an inline function 864 * and we want just one call site 865 */ 866 if (!page) { 867 migratetype = MIGRATE_RESERVE; 868 goto retry_reserve; 869 } 870 } 871 872 return page; 873} 874 875/* 876 * Obtain a specified number of elements from the buddy allocator, all under 877 * a single hold of the lock, for efficiency. Add them to the supplied list. 878 * Returns the number of new pages which were placed at *list. 879 */ 880static int rmqueue_bulk(struct zone *zone, unsigned int order, 881 unsigned long count, struct list_head *list, 882 int migratetype) 883{ 884 int i; 885 886 spin_lock(&zone->lock); 887 for (i = 0; i < count; ++i) { 888 struct page *page = __rmqueue(zone, order, migratetype); 889 if (unlikely(page == NULL)) 890 break; 891 892 /* 893 * Split buddy pages returned by expand() are received here 894 * in physical page order. The page is added to the callers and 895 * list and the list head then moves forward. From the callers 896 * perspective, the linked list is ordered by page number in 897 * some conditions. This is useful for IO devices that can 898 * merge IO requests if the physical pages are ordered 899 * properly. 900 */ 901 list_add(&page->lru, list); 902 set_page_private(page, migratetype); 903 list = &page->lru; 904 } 905 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 906 spin_unlock(&zone->lock); 907 return i; 908} 909 910#ifdef CONFIG_NUMA 911/* 912 * Called from the vmstat counter updater to drain pagesets of this 913 * currently executing processor on remote nodes after they have 914 * expired. 915 * 916 * Note that this function must be called with the thread pinned to 917 * a single processor. 918 */ 919void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 920{ 921 unsigned long flags; 922 int to_drain; 923 924 local_irq_save(flags); 925 if (pcp->count >= pcp->batch) 926 to_drain = pcp->batch; 927 else 928 to_drain = pcp->count; 929 free_pages_bulk(zone, to_drain, &pcp->list, 0); 930 pcp->count -= to_drain; 931 local_irq_restore(flags); 932} 933#endif 934 935/* 936 * Drain pages of the indicated processor. 937 * 938 * The processor must either be the current processor and the 939 * thread pinned to the current processor or a processor that 940 * is not online. 941 */ 942static void drain_pages(unsigned int cpu) 943{ 944 unsigned long flags; 945 struct zone *zone; 946 947 for_each_populated_zone(zone) { 948 struct per_cpu_pageset *pset; 949 struct per_cpu_pages *pcp; 950 951 pset = zone_pcp(zone, cpu); 952 953 pcp = &pset->pcp; 954 local_irq_save(flags); 955 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 956 pcp->count = 0; 957 local_irq_restore(flags); 958 } 959} 960 961/* 962 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 
963 */ 964void drain_local_pages(void *arg) 965{ 966 drain_pages(smp_processor_id()); 967} 968 969/* 970 * Spill all the per-cpu pages from all CPUs back into the buddy allocator 971 */ 972void drain_all_pages(void) 973{ 974 on_each_cpu(drain_local_pages, NULL, 1); 975} 976 977#ifdef CONFIG_HIBERNATION 978 979void mark_free_pages(struct zone *zone) 980{ 981 unsigned long pfn, max_zone_pfn; 982 unsigned long flags; 983 int order, t; 984 struct list_head *curr; 985 986 if (!zone->spanned_pages) 987 return; 988 989 spin_lock_irqsave(&zone->lock, flags); 990 991 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 992 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 993 if (pfn_valid(pfn)) { 994 struct page *page = pfn_to_page(pfn); 995 996 if (!swsusp_page_is_forbidden(page)) 997 swsusp_unset_page_free(page); 998 } 999 1000 for_each_migratetype_order(order, t) { 1001 list_for_each(curr, &zone->free_area[order].free_list[t]) { 1002 unsigned long i; 1003 1004 pfn = page_to_pfn(list_entry(curr, struct page, lru)); 1005 for (i = 0; i < (1UL << order); i++) 1006 swsusp_set_page_free(pfn_to_page(pfn + i)); 1007 } 1008 } 1009 spin_unlock_irqrestore(&zone->lock, flags); 1010} 1011#endif /* CONFIG_PM */ 1012 1013/* 1014 * Free a 0-order page 1015 */ 1016static void free_hot_cold_page(struct page *page, int cold) 1017{ 1018 struct zone *zone = page_zone(page); 1019 struct per_cpu_pages *pcp; 1020 unsigned long flags; 1021 int clearMlocked = PageMlocked(page); 1022 1023 if (PageAnon(page)) 1024 page->mapping = NULL; 1025 if (free_pages_check(page)) 1026 return; 1027 1028 if (!PageHighMem(page)) { 1029 debug_check_no_locks_freed(page_address(page), PAGE_SIZE); 1030 debug_check_no_obj_freed(page_address(page), PAGE_SIZE); 1031 } 1032 arch_free_page(page, 0); 1033 kernel_map_pages(page, 1, 0); 1034 1035 pcp = &zone_pcp(zone, get_cpu())->pcp; 1036 set_page_private(page, get_pageblock_migratetype(page)); 1037 local_irq_save(flags); 1038 if (unlikely(clearMlocked)) 1039 free_page_mlock(page); 1040 __count_vm_event(PGFREE); 1041 1042 if (cold) 1043 list_add_tail(&page->lru, &pcp->list); 1044 else 1045 list_add(&page->lru, &pcp->list); 1046 pcp->count++; 1047 if (pcp->count >= pcp->high) { 1048 free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 1049 pcp->count -= pcp->batch; 1050 } 1051 local_irq_restore(flags); 1052 put_cpu(); 1053} 1054 1055void free_hot_page(struct page *page) 1056{ 1057 free_hot_cold_page(page, 0); 1058} 1059 1060void free_cold_page(struct page *page) 1061{ 1062 free_hot_cold_page(page, 1); 1063} 1064 1065/* 1066 * split_page takes a non-compound higher-order page, and splits it into 1067 * n (1<<order) sub-pages: page[0..n] 1068 * Each sub-page must be freed individually. 1069 * 1070 * Note: this is probably too low level an operation for use in drivers. 1071 * Please consult with lkml before using this in your driver. 1072 */ 1073void split_page(struct page *page, unsigned int order) 1074{ 1075 int i; 1076 1077 VM_BUG_ON(PageCompound(page)); 1078 VM_BUG_ON(!page_count(page)); 1079 for (i = 1; i < (1 << order); i++) 1080 set_page_refcounted(page + i); 1081} 1082 1083/* 1084 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 1085 * we cheat by calling it from here, in the order > 0 path. Saves a branch 1086 * or two. 
1087 */ 1088static inline 1089struct page *buffered_rmqueue(struct zone *preferred_zone, 1090 struct zone *zone, int order, gfp_t gfp_flags, 1091 int migratetype) 1092{ 1093 unsigned long flags; 1094 struct page *page; 1095 int cold = !!(gfp_flags & __GFP_COLD); 1096 int cpu; 1097 1098again: 1099 cpu = get_cpu(); 1100 if (likely(order == 0)) { 1101 struct per_cpu_pages *pcp; 1102 1103 pcp = &zone_pcp(zone, cpu)->pcp; 1104 local_irq_save(flags); 1105 if (!pcp->count) { 1106 pcp->count = rmqueue_bulk(zone, 0, 1107 pcp->batch, &pcp->list, migratetype); 1108 if (unlikely(!pcp->count)) 1109 goto failed; 1110 } 1111 1112 /* Find a page of the appropriate migrate type */ 1113 if (cold) { 1114 list_for_each_entry_reverse(page, &pcp->list, lru) 1115 if (page_private(page) == migratetype) 1116 break; 1117 } else { 1118 list_for_each_entry(page, &pcp->list, lru) 1119 if (page_private(page) == migratetype) 1120 break; 1121 } 1122 1123 /* Allocate more to the pcp list if necessary */ 1124 if (unlikely(&page->lru == &pcp->list)) { 1125 pcp->count += rmqueue_bulk(zone, 0, 1126 pcp->batch, &pcp->list, migratetype); 1127 page = list_entry(pcp->list.next, struct page, lru); 1128 } 1129 1130 list_del(&page->lru); 1131 pcp->count--; 1132 } else { 1133 if (unlikely(gfp_flags & __GFP_NOFAIL)) { 1134 /* 1135 * __GFP_NOFAIL is not to be used in new code. 1136 * 1137 * All __GFP_NOFAIL callers should be fixed so that they 1138 * properly detect and handle allocation failures. 1139 * 1140 * We most definitely don't want callers attempting to 1141 * allocate greater than single-page units with 1142 * __GFP_NOFAIL. 1143 */ 1144 WARN_ON_ONCE(order > 0); 1145 } 1146 spin_lock_irqsave(&zone->lock, flags); 1147 page = __rmqueue(zone, order, migratetype); 1148 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order)); 1149 spin_unlock(&zone->lock); 1150 if (!page) 1151 goto failed; 1152 } 1153 1154 __count_zone_vm_events(PGALLOC, zone, 1 << order); 1155 zone_statistics(preferred_zone, zone); 1156 local_irq_restore(flags); 1157 put_cpu(); 1158 1159 VM_BUG_ON(bad_range(zone, page)); 1160 if (prep_new_page(page, order, gfp_flags)) 1161 goto again; 1162 return page; 1163 1164failed: 1165 local_irq_restore(flags); 1166 put_cpu(); 1167 return NULL; 1168} 1169 1170/* The ALLOC_WMARK bits are used as an index to zone->watermark */ 1171#define ALLOC_WMARK_MIN WMARK_MIN 1172#define ALLOC_WMARK_LOW WMARK_LOW 1173#define ALLOC_WMARK_HIGH WMARK_HIGH 1174#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */ 1175 1176/* Mask to get the watermark bits */ 1177#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1) 1178 1179#define ALLOC_HARDER 0x10 /* try to alloc harder */ 1180#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 1181#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 1182 1183#ifdef CONFIG_FAIL_PAGE_ALLOC 1184 1185static struct fail_page_alloc_attr { 1186 struct fault_attr attr; 1187 1188 u32 ignore_gfp_highmem; 1189 u32 ignore_gfp_wait; 1190 u32 min_order; 1191 1192#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 1193 1194 struct dentry *ignore_gfp_highmem_file; 1195 struct dentry *ignore_gfp_wait_file; 1196 struct dentry *min_order_file; 1197 1198#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 1199 1200} fail_page_alloc = { 1201 .attr = FAULT_ATTR_INITIALIZER, 1202 .ignore_gfp_wait = 1, 1203 .ignore_gfp_highmem = 1, 1204 .min_order = 1, 1205}; 1206 1207static int __init setup_fail_page_alloc(char *str) 1208{ 1209 return setup_fault_attr(&fail_page_alloc.attr, str); 1210} 1211__setup("fail_page_alloc=", setup_fail_page_alloc); 
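
/*
 * Illustrative sketch (the pick_wmark() helper below is hypothetical, not
 * part of this file): the ALLOC_WMARK_* values defined above double as
 * indices into the zone's watermark[] array, which is why ALLOC_WMARK_MASK
 * is derived from ALLOC_NO_WATERMARKS:
 *
 *	static unsigned long pick_wmark(struct zone *z, int alloc_flags)
 *	{
 *		return z->watermark[alloc_flags & ALLOC_WMARK_MASK];
 *	}
 *
 * This matches the open-coded lookup in get_page_from_freelist() below.
 */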
1212 1213static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 1214{ 1215 if (order < fail_page_alloc.min_order) 1216 return 0; 1217 if (gfp_mask & __GFP_NOFAIL) 1218 return 0; 1219 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 1220 return 0; 1221 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) 1222 return 0; 1223 1224 return should_fail(&fail_page_alloc.attr, 1 << order); 1225} 1226 1227#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 1228 1229static int __init fail_page_alloc_debugfs(void) 1230{ 1231 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 1232 struct dentry *dir; 1233 int err; 1234 1235 err = init_fault_attr_dentries(&fail_page_alloc.attr, 1236 "fail_page_alloc"); 1237 if (err) 1238 return err; 1239 dir = fail_page_alloc.attr.dentries.dir; 1240 1241 fail_page_alloc.ignore_gfp_wait_file = 1242 debugfs_create_bool("ignore-gfp-wait", mode, dir, 1243 &fail_page_alloc.ignore_gfp_wait); 1244 1245 fail_page_alloc.ignore_gfp_highmem_file = 1246 debugfs_create_bool("ignore-gfp-highmem", mode, dir, 1247 &fail_page_alloc.ignore_gfp_highmem); 1248 fail_page_alloc.min_order_file = 1249 debugfs_create_u32("min-order", mode, dir, 1250 &fail_page_alloc.min_order); 1251 1252 if (!fail_page_alloc.ignore_gfp_wait_file || 1253 !fail_page_alloc.ignore_gfp_highmem_file || 1254 !fail_page_alloc.min_order_file) { 1255 err = -ENOMEM; 1256 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file); 1257 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file); 1258 debugfs_remove(fail_page_alloc.min_order_file); 1259 cleanup_fault_attr_dentries(&fail_page_alloc.attr); 1260 } 1261 1262 return err; 1263} 1264 1265late_initcall(fail_page_alloc_debugfs); 1266 1267#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 1268 1269#else /* CONFIG_FAIL_PAGE_ALLOC */ 1270 1271static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 1272{ 1273 return 0; 1274} 1275 1276#endif /* CONFIG_FAIL_PAGE_ALLOC */ 1277 1278/* 1279 * Return 1 if free pages are above 'mark'. This takes into account the order 1280 * of the allocation. 1281 */ 1282int zone_watermark_ok(struct zone *z, int order, unsigned long mark, 1283 int classzone_idx, int alloc_flags) 1284{ 1285 /* free_pages my go negative - that's OK */ 1286 long min = mark; 1287 long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1; 1288 int o; 1289 1290 if (alloc_flags & ALLOC_HIGH) 1291 min -= min / 2; 1292 if (alloc_flags & ALLOC_HARDER) 1293 min -= min / 4; 1294 1295 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 1296 return 0; 1297 for (o = 0; o < order; o++) { 1298 /* At the next order, this order's pages become unavailable */ 1299 free_pages -= z->free_area[o].nr_free << o; 1300 1301 /* Require fewer higher order pages to be free */ 1302 min >>= 1; 1303 1304 if (free_pages <= min) 1305 return 0; 1306 } 1307 return 1; 1308} 1309 1310#ifdef CONFIG_NUMA 1311/* 1312 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to 1313 * skip over zones that are not allowed by the cpuset, or that have 1314 * been recently (in last second) found to be nearly full. See further 1315 * comments in mmzone.h. Reduces cache footprint of zonelist scans 1316 * that have to skip over a lot of full or unallowed zones. 1317 * 1318 * If the zonelist cache is present in the passed in zonelist, then 1319 * returns a pointer to the allowed node mask (either the current 1320 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].) 
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	nodemask_t *allowednodes;	/* zonelist_cache approximation */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return NULL;

	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}

	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
					&cpuset_current_mems_allowed :
					&node_states[N_HIGH_MEMORY];
	return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 * 1) Check that the zone isn't thought to be full (doesn't have its
 *    bit set in the zonelist_cache fullzones BITMAP).
 * 2) Check that the zone's node (obtained from the zonelist_cache
 *    z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even if it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
						nodemask_t *allowednodes)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */
	int n;				/* node that zone *z is on */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return 1;

	i = z - zonelist->_zonerefs;
	n = zlc->z_to_n[i];

	/* This zone is worth trying if it is allowed but not full */
	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
1397 */ 1398static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 1399{ 1400 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1401 int i; /* index of *z in zonelist zones */ 1402 1403 zlc = zonelist->zlcache_ptr; 1404 if (!zlc) 1405 return; 1406 1407 i = z - zonelist->_zonerefs; 1408 1409 set_bit(i, zlc->fullzones); 1410} 1411 1412#else /* CONFIG_NUMA */ 1413 1414static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1415{ 1416 return NULL; 1417} 1418 1419static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, 1420 nodemask_t *allowednodes) 1421{ 1422 return 1; 1423} 1424 1425static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 1426{ 1427} 1428#endif /* CONFIG_NUMA */ 1429 1430/* 1431 * get_page_from_freelist goes through the zonelist trying to allocate 1432 * a page. 1433 */ 1434static struct page * 1435get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, 1436 struct zonelist *zonelist, int high_zoneidx, int alloc_flags, 1437 struct zone *preferred_zone, int migratetype) 1438{ 1439 struct zoneref *z; 1440 struct page *page = NULL; 1441 int classzone_idx; 1442 struct zone *zone; 1443 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ 1444 int zlc_active = 0; /* set if using zonelist_cache */ 1445 int did_zlc_setup = 0; /* just call zlc_setup() one time */ 1446 1447 classzone_idx = zone_idx(preferred_zone); 1448zonelist_scan: 1449 /* 1450 * Scan zonelist, looking for a zone with enough free. 1451 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1452 */ 1453 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1454 high_zoneidx, nodemask) { 1455 if (NUMA_BUILD && zlc_active && 1456 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 1457 continue; 1458 if ((alloc_flags & ALLOC_CPUSET) && 1459 !cpuset_zone_allowed_softwall(zone, gfp_mask)) 1460 goto try_next_zone; 1461 1462 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 1463 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 1464 unsigned long mark; 1465 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; 1466 if (!zone_watermark_ok(zone, order, mark, 1467 classzone_idx, alloc_flags)) { 1468 if (!zone_reclaim_mode || 1469 !zone_reclaim(zone, gfp_mask, order)) 1470 goto this_zone_full; 1471 } 1472 } 1473 1474 page = buffered_rmqueue(preferred_zone, zone, order, 1475 gfp_mask, migratetype); 1476 if (page) 1477 break; 1478this_zone_full: 1479 if (NUMA_BUILD) 1480 zlc_mark_zone_full(zonelist, z); 1481try_next_zone: 1482 if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) { 1483 /* 1484 * we do zlc_setup after the first zone is tried but only 1485 * if there are multiple nodes make it worthwhile 1486 */ 1487 allowednodes = zlc_setup(zonelist, alloc_flags); 1488 zlc_active = 1; 1489 did_zlc_setup = 1; 1490 } 1491 } 1492 1493 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) { 1494 /* Disable zlc cache for second zonelist scan */ 1495 zlc_active = 0; 1496 goto zonelist_scan; 1497 } 1498 return page; 1499} 1500 1501static inline int 1502should_alloc_retry(gfp_t gfp_mask, unsigned int order, 1503 unsigned long pages_reclaimed) 1504{ 1505 /* Do not loop if specifically requested */ 1506 if (gfp_mask & __GFP_NORETRY) 1507 return 0; 1508 1509 /* 1510 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER 1511 * means __GFP_NOFAIL, but that may not be true in other 1512 * implementations. 
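	 *
	 * So, for example, an order-1 GFP_KERNEL request keeps being retried
	 * here, while an order-4 request without __GFP_REPEAT or __GFP_NOFAIL
	 * is not retried at all once it reaches this point.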
1513 */ 1514 if (order <= PAGE_ALLOC_COSTLY_ORDER) 1515 return 1; 1516 1517 /* 1518 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is 1519 * specified, then we retry until we no longer reclaim any pages 1520 * (above), or we've reclaimed an order of pages at least as 1521 * large as the allocation's order. In both cases, if the 1522 * allocation still fails, we stop retrying. 1523 */ 1524 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order)) 1525 return 1; 1526 1527 /* 1528 * Don't let big-order allocations loop unless the caller 1529 * explicitly requests that. 1530 */ 1531 if (gfp_mask & __GFP_NOFAIL) 1532 return 1; 1533 1534 return 0; 1535} 1536 1537static inline struct page * 1538__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 1539 struct zonelist *zonelist, enum zone_type high_zoneidx, 1540 nodemask_t *nodemask, struct zone *preferred_zone, 1541 int migratetype) 1542{ 1543 struct page *page; 1544 1545 /* Acquire the OOM killer lock for the zones in zonelist */ 1546 if (!try_set_zone_oom(zonelist, gfp_mask)) { 1547 schedule_timeout_uninterruptible(1); 1548 return NULL; 1549 } 1550 1551 /* 1552 * Go through the zonelist yet one more time, keep very high watermark 1553 * here, this is only to catch a parallel oom killing, we must fail if 1554 * we're still under heavy pressure. 1555 */ 1556 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, 1557 order, zonelist, high_zoneidx, 1558 ALLOC_WMARK_HIGH|ALLOC_CPUSET, 1559 preferred_zone, migratetype); 1560 if (page) 1561 goto out; 1562 1563 /* The OOM killer will not help higher order allocs */ 1564 if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL)) 1565 goto out; 1566 1567 /* Exhausted what can be done so it's blamo time */ 1568 out_of_memory(zonelist, gfp_mask, order); 1569 1570out: 1571 clear_zonelist_oom(zonelist, gfp_mask); 1572 return page; 1573} 1574 1575/* The really slow allocator path where we enter direct reclaim */ 1576static inline struct page * 1577__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 1578 struct zonelist *zonelist, enum zone_type high_zoneidx, 1579 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 1580 int migratetype, unsigned long *did_some_progress) 1581{ 1582 struct page *page = NULL; 1583 struct reclaim_state reclaim_state; 1584 struct task_struct *p = current; 1585 1586 cond_resched(); 1587 1588 /* We now go into synchronous reclaim */ 1589 cpuset_memory_pressure_bump(); 1590 1591 /* 1592 * The task's cpuset might have expanded its set of allowable nodes 1593 */ 1594 p->flags |= PF_MEMALLOC; 1595 lockdep_set_current_reclaim_state(gfp_mask); 1596 reclaim_state.reclaimed_slab = 0; 1597 p->reclaim_state = &reclaim_state; 1598 1599 *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask); 1600 1601 p->reclaim_state = NULL; 1602 lockdep_clear_current_reclaim_state(); 1603 p->flags &= ~PF_MEMALLOC; 1604 1605 cond_resched(); 1606 1607 if (order != 0) 1608 drain_all_pages(); 1609 1610 if (likely(*did_some_progress)) 1611 page = get_page_from_freelist(gfp_mask, nodemask, order, 1612 zonelist, high_zoneidx, 1613 alloc_flags, preferred_zone, 1614 migratetype); 1615 return page; 1616} 1617 1618/* 1619 * This is called in the allocator slow-path if the allocation request is of 1620 * sufficient urgency to ignore watermarks and take other desperate measures 1621 */ 1622static inline struct page * 1623__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, 1624 struct zonelist *zonelist, enum zone_type 
high_zoneidx, 1625 nodemask_t *nodemask, struct zone *preferred_zone, 1626 int migratetype) 1627{ 1628 struct page *page; 1629 1630 do { 1631 page = get_page_from_freelist(gfp_mask, nodemask, order, 1632 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS, 1633 preferred_zone, migratetype); 1634 1635 if (!page && gfp_mask & __GFP_NOFAIL) 1636 congestion_wait(WRITE, HZ/50); 1637 } while (!page && (gfp_mask & __GFP_NOFAIL)); 1638 1639 return page; 1640} 1641 1642static inline 1643void wake_all_kswapd(unsigned int order, struct zonelist *zonelist, 1644 enum zone_type high_zoneidx) 1645{ 1646 struct zoneref *z; 1647 struct zone *zone; 1648 1649 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) 1650 wakeup_kswapd(zone, order); 1651} 1652 1653static inline int 1654gfp_to_alloc_flags(gfp_t gfp_mask) 1655{ 1656 struct task_struct *p = current; 1657 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 1658 const gfp_t wait = gfp_mask & __GFP_WAIT; 1659 1660 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ 1661 BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH); 1662 1663 /* 1664 * The caller may dip into page reserves a bit more if the caller 1665 * cannot run direct reclaim, or if the caller has realtime scheduling 1666 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 1667 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 1668 */ 1669 alloc_flags |= (gfp_mask & __GFP_HIGH); 1670 1671 if (!wait) { 1672 alloc_flags |= ALLOC_HARDER; 1673 /* 1674 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 1675 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1676 */ 1677 alloc_flags &= ~ALLOC_CPUSET; 1678 } else if (unlikely(rt_task(p))) 1679 alloc_flags |= ALLOC_HARDER; 1680 1681 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { 1682 if (!in_interrupt() && 1683 ((p->flags & PF_MEMALLOC) || 1684 unlikely(test_thread_flag(TIF_MEMDIE)))) 1685 alloc_flags |= ALLOC_NO_WATERMARKS; 1686 } 1687 1688 return alloc_flags; 1689} 1690 1691static inline struct page * 1692__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 1693 struct zonelist *zonelist, enum zone_type high_zoneidx, 1694 nodemask_t *nodemask, struct zone *preferred_zone, 1695 int migratetype) 1696{ 1697 const gfp_t wait = gfp_mask & __GFP_WAIT; 1698 struct page *page = NULL; 1699 int alloc_flags; 1700 unsigned long pages_reclaimed = 0; 1701 unsigned long did_some_progress; 1702 struct task_struct *p = current; 1703 1704 /* 1705 * In the slowpath, we sanity check order to avoid ever trying to 1706 * reclaim >= MAX_ORDER areas which will never succeed. Callers may 1707 * be using allocators in order of preference for an area that is 1708 * too large. 1709 */ 1710 if (WARN_ON_ONCE(order >= MAX_ORDER)) 1711 return NULL; 1712 1713 /* 1714 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and 1715 * __GFP_NOWARN set) should not cause reclaim since the subsystem 1716 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim 1717 * using a larger set of nodes after it has established that the 1718 * allowed per node queues are empty and that nodes are 1719 * over allocated. 1720 */ 1721 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 1722 goto nopage; 1723 1724 wake_all_kswapd(order, zonelist, high_zoneidx); 1725 1726 /* 1727 * OK, we're below the kswapd watermark and have kicked background 1728 * reclaim. Now things get more complex, so set up alloc_flags according 1729 * to how we want to proceed. 
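	 *
	 * A GFP_ATOMIC request, for instance, has __GFP_HIGH set and cannot
	 * wait, so gfp_to_alloc_flags() gives it ALLOC_WMARK_MIN | ALLOC_HIGH |
	 * ALLOC_HARDER and drops ALLOC_CPUSET, letting it dig deeper into the
	 * reserves than an ordinary GFP_KERNEL allocation.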
1730 */ 1731 alloc_flags = gfp_to_alloc_flags(gfp_mask); 1732 1733restart: 1734 /* This is the last chance, in general, before the goto nopage. */ 1735 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, 1736 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, 1737 preferred_zone, migratetype); 1738 if (page) 1739 goto got_pg; 1740 1741rebalance: 1742 /* Allocate without watermarks if the context allows */ 1743 if (alloc_flags & ALLOC_NO_WATERMARKS) { 1744 page = __alloc_pages_high_priority(gfp_mask, order, 1745 zonelist, high_zoneidx, nodemask, 1746 preferred_zone, migratetype); 1747 if (page) 1748 goto got_pg; 1749 } 1750 1751 /* Atomic allocations - we can't balance anything */ 1752 if (!wait) 1753 goto nopage; 1754 1755 /* Avoid recursion of direct reclaim */ 1756 if (p->flags & PF_MEMALLOC) 1757 goto nopage; 1758 1759 /* Try direct reclaim and then allocating */ 1760 page = __alloc_pages_direct_reclaim(gfp_mask, order, 1761 zonelist, high_zoneidx, 1762 nodemask, 1763 alloc_flags, preferred_zone, 1764 migratetype, &did_some_progress); 1765 if (page) 1766 goto got_pg; 1767 1768 /* 1769 * If we failed to make any progress reclaiming, then we are 1770 * running out of options and have to consider going OOM 1771 */ 1772 if (!did_some_progress) { 1773 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 1774 if (oom_killer_disabled) 1775 goto nopage; 1776 page = __alloc_pages_may_oom(gfp_mask, order, 1777 zonelist, high_zoneidx, 1778 nodemask, preferred_zone, 1779 migratetype); 1780 if (page) 1781 goto got_pg; 1782 1783 /* 1784 * The OOM killer does not trigger for high-order 1785 * ~__GFP_NOFAIL allocations so if no progress is being 1786 * made, there are no other options and retrying is 1787 * unlikely to help. 1788 */ 1789 if (order > PAGE_ALLOC_COSTLY_ORDER && 1790 !(gfp_mask & __GFP_NOFAIL)) 1791 goto nopage; 1792 1793 goto restart; 1794 } 1795 } 1796 1797 /* Check if we should retry the allocation */ 1798 pages_reclaimed += did_some_progress; 1799 if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) { 1800 /* Wait for some write requests to complete then retry */ 1801 congestion_wait(WRITE, HZ/50); 1802 goto rebalance; 1803 } 1804 1805nopage: 1806 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 1807 printk(KERN_WARNING "%s: page allocation failure." 1808 " order:%d, mode:0x%x\n", 1809 p->comm, order, gfp_mask); 1810 dump_stack(); 1811 show_mem(); 1812 } 1813got_pg: 1814 return page; 1815 1816} 1817 1818/* 1819 * This is the 'heart' of the zoned buddy allocator. 1820 */ 1821struct page * 1822__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 1823 struct zonelist *zonelist, nodemask_t *nodemask) 1824{ 1825 enum zone_type high_zoneidx = gfp_zone(gfp_mask); 1826 struct zone *preferred_zone; 1827 struct page *page; 1828 int migratetype = allocflags_to_migratetype(gfp_mask); 1829 1830 lockdep_trace_alloc(gfp_mask); 1831 1832 might_sleep_if(gfp_mask & __GFP_WAIT); 1833 1834 if (should_fail_alloc_page(gfp_mask, order)) 1835 return NULL; 1836 1837 /* 1838 * Check the zones suitable for the gfp_mask contain at least one 1839 * valid zone. 
It's possible to have an empty zonelist as a result 1840 * of GFP_THISNODE and a memoryless node 1841 */ 1842 if (unlikely(!zonelist->_zonerefs->zone)) 1843 return NULL; 1844 1845 /* The preferred zone is used for statistics later */ 1846 first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone); 1847 if (!preferred_zone) 1848 return NULL; 1849 1850 /* First allocation attempt */ 1851 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, 1852 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET, 1853 preferred_zone, migratetype); 1854 if (unlikely(!page)) 1855 page = __alloc_pages_slowpath(gfp_mask, order, 1856 zonelist, high_zoneidx, nodemask, 1857 preferred_zone, migratetype); 1858 1859 return page; 1860} 1861EXPORT_SYMBOL(__alloc_pages_nodemask); 1862 1863/* 1864 * Common helper functions. 1865 */ 1866unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 1867{ 1868 struct page * page; 1869 page = alloc_pages(gfp_mask, order); 1870 if (!page) 1871 return 0; 1872 return (unsigned long) page_address(page); 1873} 1874 1875EXPORT_SYMBOL(__get_free_pages); 1876 1877unsigned long get_zeroed_page(gfp_t gfp_mask) 1878{ 1879 struct page * page; 1880 1881 /* 1882 * get_zeroed_page() returns a 32-bit address, which cannot represent 1883 * a highmem page 1884 */ 1885 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1886 1887 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1888 if (page) 1889 return (unsigned long) page_address(page); 1890 return 0; 1891} 1892 1893EXPORT_SYMBOL(get_zeroed_page); 1894 1895void __pagevec_free(struct pagevec *pvec) 1896{ 1897 int i = pagevec_count(pvec); 1898 1899 while (--i >= 0) 1900 free_hot_cold_page(pvec->pages[i], pvec->cold); 1901} 1902 1903void __free_pages(struct page *page, unsigned int order) 1904{ 1905 if (put_page_testzero(page)) { 1906 if (order == 0) 1907 free_hot_page(page); 1908 else 1909 __free_pages_ok(page, order); 1910 } 1911} 1912 1913EXPORT_SYMBOL(__free_pages); 1914 1915void free_pages(unsigned long addr, unsigned int order) 1916{ 1917 if (addr != 0) { 1918 VM_BUG_ON(!virt_addr_valid((void *)addr)); 1919 __free_pages(virt_to_page((void *)addr), order); 1920 } 1921} 1922 1923EXPORT_SYMBOL(free_pages); 1924 1925/** 1926 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 1927 * @size: the number of bytes to allocate 1928 * @gfp_mask: GFP flags for the allocation 1929 * 1930 * This function is similar to alloc_pages(), except that it allocates the 1931 * minimum number of pages to satisfy the request. alloc_pages() can only 1932 * allocate memory in power-of-two pages. 1933 * 1934 * This function is also limited by MAX_ORDER. 1935 * 1936 * Memory allocated by this function must be released by free_pages_exact(). 1937 */ 1938void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 1939{ 1940 unsigned int order = get_order(size); 1941 unsigned long addr; 1942 1943 addr = __get_free_pages(gfp_mask, order); 1944 if (addr) { 1945 unsigned long alloc_end = addr + (PAGE_SIZE << order); 1946 unsigned long used = addr + PAGE_ALIGN(size); 1947 1948 split_page(virt_to_page(addr), order); 1949 while (used < alloc_end) { 1950 free_page(used); 1951 used += PAGE_SIZE; 1952 } 1953 } 1954 1955 return (void *)addr; 1956} 1957EXPORT_SYMBOL(alloc_pages_exact); 1958 1959/** 1960 * free_pages_exact - release memory allocated via alloc_pages_exact() 1961 * @virt: the value returned by alloc_pages_exact. 1962 * @size: size of allocation, same value as passed to alloc_pages_exact(). 
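 *
 * A typical pairing, for illustration (assuming a sleepable GFP_KERNEL
 * context):
 *
 *	void *buf = alloc_pages_exact(3 * PAGE_SIZE, GFP_KERNEL);
 *	...
 *	free_pages_exact(buf, 3 * PAGE_SIZE);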
1963 * 1964 * Release the memory allocated by a previous call to alloc_pages_exact. 1965 */ 1966void free_pages_exact(void *virt, size_t size) 1967{ 1968 unsigned long addr = (unsigned long)virt; 1969 unsigned long end = addr + PAGE_ALIGN(size); 1970 1971 while (addr < end) { 1972 free_page(addr); 1973 addr += PAGE_SIZE; 1974 } 1975} 1976EXPORT_SYMBOL(free_pages_exact); 1977 1978static unsigned int nr_free_zone_pages(int offset) 1979{ 1980 struct zoneref *z; 1981 struct zone *zone; 1982 1983 /* Just pick one node, since fallback list is circular */ 1984 unsigned int sum = 0; 1985 1986 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 1987 1988 for_each_zone_zonelist(zone, z, zonelist, offset) { 1989 unsigned long size = zone->present_pages; 1990 unsigned long high = high_wmark_pages(zone); 1991 if (size > high) 1992 sum += size - high; 1993 } 1994 1995 return sum; 1996} 1997 1998/* 1999 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 2000 */ 2001unsigned int nr_free_buffer_pages(void) 2002{ 2003 return nr_free_zone_pages(gfp_zone(GFP_USER)); 2004} 2005EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 2006 2007/* 2008 * Amount of free RAM allocatable within all zones 2009 */ 2010unsigned int nr_free_pagecache_pages(void) 2011{ 2012 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 2013} 2014 2015static inline void show_node(struct zone *zone) 2016{ 2017 if (NUMA_BUILD) 2018 printk("Node %d ", zone_to_nid(zone)); 2019} 2020 2021void si_meminfo(struct sysinfo *val) 2022{ 2023 val->totalram = totalram_pages; 2024 val->sharedram = 0; 2025 val->freeram = global_page_state(NR_FREE_PAGES); 2026 val->bufferram = nr_blockdev_pages(); 2027 val->totalhigh = totalhigh_pages; 2028 val->freehigh = nr_free_highpages(); 2029 val->mem_unit = PAGE_SIZE; 2030} 2031 2032EXPORT_SYMBOL(si_meminfo); 2033 2034#ifdef CONFIG_NUMA 2035void si_meminfo_node(struct sysinfo *val, int nid) 2036{ 2037 pg_data_t *pgdat = NODE_DATA(nid); 2038 2039 val->totalram = pgdat->node_present_pages; 2040 val->freeram = node_page_state(nid, NR_FREE_PAGES); 2041#ifdef CONFIG_HIGHMEM 2042 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 2043 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 2044 NR_FREE_PAGES); 2045#else 2046 val->totalhigh = 0; 2047 val->freehigh = 0; 2048#endif 2049 val->mem_unit = PAGE_SIZE; 2050} 2051#endif 2052 2053#define K(x) ((x) << (PAGE_SHIFT-10)) 2054 2055/* 2056 * Show free area list (used inside shift_scroll-lock stuff) 2057 * We also calculate the percentage fragmentation. We do this by counting the 2058 * memory on each free list with the exception of the first item on the list. 
2059 */ 2060void show_free_areas(void) 2061{ 2062 int cpu; 2063 struct zone *zone; 2064 2065 for_each_populated_zone(zone) { 2066 show_node(zone); 2067 printk("%s per-cpu:\n", zone->name); 2068 2069 for_each_online_cpu(cpu) { 2070 struct per_cpu_pageset *pageset; 2071 2072 pageset = zone_pcp(zone, cpu); 2073 2074 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n", 2075 cpu, pageset->pcp.high, 2076 pageset->pcp.batch, pageset->pcp.count); 2077 } 2078 } 2079 2080 printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n" 2081 " inactive_file:%lu" 2082 " unevictable:%lu" 2083 " dirty:%lu writeback:%lu unstable:%lu\n" 2084 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n", 2085 global_page_state(NR_ACTIVE_ANON), 2086 global_page_state(NR_ACTIVE_FILE), 2087 global_page_state(NR_INACTIVE_ANON), 2088 global_page_state(NR_INACTIVE_FILE), 2089 global_page_state(NR_UNEVICTABLE), 2090 global_page_state(NR_FILE_DIRTY), 2091 global_page_state(NR_WRITEBACK), 2092 global_page_state(NR_UNSTABLE_NFS), 2093 global_page_state(NR_FREE_PAGES), 2094 global_page_state(NR_SLAB_RECLAIMABLE) + 2095 global_page_state(NR_SLAB_UNRECLAIMABLE), 2096 global_page_state(NR_FILE_MAPPED), 2097 global_page_state(NR_PAGETABLE), 2098 global_page_state(NR_BOUNCE)); 2099 2100 for_each_populated_zone(zone) { 2101 int i; 2102 2103 show_node(zone); 2104 printk("%s" 2105 " free:%lukB" 2106 " min:%lukB" 2107 " low:%lukB" 2108 " high:%lukB" 2109 " active_anon:%lukB" 2110 " inactive_anon:%lukB" 2111 " active_file:%lukB" 2112 " inactive_file:%lukB" 2113 " unevictable:%lukB" 2114 " present:%lukB" 2115 " pages_scanned:%lu" 2116 " all_unreclaimable? %s" 2117 "\n", 2118 zone->name, 2119 K(zone_page_state(zone, NR_FREE_PAGES)), 2120 K(min_wmark_pages(zone)), 2121 K(low_wmark_pages(zone)), 2122 K(high_wmark_pages(zone)), 2123 K(zone_page_state(zone, NR_ACTIVE_ANON)), 2124 K(zone_page_state(zone, NR_INACTIVE_ANON)), 2125 K(zone_page_state(zone, NR_ACTIVE_FILE)), 2126 K(zone_page_state(zone, NR_INACTIVE_FILE)), 2127 K(zone_page_state(zone, NR_UNEVICTABLE)), 2128 K(zone->present_pages), 2129 zone->pages_scanned, 2130 (zone_is_all_unreclaimable(zone) ? "yes" : "no") 2131 ); 2132 printk("lowmem_reserve[]:"); 2133 for (i = 0; i < MAX_NR_ZONES; i++) 2134 printk(" %lu", zone->lowmem_reserve[i]); 2135 printk("\n"); 2136 } 2137 2138 for_each_populated_zone(zone) { 2139 unsigned long nr[MAX_ORDER], flags, order, total = 0; 2140 2141 show_node(zone); 2142 printk("%s: ", zone->name); 2143 2144 spin_lock_irqsave(&zone->lock, flags); 2145 for (order = 0; order < MAX_ORDER; order++) { 2146 nr[order] = zone->free_area[order].nr_free; 2147 total += nr[order] << order; 2148 } 2149 spin_unlock_irqrestore(&zone->lock, flags); 2150 for (order = 0; order < MAX_ORDER; order++) 2151 printk("%lu*%lukB ", nr[order], K(1UL) << order); 2152 printk("= %lukB\n", K(total)); 2153 } 2154 2155 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); 2156 2157 show_swap_cache_info(); 2158} 2159 2160static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 2161{ 2162 zoneref->zone = zone; 2163 zoneref->zone_idx = zone_idx(zone); 2164} 2165 2166/* 2167 * Builds allocation fallback zone lists. 2168 * 2169 * Add all populated zones of a node to the zonelist. 
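 * Zones are added highest first: a node with DMA, Normal and HighMem all
 * populated contributes HighMem, then Normal, then DMA, so the scarcer low
 * zones are only fallen back on as a last resort.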
2170 */ 2171static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, 2172 int nr_zones, enum zone_type zone_type) 2173{ 2174 struct zone *zone; 2175 2176 BUG_ON(zone_type >= MAX_NR_ZONES); 2177 zone_type++; 2178 2179 do { 2180 zone_type--; 2181 zone = pgdat->node_zones + zone_type; 2182 if (populated_zone(zone)) { 2183 zoneref_set_zone(zone, 2184 &zonelist->_zonerefs[nr_zones++]); 2185 check_highest_zone(zone_type); 2186 } 2187 2188 } while (zone_type); 2189 return nr_zones; 2190} 2191 2192 2193/* 2194 * zonelist_order: 2195 * 0 = automatic detection of better ordering. 2196 * 1 = order by ([node] distance, -zonetype) 2197 * 2 = order by (-zonetype, [node] distance) 2198 * 2199 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create 2200 * the same zonelist. So only NUMA can configure this param. 2201 */ 2202#define ZONELIST_ORDER_DEFAULT 0 2203#define ZONELIST_ORDER_NODE 1 2204#define ZONELIST_ORDER_ZONE 2 2205 2206/* zonelist order in the kernel. 2207 * set_zonelist_order() will set this to NODE or ZONE. 2208 */ 2209static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; 2210static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; 2211 2212 2213#ifdef CONFIG_NUMA 2214/* The value user specified ....changed by config */ 2215static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; 2216/* string for sysctl */ 2217#define NUMA_ZONELIST_ORDER_LEN 16 2218char numa_zonelist_order[16] = "default"; 2219 2220/* 2221 * interface for configure zonelist ordering. 2222 * command line option "numa_zonelist_order" 2223 * = "[dD]efault - default, automatic configuration. 2224 * = "[nN]ode - order by node locality, then by zone within node 2225 * = "[zZ]one - order by zone, then by locality within zone 2226 */ 2227 2228static int __parse_numa_zonelist_order(char *s) 2229{ 2230 if (*s == 'd' || *s == 'D') { 2231 user_zonelist_order = ZONELIST_ORDER_DEFAULT; 2232 } else if (*s == 'n' || *s == 'N') { 2233 user_zonelist_order = ZONELIST_ORDER_NODE; 2234 } else if (*s == 'z' || *s == 'Z') { 2235 user_zonelist_order = ZONELIST_ORDER_ZONE; 2236 } else { 2237 printk(KERN_WARNING 2238 "Ignoring invalid numa_zonelist_order value: " 2239 "%s\n", s); 2240 return -EINVAL; 2241 } 2242 return 0; 2243} 2244 2245static __init int setup_numa_zonelist_order(char *s) 2246{ 2247 if (s) 2248 return __parse_numa_zonelist_order(s); 2249 return 0; 2250} 2251early_param("numa_zonelist_order", setup_numa_zonelist_order); 2252 2253/* 2254 * sysctl handler for numa_zonelist_order 2255 */ 2256int numa_zonelist_order_handler(ctl_table *table, int write, 2257 struct file *file, void __user *buffer, size_t *length, 2258 loff_t *ppos) 2259{ 2260 char saved_string[NUMA_ZONELIST_ORDER_LEN]; 2261 int ret; 2262 2263 if (write) 2264 strncpy(saved_string, (char*)table->data, 2265 NUMA_ZONELIST_ORDER_LEN); 2266 ret = proc_dostring(table, write, file, buffer, length, ppos); 2267 if (ret) 2268 return ret; 2269 if (write) { 2270 int oldval = user_zonelist_order; 2271 if (__parse_numa_zonelist_order((char*)table->data)) { 2272 /* 2273 * bogus value. 
restore saved string 2274 */ 2275 strncpy((char*)table->data, saved_string, 2276 NUMA_ZONELIST_ORDER_LEN); 2277 user_zonelist_order = oldval; 2278 } else if (oldval != user_zonelist_order) 2279 build_all_zonelists(); 2280 } 2281 return 0; 2282} 2283 2284 2285#define MAX_NODE_LOAD (nr_online_nodes) 2286static int node_load[MAX_NUMNODES]; 2287 2288/** 2289 * find_next_best_node - find the next node that should appear in a given node's fallback list 2290 * @node: node whose fallback list we're appending 2291 * @used_node_mask: nodemask_t of already used nodes 2292 * 2293 * We use a number of factors to determine which is the next node that should 2294 * appear on a given node's fallback list. The node should not have appeared 2295 * already in @node's fallback list, and it should be the next closest node 2296 * according to the distance array (which contains arbitrary distance values 2297 * from each node to each node in the system), and should also prefer nodes 2298 * with no CPUs, since presumably they'll have very little allocation pressure 2299 * on them otherwise. 2300 * It returns -1 if no node is found. 2301 */ 2302static int find_next_best_node(int node, nodemask_t *used_node_mask) 2303{ 2304 int n, val; 2305 int min_val = INT_MAX; 2306 int best_node = -1; 2307 const struct cpumask *tmp = cpumask_of_node(0); 2308 2309 /* Use the local node if we haven't already */ 2310 if (!node_isset(node, *used_node_mask)) { 2311 node_set(node, *used_node_mask); 2312 return node; 2313 } 2314 2315 for_each_node_state(n, N_HIGH_MEMORY) { 2316 2317 /* Don't want a node to appear more than once */ 2318 if (node_isset(n, *used_node_mask)) 2319 continue; 2320 2321 /* Use the distance array to find the distance */ 2322 val = node_distance(node, n); 2323 2324 /* Penalize nodes under us ("prefer the next node") */ 2325 val += (n < node); 2326 2327 /* Give preference to headless and unused nodes */ 2328 tmp = cpumask_of_node(n); 2329 if (!cpumask_empty(tmp)) 2330 val += PENALTY_FOR_NODE_WITH_CPUS; 2331 2332 /* Slight preference for less loaded node */ 2333 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 2334 val += node_load[n]; 2335 2336 if (val < min_val) { 2337 min_val = val; 2338 best_node = n; 2339 } 2340 } 2341 2342 if (best_node >= 0) 2343 node_set(best_node, *used_node_mask); 2344 2345 return best_node; 2346} 2347 2348 2349/* 2350 * Build zonelists ordered by node and zones within node. 2351 * This results in maximum locality--normal zone overflows into local 2352 * DMA zone, if any--but risks exhausting DMA zone. 2353 */ 2354static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 2355{ 2356 int j; 2357 struct zonelist *zonelist; 2358 2359 zonelist = &pgdat->node_zonelists[0]; 2360 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) 2361 ; 2362 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2363 MAX_NR_ZONES - 1); 2364 zonelist->_zonerefs[j].zone = NULL; 2365 zonelist->_zonerefs[j].zone_idx = 0; 2366} 2367 2368/* 2369 * Build gfp_thisnode zonelists 2370 */ 2371static void build_thisnode_zonelists(pg_data_t *pgdat) 2372{ 2373 int j; 2374 struct zonelist *zonelist; 2375 2376 zonelist = &pgdat->node_zonelists[1]; 2377 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); 2378 zonelist->_zonerefs[j].zone = NULL; 2379 zonelist->_zonerefs[j].zone_idx = 0; 2380} 2381 2382/* 2383 * Build zonelists ordered by zone and nodes within zones. 
2384 * This results in conserving DMA zone[s] until all Normal memory is 2385 * exhausted, but results in overflowing to a remote node while memory 2386 * may still exist in local DMA zone. 2387 */ 2388static int node_order[MAX_NUMNODES]; 2389 2390static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes) 2391{ 2392 int pos, j, node; 2393 int zone_type; /* needs to be signed */ 2394 struct zone *z; 2395 struct zonelist *zonelist; 2396 2397 zonelist = &pgdat->node_zonelists[0]; 2398 pos = 0; 2399 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) { 2400 for (j = 0; j < nr_nodes; j++) { 2401 node = node_order[j]; 2402 z = &NODE_DATA(node)->node_zones[zone_type]; 2403 if (populated_zone(z)) { 2404 zoneref_set_zone(z, 2405 &zonelist->_zonerefs[pos++]); 2406 check_highest_zone(zone_type); 2407 } 2408 } 2409 } 2410 zonelist->_zonerefs[pos].zone = NULL; 2411 zonelist->_zonerefs[pos].zone_idx = 0; 2412} 2413 2414static int default_zonelist_order(void) 2415{ 2416 int nid, zone_type; 2417 unsigned long low_kmem_size, total_size; 2418 struct zone *z; 2419 int average_size; 2420 /* 2421 * ZONE_DMA and ZONE_DMA32 can be a very small area in the system. 2422 * If they are really small and used heavily, the system can fall 2423 * into OOM very easily. 2424 * This function detects the ZONE_DMA/DMA32 size and configures the zone order. 2425 */ 2426 /* Is there ZONE_NORMAL ? (e.g. ppc has only a DMA zone..) */ 2427 low_kmem_size = 0; 2428 total_size = 0; 2429 for_each_online_node(nid) { 2430 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 2431 z = &NODE_DATA(nid)->node_zones[zone_type]; 2432 if (populated_zone(z)) { 2433 if (zone_type < ZONE_NORMAL) 2434 low_kmem_size += z->present_pages; 2435 total_size += z->present_pages; 2436 } 2437 } 2438 } 2439 if (!low_kmem_size || /* there is no DMA area. */ 2440 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */ 2441 return ZONELIST_ORDER_NODE; 2442 /* 2443 * Look into each node's config. 2444 * If there is a node whose DMA/DMA32 memory covers a very large part of 2445 * its local memory, node ordering may be suitable.
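 * For example, a node where ZONE_DMA32 holds 3GB out of a 4GB total (over the
 * 70% threshold below) forces node ordering, provided the node is larger than
 * the per-node average size.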
2446 */ 2447 average_size = total_size / 2448 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1); 2449 for_each_online_node(nid) { 2450 low_kmem_size = 0; 2451 total_size = 0; 2452 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 2453 z = &NODE_DATA(nid)->node_zones[zone_type]; 2454 if (populated_zone(z)) { 2455 if (zone_type < ZONE_NORMAL) 2456 low_kmem_size += z->present_pages; 2457 total_size += z->present_pages; 2458 } 2459 } 2460 if (low_kmem_size && 2461 total_size > average_size && /* ignore small node */ 2462 low_kmem_size > total_size * 70/100) 2463 return ZONELIST_ORDER_NODE; 2464 } 2465 return ZONELIST_ORDER_ZONE; 2466} 2467 2468static void set_zonelist_order(void) 2469{ 2470 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) 2471 current_zonelist_order = default_zonelist_order(); 2472 else 2473 current_zonelist_order = user_zonelist_order; 2474} 2475 2476static void build_zonelists(pg_data_t *pgdat) 2477{ 2478 int j, node, load; 2479 enum zone_type i; 2480 nodemask_t used_mask; 2481 int local_node, prev_node; 2482 struct zonelist *zonelist; 2483 int order = current_zonelist_order; 2484 2485 /* initialize zonelists */ 2486 for (i = 0; i < MAX_ZONELISTS; i++) { 2487 zonelist = pgdat->node_zonelists + i; 2488 zonelist->_zonerefs[0].zone = NULL; 2489 zonelist->_zonerefs[0].zone_idx = 0; 2490 } 2491 2492 /* NUMA-aware ordering of nodes */ 2493 local_node = pgdat->node_id; 2494 load = nr_online_nodes; 2495 prev_node = local_node; 2496 nodes_clear(used_mask); 2497 2498 memset(node_load, 0, sizeof(node_load)); 2499 memset(node_order, 0, sizeof(node_order)); 2500 j = 0; 2501 2502 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 2503 int distance = node_distance(local_node, node); 2504 2505 /* 2506 * If another node is sufficiently far away then it is better 2507 * to reclaim pages in a zone before going off node. 2508 */ 2509 if (distance > RECLAIM_DISTANCE) 2510 zone_reclaim_mode = 1; 2511 2512 /* 2513 * We don't want to pressure a particular node. 2514 * So adding penalty to the first node in same 2515 * distance group to make it round-robin. 2516 */ 2517 if (distance != node_distance(local_node, prev_node)) 2518 node_load[node] = load; 2519 2520 prev_node = node; 2521 load--; 2522 if (order == ZONELIST_ORDER_NODE) 2523 build_zonelists_in_node_order(pgdat, node); 2524 else 2525 node_order[j++] = node; /* remember order */ 2526 } 2527 2528 if (order == ZONELIST_ORDER_ZONE) { 2529 /* calculate node order -- i.e., DMA last! 
*/ 2530 build_zonelists_in_zone_order(pgdat, j); 2531 } 2532 2533 build_thisnode_zonelists(pgdat); 2534} 2535 2536/* Construct the zonelist performance cache - see further mmzone.h */ 2537static void build_zonelist_cache(pg_data_t *pgdat) 2538{ 2539 struct zonelist *zonelist; 2540 struct zonelist_cache *zlc; 2541 struct zoneref *z; 2542 2543 zonelist = &pgdat->node_zonelists[0]; 2544 zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 2545 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 2546 for (z = zonelist->_zonerefs; z->zone; z++) 2547 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z); 2548} 2549 2550 2551#else /* CONFIG_NUMA */ 2552 2553static void set_zonelist_order(void) 2554{ 2555 current_zonelist_order = ZONELIST_ORDER_ZONE; 2556} 2557 2558static void build_zonelists(pg_data_t *pgdat) 2559{ 2560 int node, local_node; 2561 enum zone_type j; 2562 struct zonelist *zonelist; 2563 2564 local_node = pgdat->node_id; 2565 2566 zonelist = &pgdat->node_zonelists[0]; 2567 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); 2568 2569 /* 2570 * Now we build the zonelist so that it contains the zones 2571 * of all the other nodes. 2572 * We don't want to pressure a particular node, so when 2573 * building the zones for node N, we make sure that the 2574 * zones coming right after the local ones are those from 2575 * node N+1 (modulo N) 2576 */ 2577 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 2578 if (!node_online(node)) 2579 continue; 2580 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2581 MAX_NR_ZONES - 1); 2582 } 2583 for (node = 0; node < local_node; node++) { 2584 if (!node_online(node)) 2585 continue; 2586 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2587 MAX_NR_ZONES - 1); 2588 } 2589 2590 zonelist->_zonerefs[j].zone = NULL; 2591 zonelist->_zonerefs[j].zone_idx = 0; 2592} 2593 2594/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ 2595static void build_zonelist_cache(pg_data_t *pgdat) 2596{ 2597 pgdat->node_zonelists[0].zlcache_ptr = NULL; 2598} 2599 2600#endif /* CONFIG_NUMA */ 2601 2602/* return values int ....just for stop_machine() */ 2603static int __build_all_zonelists(void *dummy) 2604{ 2605 int nid; 2606 2607 for_each_online_node(nid) { 2608 pg_data_t *pgdat = NODE_DATA(nid); 2609 2610 build_zonelists(pgdat); 2611 build_zonelist_cache(pgdat); 2612 } 2613 return 0; 2614} 2615 2616void build_all_zonelists(void) 2617{ 2618 set_zonelist_order(); 2619 2620 if (system_state == SYSTEM_BOOTING) { 2621 __build_all_zonelists(NULL); 2622 mminit_verify_zonelist(); 2623 cpuset_init_current_mems_allowed(); 2624 } else { 2625 /* we have to stop all cpus to guarantee there is no user 2626 of zonelist */ 2627 stop_machine(__build_all_zonelists, NULL, NULL); 2628 /* cpuset refresh routine should be here */ 2629 } 2630 vm_total_pages = nr_free_pagecache_pages(); 2631 /* 2632 * Disable grouping by mobility if the number of pages in the 2633 * system is too low to allow the mechanism to work. It would be 2634 * more accurate, but expensive to check per-zone. This check is 2635 * made on memory-hotadd so a system can start with mobility 2636 * disabled and enable it later 2637 */ 2638 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 2639 page_group_by_mobility_disabled = 1; 2640 else 2641 page_group_by_mobility_disabled = 0; 2642 2643 printk("Built %i zonelists in %s order, mobility grouping %s. 
" 2644 "Total pages: %ld\n", 2645 nr_online_nodes, 2646 zonelist_order_name[current_zonelist_order], 2647 page_group_by_mobility_disabled ? "off" : "on", 2648 vm_total_pages); 2649#ifdef CONFIG_NUMA 2650 printk("Policy zone: %s\n", zone_names[policy_zone]); 2651#endif 2652} 2653 2654/* 2655 * Helper functions to size the waitqueue hash table. 2656 * Essentially these want to choose hash table sizes sufficiently 2657 * large so that collisions trying to wait on pages are rare. 2658 * But in fact, the number of active page waitqueues on typical 2659 * systems is ridiculously low, less than 200. So this is even 2660 * conservative, even though it seems large. 2661 * 2662 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 2663 * waitqueues, i.e. the size of the waitq table given the number of pages. 2664 */ 2665#define PAGES_PER_WAITQUEUE 256 2666 2667#ifndef CONFIG_MEMORY_HOTPLUG 2668static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 2669{ 2670 unsigned long size = 1; 2671 2672 pages /= PAGES_PER_WAITQUEUE; 2673 2674 while (size < pages) 2675 size <<= 1; 2676 2677 /* 2678 * Once we have dozens or even hundreds of threads sleeping 2679 * on IO we've got bigger problems than wait queue collision. 2680 * Limit the size of the wait table to a reasonable size. 2681 */ 2682 size = min(size, 4096UL); 2683 2684 return max(size, 4UL); 2685} 2686#else 2687/* 2688 * A zone's size might be changed by hot-add, so it is not possible to determine 2689 * a suitable size for its wait_table. So we use the maximum size now. 2690 * 2691 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 2692 * 2693 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 2694 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 2695 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 2696 * 2697 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 2698 * or more by the traditional way. (See above). It equals: 2699 * 2700 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 2701 * ia64(16K page size) : = ( 8G + 4M)byte. 2702 * powerpc (64K page size) : = (32G +16M)byte. 2703 */ 2704static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 2705{ 2706 return 4096UL; 2707} 2708#endif 2709 2710/* 2711 * This is an integer logarithm so that shifts can be used later 2712 * to extract the more random high bits from the multiplicative 2713 * hash function before the remainder is taken. 2714 */ 2715static inline unsigned long wait_table_bits(unsigned long size) 2716{ 2717 return ffz(~size); 2718} 2719 2720#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 2721 2722/* 2723 * Mark a number of pageblocks as MIGRATE_RESERVE. The number 2724 * of blocks reserved is based on min_wmark_pages(zone). The memory within 2725 * the reserve will tend to store contiguous free pages. 
Setting min_free_kbytes 2726 * higher will lead to a bigger reserve which will get freed as contiguous 2727 * blocks as reclaim kicks in 2728 */ 2729static void setup_zone_migrate_reserve(struct zone *zone) 2730{ 2731 unsigned long start_pfn, pfn, end_pfn; 2732 struct page *page; 2733 unsigned long reserve, block_migratetype; 2734 2735 /* Get the start pfn, end pfn and the number of blocks to reserve */ 2736 start_pfn = zone->zone_start_pfn; 2737 end_pfn = start_pfn + zone->spanned_pages; 2738 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >> 2739 pageblock_order; 2740 2741 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 2742 if (!pfn_valid(pfn)) 2743 continue; 2744 page = pfn_to_page(pfn); 2745 2746 /* Watch out for overlapping nodes */ 2747 if (page_to_nid(page) != zone_to_nid(zone)) 2748 continue; 2749 2750 /* Blocks with reserved pages will never free, skip them. */ 2751 if (PageReserved(page)) 2752 continue; 2753 2754 block_migratetype = get_pageblock_migratetype(page); 2755 2756 /* If this block is reserved, account for it */ 2757 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) { 2758 reserve--; 2759 continue; 2760 } 2761 2762 /* Suitable for reserving if this block is movable */ 2763 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) { 2764 set_pageblock_migratetype(page, MIGRATE_RESERVE); 2765 move_freepages_block(zone, page, MIGRATE_RESERVE); 2766 reserve--; 2767 continue; 2768 } 2769 2770 /* 2771 * If the reserve is met and this is a previous reserved block, 2772 * take it back 2773 */ 2774 if (block_migratetype == MIGRATE_RESERVE) { 2775 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 2776 move_freepages_block(zone, page, MIGRATE_MOVABLE); 2777 } 2778 } 2779} 2780 2781/* 2782 * Initially all pages are reserved - free ones are freed 2783 * up by free_all_bootmem() once the early boot process is 2784 * done. Non-atomic initialization, single-pass. 2785 */ 2786void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 2787 unsigned long start_pfn, enum memmap_context context) 2788{ 2789 struct page *page; 2790 unsigned long end_pfn = start_pfn + size; 2791 unsigned long pfn; 2792 struct zone *z; 2793 2794 if (highest_memmap_pfn < end_pfn - 1) 2795 highest_memmap_pfn = end_pfn - 1; 2796 2797 z = &NODE_DATA(nid)->node_zones[zone]; 2798 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 2799 /* 2800 * There can be holes in boot-time mem_map[]s 2801 * handed to this function. They do not 2802 * exist on hotplugged memory. 2803 */ 2804 if (context == MEMMAP_EARLY) { 2805 if (!early_pfn_valid(pfn)) 2806 continue; 2807 if (!early_pfn_in_nid(pfn, nid)) 2808 continue; 2809 } 2810 page = pfn_to_page(pfn); 2811 set_page_links(page, zone, nid, pfn); 2812 mminit_verify_page_links(page, zone, nid, pfn); 2813 init_page_count(page); 2814 reset_page_mapcount(page); 2815 SetPageReserved(page); 2816 /* 2817 * Mark the block movable so that blocks are reserved for 2818 * movable at startup. This will force kernel allocations 2819 * to reserve their blocks rather than leaking throughout 2820 * the address space during boot when many long-lived 2821 * kernel allocations are made. Later some blocks near 2822 * the start are marked MIGRATE_RESERVE by 2823 * setup_zone_migrate_reserve() 2824 * 2825 * bitmap is created for zone's valid pfn range. but memmap 2826 * can be created for invalid pages (for alignment) 2827 * check here not to call set_pageblock_migratetype() against 2828 * pfn out of zone. 
2829 */ 2830 if ((z->zone_start_pfn <= pfn) 2831 && (pfn < z->zone_start_pfn + z->spanned_pages) 2832 && !(pfn & (pageblock_nr_pages - 1))) 2833 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 2834 2835 INIT_LIST_HEAD(&page->lru); 2836#ifdef WANT_PAGE_VIRTUAL 2837 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 2838 if (!is_highmem_idx(zone)) 2839 set_page_address(page, __va(pfn << PAGE_SHIFT)); 2840#endif 2841 } 2842} 2843 2844static void __meminit zone_init_free_lists(struct zone *zone) 2845{ 2846 int order, t; 2847 for_each_migratetype_order(order, t) { 2848 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 2849 zone->free_area[order].nr_free = 0; 2850 } 2851} 2852 2853#ifndef __HAVE_ARCH_MEMMAP_INIT 2854#define memmap_init(size, nid, zone, start_pfn) \ 2855 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 2856#endif 2857 2858static int zone_batchsize(struct zone *zone) 2859{ 2860#ifdef CONFIG_MMU 2861 int batch; 2862 2863 /* 2864 * The per-cpu-pages pools are set to around 1000th of the 2865 * size of the zone. But no more than 1/2 of a meg. 2866 * 2867 * OK, so we don't know how big the cache is. So guess. 2868 */ 2869 batch = zone->present_pages / 1024; 2870 if (batch * PAGE_SIZE > 512 * 1024) 2871 batch = (512 * 1024) / PAGE_SIZE; 2872 batch /= 4; /* We effectively *= 4 below */ 2873 if (batch < 1) 2874 batch = 1; 2875 2876 /* 2877 * Clamp the batch to a 2^n - 1 value. Having a power 2878 * of 2 value was found to be more likely to have 2879 * suboptimal cache aliasing properties in some cases. 2880 * 2881 * For example if 2 tasks are alternately allocating 2882 * batches of pages, one task can end up with a lot 2883 * of pages of one half of the possible page colors 2884 * and the other with pages of the other colors. 2885 */ 2886 batch = rounddown_pow_of_two(batch + batch/2) - 1; 2887 2888 return batch; 2889 2890#else 2891 /* The deferral and batching of frees should be suppressed under NOMMU 2892 * conditions. 2893 * 2894 * The problem is that NOMMU needs to be able to allocate large chunks 2895 * of contiguous memory as there's no hardware page translation to 2896 * assemble apparent contiguous memory from discontiguous pages. 2897 * 2898 * Queueing large contiguous runs of pages for batching, however, 2899 * causes the pages to actually be freed in smaller chunks. As there 2900 * can be a significant delay between the individual batches being 2901 * recycled, this leads to the once large chunks of space being 2902 * fragmented and becoming unavailable for high-order allocations. 2903 */ 2904 return 0; 2905#endif 2906} 2907 2908static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 2909{ 2910 struct per_cpu_pages *pcp; 2911 2912 memset(p, 0, sizeof(*p)); 2913 2914 pcp = &p->pcp; 2915 pcp->count = 0; 2916 pcp->high = 6 * batch; 2917 pcp->batch = max(1UL, 1 * batch); 2918 INIT_LIST_HEAD(&pcp->list); 2919} 2920 2921/* 2922 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist 2923 * to the value high for the pageset p. 2924 */ 2925 2926static void setup_pagelist_highmark(struct per_cpu_pageset *p, 2927 unsigned long high) 2928{ 2929 struct per_cpu_pages *pcp; 2930 2931 pcp = &p->pcp; 2932 pcp->high = high; 2933 pcp->batch = max(1UL, high/4); 2934 if ((high/4) > (PAGE_SHIFT * 8)) 2935 pcp->batch = PAGE_SHIFT * 8; 2936} 2937 2938 2939#ifdef CONFIG_NUMA 2940/* 2941 * Boot pageset table. One per cpu which is going to be used for all 2942 * zones and all nodes. 
The parameters will be set in such a way 2943 * that an item put on a list will immediately be handed over to 2944 * the buddy list. This is safe since pageset manipulation is done 2945 * with interrupts disabled. 2946 * 2947 * Some NUMA counter updates may also be caught by the boot pagesets. 2948 * 2949 * The boot_pagesets must be kept even after bootup is complete for 2950 * unused processors and/or zones. They do play a role for bootstrapping 2951 * hotplugged processors. 2952 * 2953 * zoneinfo_show() and maybe other functions do 2954 * not check if the processor is online before following the pageset pointer. 2955 * Other parts of the kernel may not check if the zone is available. 2956 */ 2957static struct per_cpu_pageset boot_pageset[NR_CPUS]; 2958 2959/* 2960 * Dynamically allocate memory for the 2961 * per cpu pageset array in struct zone. 2962 */ 2963static int __cpuinit process_zones(int cpu) 2964{ 2965 struct zone *zone, *dzone; 2966 int node = cpu_to_node(cpu); 2967 2968 node_set_state(node, N_CPU); /* this node has a cpu */ 2969 2970 for_each_populated_zone(zone) { 2971 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 2972 GFP_KERNEL, node); 2973 if (!zone_pcp(zone, cpu)) 2974 goto bad; 2975 2976 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); 2977 2978 if (percpu_pagelist_fraction) 2979 setup_pagelist_highmark(zone_pcp(zone, cpu), 2980 (zone->present_pages / percpu_pagelist_fraction)); 2981 } 2982 2983 return 0; 2984bad: 2985 for_each_zone(dzone) { 2986 if (!populated_zone(dzone)) 2987 continue; 2988 if (dzone == zone) 2989 break; 2990 kfree(zone_pcp(dzone, cpu)); 2991 zone_pcp(dzone, cpu) = NULL; 2992 } 2993 return -ENOMEM; 2994} 2995 2996static inline void free_zone_pagesets(int cpu) 2997{ 2998 struct zone *zone; 2999 3000 for_each_zone(zone) { 3001 struct per_cpu_pageset *pset = zone_pcp(zone, cpu); 3002 3003 /* Free per_cpu_pageset if it is slab allocated */ 3004 if (pset != &boot_pageset[cpu]) 3005 kfree(pset); 3006 zone_pcp(zone, cpu) = NULL; 3007 } 3008} 3009 3010static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, 3011 unsigned long action, 3012 void *hcpu) 3013{ 3014 int cpu = (long)hcpu; 3015 int ret = NOTIFY_OK; 3016 3017 switch (action) { 3018 case CPU_UP_PREPARE: 3019 case CPU_UP_PREPARE_FROZEN: 3020 if (process_zones(cpu)) 3021 ret = NOTIFY_BAD; 3022 break; 3023 case CPU_UP_CANCELED: 3024 case CPU_UP_CANCELED_FROZEN: 3025 case CPU_DEAD: 3026 case CPU_DEAD_FROZEN: 3027 free_zone_pagesets(cpu); 3028 break; 3029 default: 3030 break; 3031 } 3032 return ret; 3033} 3034 3035static struct notifier_block __cpuinitdata pageset_notifier = 3036 { &pageset_cpuup_callback, NULL, 0 }; 3037 3038void __init setup_per_cpu_pageset(void) 3039{ 3040 int err; 3041 3042 /* Initialize per_cpu_pageset for cpu 0. 3043 * A cpuup callback will do this for every cpu 3044 * as it comes online 3045 */ 3046 err = process_zones(smp_processor_id()); 3047 BUG_ON(err); 3048 register_cpu_notifier(&pageset_notifier); 3049} 3050 3051#endif 3052 3053static noinline __init_refok 3054int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 3055{ 3056 int i; 3057 struct pglist_data *pgdat = zone->zone_pgdat; 3058 size_t alloc_size; 3059 3060 /* 3061 * The per-page waitqueue mechanism uses hashed waitqueues 3062 * per zone. 
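 * For example, without CONFIG_MEMORY_HOTPLUG, a 1GiB zone with 4KiB pages
 * (262144 pages) gets wait_table_hash_nr_entries() = 1024 and
 * wait_table_bits() = 10.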
3063 */ 3064 zone->wait_table_hash_nr_entries = 3065 wait_table_hash_nr_entries(zone_size_pages); 3066 zone->wait_table_bits = 3067 wait_table_bits(zone->wait_table_hash_nr_entries); 3068 alloc_size = zone->wait_table_hash_nr_entries 3069 * sizeof(wait_queue_head_t); 3070 3071 if (!slab_is_available()) { 3072 zone->wait_table = (wait_queue_head_t *) 3073 alloc_bootmem_node(pgdat, alloc_size); 3074 } else { 3075 /* 3076 * This case means that a zone whose size was 0 gets new memory 3077 * via memory hot-add. 3078 * But it may be the case that a new node was hot-added. In 3079 * this case vmalloc() will not be able to use this new node's 3080 * memory - this wait_table must be initialized to use this new 3081 * node itself as well. 3082 * To use this new node's memory, further consideration will be 3083 * necessary. 3084 */ 3085 zone->wait_table = vmalloc(alloc_size); 3086 } 3087 if (!zone->wait_table) 3088 return -ENOMEM; 3089 3090 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) 3091 init_waitqueue_head(zone->wait_table + i); 3092 3093 return 0; 3094} 3095 3096static __meminit void zone_pcp_init(struct zone *zone) 3097{ 3098 int cpu; 3099 unsigned long batch = zone_batchsize(zone); 3100 3101 for (cpu = 0; cpu < NR_CPUS; cpu++) { 3102#ifdef CONFIG_NUMA 3103 /* Early boot. Slab allocator not functional yet */ 3104 zone_pcp(zone, cpu) = &boot_pageset[cpu]; 3105 setup_pageset(&boot_pageset[cpu],0); 3106#else 3107 setup_pageset(zone_pcp(zone,cpu), batch); 3108#endif 3109 } 3110 if (zone->present_pages) 3111 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 3112 zone->name, zone->present_pages, batch); 3113} 3114 3115__meminit int init_currently_empty_zone(struct zone *zone, 3116 unsigned long zone_start_pfn, 3117 unsigned long size, 3118 enum memmap_context context) 3119{ 3120 struct pglist_data *pgdat = zone->zone_pgdat; 3121 int ret; 3122 ret = zone_wait_table_init(zone, size); 3123 if (ret) 3124 return ret; 3125 pgdat->nr_zones = zone_idx(zone) + 1; 3126 3127 zone->zone_start_pfn = zone_start_pfn; 3128 3129 mminit_dprintk(MMINIT_TRACE, "memmap_init", 3130 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 3131 pgdat->node_id, 3132 (unsigned long)zone_idx(zone), 3133 zone_start_pfn, (zone_start_pfn + size)); 3134 3135 zone_init_free_lists(zone); 3136 3137 return 0; 3138} 3139 3140#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 3141/* 3142 * Basic iterator support. Return the first range of PFNs for a node 3143 * Note: nid == MAX_NUMNODES returns first region regardless of node 3144 */ 3145static int __meminit first_active_region_index_in_nid(int nid) 3146{ 3147 int i; 3148 3149 for (i = 0; i < nr_nodemap_entries; i++) 3150 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) 3151 return i; 3152 3153 return -1; 3154} 3155 3156/* 3157 * Basic iterator support. Return the next active range of PFNs for a node 3158 * Note: nid == MAX_NUMNODES returns next region regardless of node 3159 */ 3160static int __meminit next_active_region_index_in_nid(int index, int nid) 3161{ 3162 for (index = index + 1; index < nr_nodemap_entries; index++) 3163 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) 3164 return index; 3165 3166 return -1; 3167} 3168 3169#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 3170/* 3171 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 
3172 * Architectures may implement their own version but if add_active_range() 3173 * was used and there are no special requirements, this is a convenient 3174 * alternative 3175 */ 3176int __meminit __early_pfn_to_nid(unsigned long pfn) 3177{ 3178 int i; 3179 3180 for (i = 0; i < nr_nodemap_entries; i++) { 3181 unsigned long start_pfn = early_node_map[i].start_pfn; 3182 unsigned long end_pfn = early_node_map[i].end_pfn; 3183 3184 if (start_pfn <= pfn && pfn < end_pfn) 3185 return early_node_map[i].nid; 3186 } 3187 /* This is a memory hole */ 3188 return -1; 3189} 3190#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 3191 3192int __meminit early_pfn_to_nid(unsigned long pfn) 3193{ 3194 int nid; 3195 3196 nid = __early_pfn_to_nid(pfn); 3197 if (nid >= 0) 3198 return nid; 3199 /* just returns 0 */ 3200 return 0; 3201} 3202 3203#ifdef CONFIG_NODES_SPAN_OTHER_NODES 3204bool __meminit early_pfn_in_nid(unsigned long pfn, int node) 3205{ 3206 int nid; 3207 3208 nid = __early_pfn_to_nid(pfn); 3209 if (nid >= 0 && nid != node) 3210 return false; 3211 return true; 3212} 3213#endif 3214 3215/* Basic iterator support to walk early_node_map[] */ 3216#define for_each_active_range_index_in_nid(i, nid) \ 3217 for (i = first_active_region_index_in_nid(nid); i != -1; \ 3218 i = next_active_region_index_in_nid(i, nid)) 3219 3220/** 3221 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range 3222 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 3223 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node 3224 * 3225 * If an architecture guarantees that all ranges registered with 3226 * add_active_ranges() contain no holes and may be freed, this 3227 * this function may be used instead of calling free_bootmem() manually. 3228 */ 3229void __init free_bootmem_with_active_regions(int nid, 3230 unsigned long max_low_pfn) 3231{ 3232 int i; 3233 3234 for_each_active_range_index_in_nid(i, nid) { 3235 unsigned long size_pages = 0; 3236 unsigned long end_pfn = early_node_map[i].end_pfn; 3237 3238 if (early_node_map[i].start_pfn >= max_low_pfn) 3239 continue; 3240 3241 if (end_pfn > max_low_pfn) 3242 end_pfn = max_low_pfn; 3243 3244 size_pages = end_pfn - early_node_map[i].start_pfn; 3245 free_bootmem_node(NODE_DATA(early_node_map[i].nid), 3246 PFN_PHYS(early_node_map[i].start_pfn), 3247 size_pages << PAGE_SHIFT); 3248 } 3249} 3250 3251void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data) 3252{ 3253 int i; 3254 int ret; 3255 3256 for_each_active_range_index_in_nid(i, nid) { 3257 ret = work_fn(early_node_map[i].start_pfn, 3258 early_node_map[i].end_pfn, data); 3259 if (ret) 3260 break; 3261 } 3262} 3263/** 3264 * sparse_memory_present_with_active_regions - Call memory_present for each active range 3265 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 3266 * 3267 * If an architecture guarantees that all ranges registered with 3268 * add_active_ranges() contain no holes and may be freed, this 3269 * function may be used instead of calling memory_present() manually. 3270 */ 3271void __init sparse_memory_present_with_active_regions(int nid) 3272{ 3273 int i; 3274 3275 for_each_active_range_index_in_nid(i, nid) 3276 memory_present(early_node_map[i].nid, 3277 early_node_map[i].start_pfn, 3278 early_node_map[i].end_pfn); 3279} 3280 3281/** 3282 * get_pfn_range_for_nid - Return the start and end page frames for a node 3283 * @nid: The nid to return the range for. 
If MAX_NUMNODES, the min and max PFN are returned. 3284 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 3285 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 3286 * 3287 * It returns the start and end page frame of a node based on information 3288 * provided by an arch calling add_active_range(). If called for a node 3289 * with no available memory, a warning is printed and the start and end 3290 * PFNs will be 0. 3291 */ 3292void __meminit get_pfn_range_for_nid(unsigned int nid, 3293 unsigned long *start_pfn, unsigned long *end_pfn) 3294{ 3295 int i; 3296 *start_pfn = -1UL; 3297 *end_pfn = 0; 3298 3299 for_each_active_range_index_in_nid(i, nid) { 3300 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn); 3301 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn); 3302 } 3303 3304 if (*start_pfn == -1UL) 3305 *start_pfn = 0; 3306} 3307 3308/* 3309 * This finds a zone that can be used for ZONE_MOVABLE pages. The 3310 * assumption is made that zones within a node are ordered in monotonically 3311 * increasing memory addresses so that the "highest" populated zone is used. 3312 */ 3313static void __init find_usable_zone_for_movable(void) 3314{ 3315 int zone_index; 3316 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 3317 if (zone_index == ZONE_MOVABLE) 3318 continue; 3319 3320 if (arch_zone_highest_possible_pfn[zone_index] > 3321 arch_zone_lowest_possible_pfn[zone_index]) 3322 break; 3323 } 3324 3325 VM_BUG_ON(zone_index == -1); 3326 movable_zone = zone_index; 3327} 3328 3329/* 3330 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 3331 * because it is sized independently of the architecture. Unlike the other zones, 3332 * the starting point for ZONE_MOVABLE is not fixed. It may be different 3333 * in each node depending on the size of each node and how evenly kernelcore 3334 * is distributed. This helper function adjusts the zone ranges 3335 * provided by the architecture for a given node by using the end of the 3336 * highest usable zone for ZONE_MOVABLE.
This preserves the assumption that 3337 * zones within a node are in order of monotonically increasing memory addresses 3338 */ 3339static void __meminit adjust_zone_range_for_zone_movable(int nid, 3340 unsigned long zone_type, 3341 unsigned long node_start_pfn, 3342 unsigned long node_end_pfn, 3343 unsigned long *zone_start_pfn, 3344 unsigned long *zone_end_pfn) 3345{ 3346 /* Only adjust if ZONE_MOVABLE is on this node */ 3347 if (zone_movable_pfn[nid]) { 3348 /* Size ZONE_MOVABLE */ 3349 if (zone_type == ZONE_MOVABLE) { 3350 *zone_start_pfn = zone_movable_pfn[nid]; 3351 *zone_end_pfn = min(node_end_pfn, 3352 arch_zone_highest_possible_pfn[movable_zone]); 3353 3354 /* Adjust for ZONE_MOVABLE starting within this range */ 3355 } else if (*zone_start_pfn < zone_movable_pfn[nid] && 3356 *zone_end_pfn > zone_movable_pfn[nid]) { 3357 *zone_end_pfn = zone_movable_pfn[nid]; 3358 3359 /* Check if this whole range is within ZONE_MOVABLE */ 3360 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 3361 *zone_start_pfn = *zone_end_pfn; 3362 } 3363} 3364 3365/* 3366 * Return the number of pages a zone spans in a node, including holes 3367 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 3368 */ 3369static unsigned long __meminit zone_spanned_pages_in_node(int nid, 3370 unsigned long zone_type, 3371 unsigned long *ignored) 3372{ 3373 unsigned long node_start_pfn, node_end_pfn; 3374 unsigned long zone_start_pfn, zone_end_pfn; 3375 3376 /* Get the start and end of the node and zone */ 3377 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 3378 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 3379 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 3380 adjust_zone_range_for_zone_movable(nid, zone_type, 3381 node_start_pfn, node_end_pfn, 3382 &zone_start_pfn, &zone_end_pfn); 3383 3384 /* Check that this node has pages within the zone's required range */ 3385 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) 3386 return 0; 3387 3388 /* Move the zone boundaries inside the node if necessary */ 3389 zone_end_pfn = min(zone_end_pfn, node_end_pfn); 3390 zone_start_pfn = max(zone_start_pfn, node_start_pfn); 3391 3392 /* Return the spanned pages */ 3393 return zone_end_pfn - zone_start_pfn; 3394} 3395 3396/* 3397 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 3398 * then all holes in the requested range will be accounted for.
3399 */ 3400static unsigned long __meminit __absent_pages_in_range(int nid, 3401 unsigned long range_start_pfn, 3402 unsigned long range_end_pfn) 3403{ 3404 int i = 0; 3405 unsigned long prev_end_pfn = 0, hole_pages = 0; 3406 unsigned long start_pfn; 3407 3408 /* Find the end_pfn of the first active range of pfns in the node */ 3409 i = first_active_region_index_in_nid(nid); 3410 if (i == -1) 3411 return 0; 3412 3413 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn); 3414 3415 /* Account for ranges before physical memory on this node */ 3416 if (early_node_map[i].start_pfn > range_start_pfn) 3417 hole_pages = prev_end_pfn - range_start_pfn; 3418 3419 /* Find all holes for the zone within the node */ 3420 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) { 3421 3422 /* No need to continue if prev_end_pfn is outside the zone */ 3423 if (prev_end_pfn >= range_end_pfn) 3424 break; 3425 3426 /* Make sure the end of the zone is not within the hole */ 3427 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn); 3428 prev_end_pfn = max(prev_end_pfn, range_start_pfn); 3429 3430 /* Update the hole size count and move on */ 3431 if (start_pfn > range_start_pfn) { 3432 BUG_ON(prev_end_pfn > start_pfn); 3433 hole_pages += start_pfn - prev_end_pfn; 3434 } 3435 prev_end_pfn = early_node_map[i].end_pfn; 3436 } 3437 3438 /* Account for ranges past physical memory on this node */ 3439 if (range_end_pfn > prev_end_pfn) 3440 hole_pages += range_end_pfn - 3441 max(range_start_pfn, prev_end_pfn); 3442 3443 return hole_pages; 3444} 3445 3446/** 3447 * absent_pages_in_range - Return number of page frames in holes within a range 3448 * @start_pfn: The start PFN to start searching for holes 3449 * @end_pfn: The end PFN to stop searching for holes 3450 * 3451 * It returns the number of page frames in memory holes within a range.
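 * For example, with registered active ranges [0, 0x1000) and [0x2000, 0x3000),
 * absent_pages_in_range(0, 0x3000) returns 0x1000 pages for the hole between
 * them.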
3452 */ 3453unsigned long __init absent_pages_in_range(unsigned long start_pfn, 3454 unsigned long end_pfn) 3455{ 3456 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 3457} 3458 3459/* Return the number of page frames in holes in a zone on a node */ 3460static unsigned long __meminit zone_absent_pages_in_node(int nid, 3461 unsigned long zone_type, 3462 unsigned long *ignored) 3463{ 3464 unsigned long node_start_pfn, node_end_pfn; 3465 unsigned long zone_start_pfn, zone_end_pfn; 3466 3467 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 3468 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type], 3469 node_start_pfn); 3470 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type], 3471 node_end_pfn); 3472 3473 adjust_zone_range_for_zone_movable(nid, zone_type, 3474 node_start_pfn, node_end_pfn, 3475 &zone_start_pfn, &zone_end_pfn); 3476 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 3477} 3478 3479#else 3480static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, 3481 unsigned long zone_type, 3482 unsigned long *zones_size) 3483{ 3484 return zones_size[zone_type]; 3485} 3486 3487static inline unsigned long __meminit zone_absent_pages_in_node(int nid, 3488 unsigned long zone_type, 3489 unsigned long *zholes_size) 3490{ 3491 if (!zholes_size) 3492 return 0; 3493 3494 return zholes_size[zone_type]; 3495} 3496 3497#endif 3498 3499static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 3500 unsigned long *zones_size, unsigned long *zholes_size) 3501{ 3502 unsigned long realtotalpages, totalpages = 0; 3503 enum zone_type i; 3504 3505 for (i = 0; i < MAX_NR_ZONES; i++) 3506 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, 3507 zones_size); 3508 pgdat->node_spanned_pages = totalpages; 3509 3510 realtotalpages = totalpages; 3511 for (i = 0; i < MAX_NR_ZONES; i++) 3512 realtotalpages -= 3513 zone_absent_pages_in_node(pgdat->node_id, i, 3514 zholes_size); 3515 pgdat->node_present_pages = realtotalpages; 3516 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 3517 realtotalpages); 3518} 3519 3520#ifndef CONFIG_SPARSEMEM 3521/* 3522 * Calculate the size of the zone->blockflags rounded to an unsigned long 3523 * Start by making sure zonesize is a multiple of pageblock_order by rounding 3524 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 3525 * round what is now in bits to nearest long in bits, then return it in 3526 * bytes. 3527 */ 3528static unsigned long __init usemap_size(unsigned long zonesize) 3529{ 3530 unsigned long usemapsize; 3531 3532 usemapsize = roundup(zonesize, pageblock_nr_pages); 3533 usemapsize = usemapsize >> pageblock_order; 3534 usemapsize *= NR_PAGEBLOCK_BITS; 3535 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 3536 3537 return usemapsize / 8; 3538} 3539 3540static void __init setup_usemap(struct pglist_data *pgdat, 3541 struct zone *zone, unsigned long zonesize) 3542{ 3543 unsigned long usemapsize = usemap_size(zonesize); 3544 zone->pageblock_flags = NULL; 3545 if (usemapsize) 3546 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); 3547} 3548#else 3549static void inline setup_usemap(struct pglist_data *pgdat, 3550 struct zone *zone, unsigned long zonesize) {} 3551#endif /* CONFIG_SPARSEMEM */ 3552 3553#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 3554 3555/* Return a sensible default order for the pageblock size. 
*/ 3556static inline int pageblock_default_order(void) 3557{ 3558 if (HPAGE_SHIFT > PAGE_SHIFT) 3559 return HUGETLB_PAGE_ORDER; 3560 3561 return MAX_ORDER-1; 3562} 3563 3564/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 3565static inline void __init set_pageblock_order(unsigned int order) 3566{ 3567 /* Check that pageblock_nr_pages has not already been setup */ 3568 if (pageblock_order) 3569 return; 3570 3571 /* 3572 * Assume the largest contiguous order of interest is a huge page. 3573 * This value may be variable depending on boot parameters on IA64 3574 */ 3575 pageblock_order = order; 3576} 3577#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 3578 3579/* 3580 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 3581 * and pageblock_default_order() are unused as pageblock_order is set 3582 * at compile-time. See include/linux/pageblock-flags.h for the values of 3583 * pageblock_order based on the kernel config 3584 */ 3585static inline int pageblock_default_order(unsigned int order) 3586{ 3587 return MAX_ORDER-1; 3588} 3589#define set_pageblock_order(x) do {} while (0) 3590 3591#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 3592 3593/* 3594 * Set up the zone data structures: 3595 * - mark all pages reserved 3596 * - mark all memory queues empty 3597 * - clear the memory bitmaps 3598 */ 3599static void __paginginit free_area_init_core(struct pglist_data *pgdat, 3600 unsigned long *zones_size, unsigned long *zholes_size) 3601{ 3602 enum zone_type j; 3603 int nid = pgdat->node_id; 3604 unsigned long zone_start_pfn = pgdat->node_start_pfn; 3605 int ret; 3606 3607 pgdat_resize_init(pgdat); 3608 pgdat->nr_zones = 0; 3609 init_waitqueue_head(&pgdat->kswapd_wait); 3610 pgdat->kswapd_max_order = 0; 3611 pgdat_page_cgroup_init(pgdat); 3612 3613 for (j = 0; j < MAX_NR_ZONES; j++) { 3614 struct zone *zone = pgdat->node_zones + j; 3615 unsigned long size, realsize, memmap_pages; 3616 enum lru_list l; 3617 3618 size = zone_spanned_pages_in_node(nid, j, zones_size); 3619 realsize = size - zone_absent_pages_in_node(nid, j, 3620 zholes_size); 3621 3622 /* 3623 * Adjust realsize so that it accounts for how much memory 3624 * is used by this zone for memmap. 
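 * (With 4KiB pages and a 64-byte struct page, for instance, that is roughly
 * one page of memmap for every 64 pages spanned by the zone.)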
This affects the watermark 3625 * and per-cpu initialisations 3626 */ 3627 memmap_pages = 3628 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT; 3629 if (realsize >= memmap_pages) { 3630 realsize -= memmap_pages; 3631 if (memmap_pages) 3632 printk(KERN_DEBUG 3633 " %s zone: %lu pages used for memmap\n", 3634 zone_names[j], memmap_pages); 3635 } else 3636 printk(KERN_WARNING 3637 " %s zone: %lu pages exceeds realsize %lu\n", 3638 zone_names[j], memmap_pages, realsize); 3639 3640 /* Account for reserved pages */ 3641 if (j == 0 && realsize > dma_reserve) { 3642 realsize -= dma_reserve; 3643 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 3644 zone_names[0], dma_reserve); 3645 } 3646 3647 if (!is_highmem_idx(j)) 3648 nr_kernel_pages += realsize; 3649 nr_all_pages += realsize; 3650 3651 zone->spanned_pages = size; 3652 zone->present_pages = realsize; 3653#ifdef CONFIG_NUMA 3654 zone->node = nid; 3655 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio) 3656 / 100; 3657 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100; 3658#endif 3659 zone->name = zone_names[j]; 3660 spin_lock_init(&zone->lock); 3661 spin_lock_init(&zone->lru_lock); 3662 zone_seqlock_init(zone); 3663 zone->zone_pgdat = pgdat; 3664 3665 zone->prev_priority = DEF_PRIORITY; 3666 3667 zone_pcp_init(zone); 3668 for_each_lru(l) { 3669 INIT_LIST_HEAD(&zone->lru[l].list); 3670 zone->lru[l].nr_saved_scan = 0; 3671 } 3672 zone->reclaim_stat.recent_rotated[0] = 0; 3673 zone->reclaim_stat.recent_rotated[1] = 0; 3674 zone->reclaim_stat.recent_scanned[0] = 0; 3675 zone->reclaim_stat.recent_scanned[1] = 0; 3676 zap_zone_vm_stats(zone); 3677 zone->flags = 0; 3678 if (!size) 3679 continue; 3680 3681 set_pageblock_order(pageblock_default_order()); 3682 setup_usemap(pgdat, zone, size); 3683 ret = init_currently_empty_zone(zone, zone_start_pfn, 3684 size, MEMMAP_EARLY); 3685 BUG_ON(ret); 3686 memmap_init(size, nid, j, zone_start_pfn); 3687 zone_start_pfn += size; 3688 } 3689} 3690 3691static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) 3692{ 3693 /* Skip empty nodes */ 3694 if (!pgdat->node_spanned_pages) 3695 return; 3696 3697#ifdef CONFIG_FLAT_NODE_MEM_MAP 3698 /* ia64 gets its own node_mem_map, before this, without bootmem */ 3699 if (!pgdat->node_mem_map) { 3700 unsigned long size, start, end; 3701 struct page *map; 3702 3703 /* 3704 * The zone's endpoints aren't required to be MAX_ORDER 3705 * aligned but the node_mem_map endpoints must be in order 3706 * for the buddy allocator to function correctly. 
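 * For example, with a MAX_ORDER_NR_PAGES of 1024, a node_start_pfn of 0x1234
 * is rounded down to 0x1000 and the end is rounded up to the next 1024-page
 * boundary before the map is sized.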
3707 */ 3708 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 3709 end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 3710 end = ALIGN(end, MAX_ORDER_NR_PAGES); 3711 size = (end - start) * sizeof(struct page); 3712 map = alloc_remap(pgdat->node_id, size); 3713 if (!map) 3714 map = alloc_bootmem_node(pgdat, size); 3715 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 3716 } 3717#ifndef CONFIG_NEED_MULTIPLE_NODES 3718 /* 3719 * With no DISCONTIG, the global mem_map is just set as node 0's 3720 */ 3721 if (pgdat == NODE_DATA(0)) { 3722 mem_map = NODE_DATA(0)->node_mem_map; 3723#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 3724 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 3725 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET); 3726#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 3727 } 3728#endif 3729#endif /* CONFIG_FLAT_NODE_MEM_MAP */ 3730} 3731 3732void __paginginit free_area_init_node(int nid, unsigned long *zones_size, 3733 unsigned long node_start_pfn, unsigned long *zholes_size) 3734{ 3735 pg_data_t *pgdat = NODE_DATA(nid); 3736 3737 pgdat->node_id = nid; 3738 pgdat->node_start_pfn = node_start_pfn; 3739 calculate_node_totalpages(pgdat, zones_size, zholes_size); 3740 3741 alloc_node_mem_map(pgdat); 3742#ifdef CONFIG_FLAT_NODE_MEM_MAP 3743 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", 3744 nid, (unsigned long)pgdat, 3745 (unsigned long)pgdat->node_mem_map); 3746#endif 3747 3748 free_area_init_core(pgdat, zones_size, zholes_size); 3749} 3750 3751#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 3752 3753#if MAX_NUMNODES > 1 3754/* 3755 * Figure out the number of possible node ids. 3756 */ 3757static void __init setup_nr_node_ids(void) 3758{ 3759 unsigned int node; 3760 unsigned int highest = 0; 3761 3762 for_each_node_mask(node, node_possible_map) 3763 highest = node; 3764 nr_node_ids = highest + 1; 3765} 3766#else 3767static inline void setup_nr_node_ids(void) 3768{ 3769} 3770#endif 3771 3772/** 3773 * add_active_range - Register a range of PFNs backed by physical memory 3774 * @nid: The node ID the range resides on 3775 * @start_pfn: The start PFN of the available physical memory 3776 * @end_pfn: The end PFN of the available physical memory 3777 * 3778 * These ranges are stored in an early_node_map[] and later used by 3779 * free_area_init_nodes() to calculate zone sizes and holes. If the 3780 * range spans a memory hole, it is up to the architecture to ensure 3781 * the memory is not freed by the bootmem allocator. If possible 3782 * the range being registered will be merged with existing ranges. 
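 *
 * For example, registering [0x100, 0x200) and then [0x180, 0x300) for the
 * same nid leaves a single merged entry covering [0x100, 0x300).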
3783 */ 3784void __init add_active_range(unsigned int nid, unsigned long start_pfn, 3785 unsigned long end_pfn) 3786{ 3787 int i; 3788 3789 mminit_dprintk(MMINIT_TRACE, "memory_register", 3790 "Entering add_active_range(%d, %#lx, %#lx) " 3791 "%d entries of %d used\n", 3792 nid, start_pfn, end_pfn, 3793 nr_nodemap_entries, MAX_ACTIVE_REGIONS); 3794 3795 mminit_validate_memmodel_limits(&start_pfn, &end_pfn); 3796 3797 /* Merge with existing active regions if possible */ 3798 for (i = 0; i < nr_nodemap_entries; i++) { 3799 if (early_node_map[i].nid != nid) 3800 continue; 3801 3802 /* Skip if an existing region covers this new one */ 3803 if (start_pfn >= early_node_map[i].start_pfn && 3804 end_pfn <= early_node_map[i].end_pfn) 3805 return; 3806 3807 /* Merge forward if suitable */ 3808 if (start_pfn <= early_node_map[i].end_pfn && 3809 end_pfn > early_node_map[i].end_pfn) { 3810 early_node_map[i].end_pfn = end_pfn; 3811 return; 3812 } 3813 3814 /* Merge backward if suitable */ 3815 if (start_pfn < early_node_map[i].end_pfn && 3816 end_pfn >= early_node_map[i].start_pfn) { 3817 early_node_map[i].start_pfn = start_pfn; 3818 return; 3819 } 3820 } 3821 3822 /* Check that early_node_map is large enough */ 3823 if (i >= MAX_ACTIVE_REGIONS) { 3824 printk(KERN_CRIT "More than %d memory regions, truncating\n", 3825 MAX_ACTIVE_REGIONS); 3826 return; 3827 } 3828 3829 early_node_map[i].nid = nid; 3830 early_node_map[i].start_pfn = start_pfn; 3831 early_node_map[i].end_pfn = end_pfn; 3832 nr_nodemap_entries = i + 1; 3833} 3834 3835/** 3836 * remove_active_range - Shrink an existing registered range of PFNs 3837 * @nid: The node id the range is on that should be shrunk 3838 * @start_pfn: The new start PFN of the range 3839 * @end_pfn: The new end PFN of the range 3840 * 3841 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node. 3842 * The map is kept near the end of the physical page range that has already been 3843 * registered. This function allows an arch to shrink an existing registered 3844 * range.
3845 */ 3846void __init remove_active_range(unsigned int nid, unsigned long start_pfn, 3847 unsigned long end_pfn) 3848{ 3849 int i, j; 3850 int removed = 0; 3851 3852 printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n", 3853 nid, start_pfn, end_pfn); 3854 3855 /* Find the old active region end and shrink */ 3856 for_each_active_range_index_in_nid(i, nid) { 3857 if (early_node_map[i].start_pfn >= start_pfn && 3858 early_node_map[i].end_pfn <= end_pfn) { 3859 /* clear it */ 3860 early_node_map[i].start_pfn = 0; 3861 early_node_map[i].end_pfn = 0; 3862 removed = 1; 3863 continue; 3864 } 3865 if (early_node_map[i].start_pfn < start_pfn && 3866 early_node_map[i].end_pfn > start_pfn) { 3867 unsigned long temp_end_pfn = early_node_map[i].end_pfn; 3868 early_node_map[i].end_pfn = start_pfn; 3869 if (temp_end_pfn > end_pfn) 3870 add_active_range(nid, end_pfn, temp_end_pfn); 3871 continue; 3872 } 3873 if (early_node_map[i].start_pfn >= start_pfn && 3874 early_node_map[i].end_pfn > end_pfn && 3875 early_node_map[i].start_pfn < end_pfn) { 3876 early_node_map[i].start_pfn = end_pfn; 3877 continue; 3878 } 3879 } 3880 3881 if (!removed) 3882 return; 3883 3884 /* remove the blank ones */ 3885 for (i = nr_nodemap_entries - 1; i > 0; i--) { 3886 if (early_node_map[i].nid != nid) 3887 continue; 3888 if (early_node_map[i].end_pfn) 3889 continue; 3890 /* we found it, get rid of it */ 3891 for (j = i; j < nr_nodemap_entries - 1; j++) 3892 memcpy(&early_node_map[j], &early_node_map[j+1], 3893 sizeof(early_node_map[j])); 3894 j = nr_nodemap_entries - 1; 3895 memset(&early_node_map[j], 0, sizeof(early_node_map[j])); 3896 nr_nodemap_entries--; 3897 } 3898} 3899 3900/** 3901 * remove_all_active_ranges - Remove all currently registered regions 3902 * 3903 * During discovery, it may be found that a table like SRAT is invalid 3904 * and an alternative discovery method must be used. This function removes 3905 * all currently registered regions. 3906 */ 3907void __init remove_all_active_ranges(void) 3908{ 3909 memset(early_node_map, 0, sizeof(early_node_map)); 3910 nr_nodemap_entries = 0; 3911} 3912 3913/* Compare two active node_active_regions */ 3914static int __init cmp_node_active_region(const void *a, const void *b) 3915{ 3916 struct node_active_region *arange = (struct node_active_region *)a; 3917 struct node_active_region *brange = (struct node_active_region *)b; 3918 3919 /* Done this way to avoid overflows */ 3920 if (arange->start_pfn > brange->start_pfn) 3921 return 1; 3922 if (arange->start_pfn < brange->start_pfn) 3923 return -1; 3924 3925 return 0; 3926} 3927 3928/* sort the node_map by start_pfn */ 3929static void __init sort_node_map(void) 3930{ 3931 sort(early_node_map, (size_t)nr_nodemap_entries, 3932 sizeof(struct node_active_region), 3933 cmp_node_active_region, NULL); 3934} 3935 3936/* Find the lowest pfn for a node */ 3937static unsigned long __init find_min_pfn_for_node(int nid) 3938{ 3939 int i; 3940 unsigned long min_pfn = ULONG_MAX; 3941 3942 /* Assuming a sorted map, the first range found has the starting pfn */ 3943 for_each_active_range_index_in_nid(i, nid) 3944 min_pfn = min(min_pfn, early_node_map[i].start_pfn); 3945 3946 if (min_pfn == ULONG_MAX) { 3947 printk(KERN_WARNING 3948 "Could not find start_pfn for node %d\n", nid); 3949 return 0; 3950 } 3951 3952 return min_pfn; 3953} 3954 3955/** 3956 * find_min_pfn_with_active_regions - Find the minimum PFN registered 3957 * 3958 * It returns the minimum PFN based on information provided via 3959 * add_active_range(). 
3960 */ 3961unsigned long __init find_min_pfn_with_active_regions(void) 3962{ 3963 return find_min_pfn_for_node(MAX_NUMNODES); 3964} 3965 3966/* 3967 * early_calculate_totalpages() 3968 * Sum pages in active regions for movable zone. 3969 * Populate N_HIGH_MEMORY for calculating usable_nodes. 3970 */ 3971static unsigned long __init early_calculate_totalpages(void) 3972{ 3973 int i; 3974 unsigned long totalpages = 0; 3975 3976 for (i = 0; i < nr_nodemap_entries; i++) { 3977 unsigned long pages = early_node_map[i].end_pfn - 3978 early_node_map[i].start_pfn; 3979 totalpages += pages; 3980 if (pages) 3981 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY); 3982 } 3983 return totalpages; 3984} 3985 3986/* 3987 * Find the PFN the Movable zone begins in each node. Kernel memory 3988 * is spread evenly between nodes as long as the nodes have enough 3989 * memory. When they don't, some nodes will have more kernelcore than 3990 * others 3991 */ 3992static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) 3993{ 3994 int i, nid; 3995 unsigned long usable_startpfn; 3996 unsigned long kernelcore_node, kernelcore_remaining; 3997 unsigned long totalpages = early_calculate_totalpages(); 3998 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); 3999 4000 /* 4001 * If movablecore was specified, calculate what size of 4002 * kernelcore that corresponds so that memory usable for 4003 * any allocation type is evenly spread. If both kernelcore 4004 * and movablecore are specified, then the value of kernelcore 4005 * will be used for required_kernelcore if it's greater than 4006 * what movablecore would have allowed. 4007 */ 4008 if (required_movablecore) { 4009 unsigned long corepages; 4010 4011 /* 4012 * Round-up so that ZONE_MOVABLE is at least as large as what 4013 * was requested by the user 4014 */ 4015 required_movablecore = 4016 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 4017 corepages = totalpages - required_movablecore; 4018 4019 required_kernelcore = max(required_kernelcore, corepages); 4020 } 4021 4022 /* If kernelcore was not specified, there is no ZONE_MOVABLE */ 4023 if (!required_kernelcore) 4024 return; 4025 4026 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 4027 find_usable_zone_for_movable(); 4028 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 4029 4030restart: 4031 /* Spread kernelcore memory as evenly as possible throughout nodes */ 4032 kernelcore_node = required_kernelcore / usable_nodes; 4033 for_each_node_state(nid, N_HIGH_MEMORY) { 4034 /* 4035 * Recalculate kernelcore_node if the division per node 4036 * now exceeds what is necessary to satisfy the requested 4037 * amount of memory for the kernel 4038 */ 4039 if (required_kernelcore < kernelcore_node) 4040 kernelcore_node = required_kernelcore / usable_nodes; 4041 4042 /* 4043 * As the map is walked, we track how much memory is usable 4044 * by the kernel using kernelcore_remaining. 
When it is 4045 * 0, the rest of the node is usable by ZONE_MOVABLE 4046 */ 4047 kernelcore_remaining = kernelcore_node; 4048 4049 /* Go through each range of PFNs within this node */ 4050 for_each_active_range_index_in_nid(i, nid) { 4051 unsigned long start_pfn, end_pfn; 4052 unsigned long size_pages; 4053 4054 start_pfn = max(early_node_map[i].start_pfn, 4055 zone_movable_pfn[nid]); 4056 end_pfn = early_node_map[i].end_pfn; 4057 if (start_pfn >= end_pfn) 4058 continue; 4059 4060 /* Account for what is only usable for kernelcore */ 4061 if (start_pfn < usable_startpfn) { 4062 unsigned long kernel_pages; 4063 kernel_pages = min(end_pfn, usable_startpfn) 4064 - start_pfn; 4065 4066 kernelcore_remaining -= min(kernel_pages, 4067 kernelcore_remaining); 4068 required_kernelcore -= min(kernel_pages, 4069 required_kernelcore); 4070 4071 /* Continue if range is now fully accounted */ 4072 if (end_pfn <= usable_startpfn) { 4073 4074 /* 4075 * Push zone_movable_pfn to the end so 4076 * that if we have to rebalance 4077 * kernelcore across nodes, we will 4078 * not double account here 4079 */ 4080 zone_movable_pfn[nid] = end_pfn; 4081 continue; 4082 } 4083 start_pfn = usable_startpfn; 4084 } 4085 4086 /* 4087 * The usable PFN range for ZONE_MOVABLE is from 4088 * start_pfn->end_pfn. Calculate size_pages as the 4089 * number of pages used as kernelcore 4090 */ 4091 size_pages = end_pfn - start_pfn; 4092 if (size_pages > kernelcore_remaining) 4093 size_pages = kernelcore_remaining; 4094 zone_movable_pfn[nid] = start_pfn + size_pages; 4095 4096 /* 4097 * Some kernelcore has been met, update counts and 4098 * break if the kernelcore for this node has been 4099 * satisfied 4100 */ 4101 required_kernelcore -= min(required_kernelcore, 4102 size_pages); 4103 kernelcore_remaining -= size_pages; 4104 if (!kernelcore_remaining) 4105 break; 4106 } 4107 } 4108 4109 /* 4110 * If there is still required_kernelcore, we do another pass with one 4111 * less node in the count. This will push zone_movable_pfn[nid] further 4112 * along on the nodes that still have memory until kernelcore is 4113 * satisfied 4114 */ 4115 usable_nodes--; 4116 if (usable_nodes && required_kernelcore > usable_nodes) 4117 goto restart; 4118 4119 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 4120 for (nid = 0; nid < MAX_NUMNODES; nid++) 4121 zone_movable_pfn[nid] = 4122 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 4123} 4124 4125/* Any regular memory on that node? */ 4126static void check_for_regular_memory(pg_data_t *pgdat) 4127{ 4128#ifdef CONFIG_HIGHMEM 4129 enum zone_type zone_type; 4130 4131 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) { 4132 struct zone *zone = &pgdat->node_zones[zone_type]; 4133 if (zone->present_pages) 4134 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY); 4135 } 4136#endif 4137} 4138 4139/** 4140 * free_area_init_nodes - Initialise all pg_data_t and zone data 4141 * @max_zone_pfn: an array of max PFNs for each zone 4142 * 4143 * This will call free_area_init_node() for each active node in the system. 4144 * Using the page ranges provided by add_active_range(), the size of each 4145 * zone in each node and their holes are calculated. If the maximum PFNs 4146 * of two adjacent zones match, it is assumed that the higher zone is empty. 4147 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 4148 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 4149 * starts where the previous one ended.
For example, ZONE_DMA32 starts 4150 * at arch_max_dma_pfn. 4151 */ 4152void __init free_area_init_nodes(unsigned long *max_zone_pfn) 4153{ 4154 unsigned long nid; 4155 int i; 4156 4157 /* Sort early_node_map as initialisation assumes it is sorted */ 4158 sort_node_map(); 4159 4160 /* Record where the zone boundaries are */ 4161 memset(arch_zone_lowest_possible_pfn, 0, 4162 sizeof(arch_zone_lowest_possible_pfn)); 4163 memset(arch_zone_highest_possible_pfn, 0, 4164 sizeof(arch_zone_highest_possible_pfn)); 4165 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 4166 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 4167 for (i = 1; i < MAX_NR_ZONES; i++) { 4168 if (i == ZONE_MOVABLE) 4169 continue; 4170 arch_zone_lowest_possible_pfn[i] = 4171 arch_zone_highest_possible_pfn[i-1]; 4172 arch_zone_highest_possible_pfn[i] = 4173 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 4174 } 4175 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; 4176 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; 4177 4178 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 4179 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 4180 find_zone_movable_pfns_for_nodes(zone_movable_pfn); 4181 4182 /* Print out the zone ranges */ 4183 printk("Zone PFN ranges:\n"); 4184 for (i = 0; i < MAX_NR_ZONES; i++) { 4185 if (i == ZONE_MOVABLE) 4186 continue; 4187 printk(" %-8s %0#10lx -> %0#10lx\n", 4188 zone_names[i], 4189 arch_zone_lowest_possible_pfn[i], 4190 arch_zone_highest_possible_pfn[i]); 4191 } 4192 4193 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 4194 printk("Movable zone start PFN for each node\n"); 4195 for (i = 0; i < MAX_NUMNODES; i++) { 4196 if (zone_movable_pfn[i]) 4197 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]); 4198 } 4199 4200 /* Print out the early_node_map[] */ 4201 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); 4202 for (i = 0; i < nr_nodemap_entries; i++) 4203 printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid, 4204 early_node_map[i].start_pfn, 4205 early_node_map[i].end_pfn); 4206 4207 /* Initialise every node */ 4208 mminit_verify_pageflags_layout(); 4209 setup_nr_node_ids(); 4210 for_each_online_node(nid) { 4211 pg_data_t *pgdat = NODE_DATA(nid); 4212 free_area_init_node(nid, NULL, 4213 find_min_pfn_for_node(nid), NULL); 4214 4215 /* Any memory on that node */ 4216 if (pgdat->node_present_pages) 4217 node_set_state(nid, N_HIGH_MEMORY); 4218 check_for_regular_memory(pgdat); 4219 } 4220} 4221 4222static int __init cmdline_parse_core(char *p, unsigned long *core) 4223{ 4224 unsigned long long coremem; 4225 if (!p) 4226 return -EINVAL; 4227 4228 coremem = memparse(p, &p); 4229 *core = coremem >> PAGE_SHIFT; 4230 4231 /* Paranoid check that UL is enough for the coremem value */ 4232 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 4233 4234 return 0; 4235} 4236 4237/* 4238 * kernelcore=size sets the amount of memory for use for allocations that 4239 * cannot be reclaimed or migrated. 4240 */ 4241static int __init cmdline_parse_kernelcore(char *p) 4242{ 4243 return cmdline_parse_core(p, &required_kernelcore); 4244} 4245 4246/* 4247 * movablecore=size sets the amount of memory for use for allocations that 4248 * can be reclaimed or migrated. 
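 *
 * (Editor's illustrative note, not part of the original source: both
 *  parameters are parsed with memparse(), so suffixed values such as
 *  "kernelcore=512M" or "movablecore=2G" can be given on the command
 *  line; cmdline_parse_core() above converts the byte count into pages
 *  by shifting right by PAGE_SHIFT.)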
4249 */ 4250static int __init cmdline_parse_movablecore(char *p) 4251{ 4252 return cmdline_parse_core(p, &required_movablecore); 4253} 4254 4255early_param("kernelcore", cmdline_parse_kernelcore); 4256early_param("movablecore", cmdline_parse_movablecore); 4257 4258#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 4259 4260/** 4261 * set_dma_reserve - set the specified number of pages reserved in the first zone 4262 * @new_dma_reserve: The number of pages to mark reserved 4263 * 4264 * The per-cpu batchsize and zone watermarks are determined by present_pages. 4265 * In the DMA zone, a significant percentage may be consumed by kernel image 4266 * and other unfreeable allocations which can skew the watermarks badly. This 4267 * function may optionally be used to account for unfreeable pages in the 4268 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 4269 * smaller per-cpu batchsize. 4270 */ 4271void __init set_dma_reserve(unsigned long new_dma_reserve) 4272{ 4273 dma_reserve = new_dma_reserve; 4274} 4275 4276#ifndef CONFIG_NEED_MULTIPLE_NODES 4277struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] }; 4278EXPORT_SYMBOL(contig_page_data); 4279#endif 4280 4281void __init free_area_init(unsigned long *zones_size) 4282{ 4283 free_area_init_node(0, zones_size, 4284 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 4285} 4286 4287static int page_alloc_cpu_notify(struct notifier_block *self, 4288 unsigned long action, void *hcpu) 4289{ 4290 int cpu = (unsigned long)hcpu; 4291 4292 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 4293 drain_pages(cpu); 4294 4295 /* 4296 * Spill the event counters of the dead processor 4297 * into the current processor's event counters. 4298 * This artificially elevates the count of the current 4299 * processor. 4300 */ 4301 vm_events_fold_cpu(cpu); 4302 4303 /* 4304 * Zero the differential counters of the dead processor 4305 * so that the vm statistics are consistent. 4306 * 4307 * This is only okay since the processor is dead and cannot 4308 * race with what we are doing. 4309 */ 4310 refresh_cpu_vm_stats(cpu); 4311 } 4312 return NOTIFY_OK; 4313} 4314 4315void __init page_alloc_init(void) 4316{ 4317 hotcpu_notifier(page_alloc_cpu_notify, 0); 4318} 4319 4320/* 4321 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 4322 * or min_free_kbytes changes. 4323 */ 4324static void calculate_totalreserve_pages(void) 4325{ 4326 struct pglist_data *pgdat; 4327 unsigned long reserve_pages = 0; 4328 enum zone_type i, j; 4329 4330 for_each_online_pgdat(pgdat) { 4331 for (i = 0; i < MAX_NR_ZONES; i++) { 4332 struct zone *zone = pgdat->node_zones + i; 4333 unsigned long max = 0; 4334 4335 /* Find valid and maximum lowmem_reserve in the zone */ 4336 for (j = i; j < MAX_NR_ZONES; j++) { 4337 if (zone->lowmem_reserve[j] > max) 4338 max = zone->lowmem_reserve[j]; 4339 } 4340 4341 /* we treat the high watermark as reserved pages. */ 4342 max += high_wmark_pages(zone); 4343 4344 if (max > zone->present_pages) 4345 max = zone->present_pages; 4346 reserve_pages += max; 4347 } 4348 } 4349 totalreserve_pages = reserve_pages; 4350} 4351 4352/* 4353 * setup_per_zone_lowmem_reserve - called whenever 4354 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 4355 * has a correct pages reserved value, so an adequate number of 4356 * pages are left in the zone after a successful __alloc_pages().
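 *
 * (Editor's illustrative note, not part of the original source: with
 *  hypothetical zones of 4000 DMA pages, 200000 Normal pages and 800000
 *  HighMem pages, and ratios of 256 for DMA and 32 for Normal, the loop
 *  below sets Normal->lowmem_reserve[HIGHMEM] = 800000/32 = 25000,
 *  DMA->lowmem_reserve[HIGHMEM] = (800000+200000)/256 = 3906 and
 *  DMA->lowmem_reserve[NORMAL] = 200000/256 = 781 pages.)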
4357 */ 4358static void setup_per_zone_lowmem_reserve(void) 4359{ 4360 struct pglist_data *pgdat; 4361 enum zone_type j, idx; 4362 4363 for_each_online_pgdat(pgdat) { 4364 for (j = 0; j < MAX_NR_ZONES; j++) { 4365 struct zone *zone = pgdat->node_zones + j; 4366 unsigned long present_pages = zone->present_pages; 4367 4368 zone->lowmem_reserve[j] = 0; 4369 4370 idx = j; 4371 while (idx) { 4372 struct zone *lower_zone; 4373 4374 idx--; 4375 4376 if (sysctl_lowmem_reserve_ratio[idx] < 1) 4377 sysctl_lowmem_reserve_ratio[idx] = 1; 4378 4379 lower_zone = pgdat->node_zones + idx; 4380 lower_zone->lowmem_reserve[j] = present_pages / 4381 sysctl_lowmem_reserve_ratio[idx]; 4382 present_pages += lower_zone->present_pages; 4383 } 4384 } 4385 } 4386 4387 /* update totalreserve_pages */ 4388 calculate_totalreserve_pages(); 4389} 4390 4391/** 4392 * setup_per_zone_wmarks - called when min_free_kbytes changes 4393 * or when memory is hot-{added|removed} 4394 * 4395 * Ensures that the watermark[min,low,high] values for each zone are set 4396 * correctly with respect to min_free_kbytes. 4397 */ 4398void setup_per_zone_wmarks(void) 4399{ 4400 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 4401 unsigned long lowmem_pages = 0; 4402 struct zone *zone; 4403 unsigned long flags; 4404 4405 /* Calculate total number of !ZONE_HIGHMEM pages */ 4406 for_each_zone(zone) { 4407 if (!is_highmem(zone)) 4408 lowmem_pages += zone->present_pages; 4409 } 4410 4411 for_each_zone(zone) { 4412 u64 tmp; 4413 4414 spin_lock_irqsave(&zone->lock, flags); 4415 tmp = (u64)pages_min * zone->present_pages; 4416 do_div(tmp, lowmem_pages); 4417 if (is_highmem(zone)) { 4418 /* 4419 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 4420 * need highmem pages, so cap pages_min to a small 4421 * value here. 4422 * 4423 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 4424 * deltas control async page reclaim, and so should 4425 * not be capped for highmem. 4426 */ 4427 int min_pages; 4428 4429 min_pages = zone->present_pages / 1024; 4430 if (min_pages < SWAP_CLUSTER_MAX) 4431 min_pages = SWAP_CLUSTER_MAX; 4432 if (min_pages > 128) 4433 min_pages = 128; 4434 zone->watermark[WMARK_MIN] = min_pages; 4435 } else { 4436 /* 4437 * If it's a lowmem zone, reserve a number of pages 4438 * proportionate to the zone's size. 4439 */ 4440 zone->watermark[WMARK_MIN] = tmp; 4441 } 4442 4443 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2); 4444 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1); 4445 setup_zone_migrate_reserve(zone); 4446 spin_unlock_irqrestore(&zone->lock, flags); 4447 } 4448 4449 /* update totalreserve_pages */ 4450 calculate_totalreserve_pages(); 4451} 4452 4453/** 4454 * The inactive anon list should be small enough that the VM never has to 4455 * do too much work, but large enough that each inactive page has a chance 4456 * to be referenced again before it is swapped out. 4457 * 4458 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to 4459 * INACTIVE_ANON pages on this zone's LRU, maintained by the 4460 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of 4461 * the anonymous pages are kept on the inactive list.
4462 * 4463 * total target max 4464 * memory ratio inactive anon 4465 * ------------------------------------- 4466 * 10MB 1 5MB 4467 * 100MB 1 50MB 4468 * 1GB 3 250MB 4469 * 10GB 10 0.9GB 4470 * 100GB 31 3GB 4471 * 1TB 101 10GB 4472 * 10TB 320 32GB 4473 */ 4474void calculate_zone_inactive_ratio(struct zone *zone) 4475{ 4476 unsigned int gb, ratio; 4477 4478 /* Zone size in gigabytes */ 4479 gb = zone->present_pages >> (30 - PAGE_SHIFT); 4480 if (gb) 4481 ratio = int_sqrt(10 * gb); 4482 else 4483 ratio = 1; 4484 4485 zone->inactive_ratio = ratio; 4486} 4487 4488static void __init setup_per_zone_inactive_ratio(void) 4489{ 4490 struct zone *zone; 4491 4492 for_each_zone(zone) 4493 calculate_zone_inactive_ratio(zone); 4494} 4495 4496/* 4497 * Initialise min_free_kbytes. 4498 * 4499 * For small machines we want it small (128k min). For large machines 4500 * we want it large (64MB max). But it is not linear, because network 4501 * bandwidth does not increase linearly with machine size. We use 4502 * 4503 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 4504 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 4505 * 4506 * which yields 4507 * 4508 * 16MB: 512k 4509 * 32MB: 724k 4510 * 64MB: 1024k 4511 * 128MB: 1448k 4512 * 256MB: 2048k 4513 * 512MB: 2896k 4514 * 1024MB: 4096k 4515 * 2048MB: 5792k 4516 * 4096MB: 8192k 4517 * 8192MB: 11584k 4518 * 16384MB: 16384k 4519 */ 4520static int __init init_per_zone_wmark_min(void) 4521{ 4522 unsigned long lowmem_kbytes; 4523 4524 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 4525 4526 min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 4527 if (min_free_kbytes < 128) 4528 min_free_kbytes = 128; 4529 if (min_free_kbytes > 65536) 4530 min_free_kbytes = 65536; 4531 setup_per_zone_wmarks(); 4532 setup_per_zone_lowmem_reserve(); 4533 setup_per_zone_inactive_ratio(); 4534 return 0; 4535} 4536module_init(init_per_zone_wmark_min) 4537 4538/* 4539 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 4540 * that we can call two helper functions whenever min_free_kbytes 4541 * changes. 4542 */ 4543int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 4544 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4545{ 4546 proc_dointvec(table, write, file, buffer, length, ppos); 4547 if (write) 4548 setup_per_zone_wmarks(); 4549 return 0; 4550} 4551 4552#ifdef CONFIG_NUMA 4553int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, 4554 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4555{ 4556 struct zone *zone; 4557 int rc; 4558 4559 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4560 if (rc) 4561 return rc; 4562 4563 for_each_zone(zone) 4564 zone->min_unmapped_pages = (zone->present_pages * 4565 sysctl_min_unmapped_ratio) / 100; 4566 return 0; 4567} 4568 4569int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write, 4570 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4571{ 4572 struct zone *zone; 4573 int rc; 4574 4575 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4576 if (rc) 4577 return rc; 4578 4579 for_each_zone(zone) 4580 zone->min_slab_pages = (zone->present_pages * 4581 sysctl_min_slab_ratio) / 100; 4582 return 0; 4583} 4584#endif 4585 4586/* 4587 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 4588 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 4589 * whenever sysctl_lowmem_reserve_ratio changes. 
4590 * 4591 * The reserve ratio has no relation to the minimum watermarks. The 4592 * lowmem reserve ratio only makes sense as a function of the 4593 * boot-time zone sizes. 4594 */ 4595int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 4596 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4597{ 4598 proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4599 setup_per_zone_lowmem_reserve(); 4600 return 0; 4601} 4602 4603/* 4604 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 4605 * cpu. It is the fraction of total pages in each zone that a hot per-cpu pagelist 4606 * can have before it gets flushed back to the buddy allocator. 4607 */ 4608 4609int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 4610 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4611{ 4612 struct zone *zone; 4613 unsigned int cpu; 4614 int ret; 4615 4616 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4617 if (!write || (ret == -EINVAL)) 4618 return ret; 4619 for_each_zone(zone) { 4620 for_each_online_cpu(cpu) { 4621 unsigned long high; 4622 high = zone->present_pages / percpu_pagelist_fraction; 4623 setup_pagelist_highmark(zone_pcp(zone, cpu), high); 4624 } 4625 } 4626 return 0; 4627} 4628 4629int hashdist = HASHDIST_DEFAULT; 4630 4631#ifdef CONFIG_NUMA 4632static int __init set_hashdist(char *str) 4633{ 4634 if (!str) 4635 return 0; 4636 hashdist = simple_strtoul(str, &str, 0); 4637 return 1; 4638} 4639__setup("hashdist=", set_hashdist); 4640#endif 4641 4642/* 4643 * allocate a large system hash table from bootmem 4644 * - it is assumed that the hash table must contain an exact power-of-2 4645 * quantity of entries 4646 * - limit is the number of hash buckets, not the total allocation size 4647 */ 4648void *__init alloc_large_system_hash(const char *tablename, 4649 unsigned long bucketsize, 4650 unsigned long numentries, 4651 int scale, 4652 int flags, 4653 unsigned int *_hash_shift, 4654 unsigned int *_hash_mask, 4655 unsigned long limit) 4656{ 4657 unsigned long long max = limit; 4658 unsigned long log2qty, size; 4659 void *table = NULL; 4660 4661 /* allow the kernel cmdline to have a say */ 4662 if (!numentries) { 4663 /* round applicable memory size up to nearest megabyte */ 4664 numentries = nr_kernel_pages; 4665 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 4666 numentries >>= 20 - PAGE_SHIFT; 4667 numentries <<= 20 - PAGE_SHIFT; 4668 4669 /* limit to 1 bucket per 2^scale bytes of low memory */ 4670 if (scale > PAGE_SHIFT) 4671 numentries >>= (scale - PAGE_SHIFT); 4672 else 4673 numentries <<= (PAGE_SHIFT - scale); 4674 4675 /* Make sure we've got at least a 0-order allocation..
*/ 4676 if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 4677 numentries = PAGE_SIZE / bucketsize; 4678 } 4679 numentries = roundup_pow_of_two(numentries); 4680 4681 /* limit allocation size to 1/16 total memory by default */ 4682 if (max == 0) { 4683 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 4684 do_div(max, bucketsize); 4685 } 4686 4687 if (numentries > max) 4688 numentries = max; 4689 4690 log2qty = ilog2(numentries); 4691 4692 do { 4693 size = bucketsize << log2qty; 4694 if (flags & HASH_EARLY) 4695 table = alloc_bootmem_nopanic(size); 4696 else if (hashdist) 4697 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 4698 else { 4699 /* 4700 * If bucketsize is not a power-of-two, we may free 4701 * some pages at the end of hash table which 4702 * alloc_pages_exact() automatically does 4703 */ 4704 if (get_order(size) < MAX_ORDER) 4705 table = alloc_pages_exact(size, GFP_ATOMIC); 4706 } 4707 } while (!table && size > PAGE_SIZE && --log2qty); 4708 4709 if (!table) 4710 panic("Failed to allocate %s hash table\n", tablename); 4711 4712 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n", 4713 tablename, 4714 (1U << log2qty), 4715 ilog2(size) - PAGE_SHIFT, 4716 size); 4717 4718 if (_hash_shift) 4719 *_hash_shift = log2qty; 4720 if (_hash_mask) 4721 *_hash_mask = (1 << log2qty) - 1; 4722 4723 /* 4724 * If hashdist is set, the table allocation is done with __vmalloc() 4725 * which invokes the kmemleak_alloc() callback. This function may also 4726 * be called before the slab and kmemleak are initialised when 4727 * kmemleak simply buffers the request to be executed later 4728 * (GFP_ATOMIC flag ignored in this case). 4729 */ 4730 if (!hashdist) 4731 kmemleak_alloc(table, size, 1, GFP_ATOMIC); 4732 4733 return table; 4734} 4735 4736/* Return a pointer to the bitmap storing bits affecting a block of pages */ 4737static inline unsigned long *get_pageblock_bitmap(struct zone *zone, 4738 unsigned long pfn) 4739{ 4740#ifdef CONFIG_SPARSEMEM 4741 return __pfn_to_section(pfn)->pageblock_flags; 4742#else 4743 return zone->pageblock_flags; 4744#endif /* CONFIG_SPARSEMEM */ 4745} 4746 4747static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) 4748{ 4749#ifdef CONFIG_SPARSEMEM 4750 pfn &= (PAGES_PER_SECTION-1); 4751 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 4752#else 4753 pfn = pfn - zone->zone_start_pfn; 4754 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 4755#endif /* CONFIG_SPARSEMEM */ 4756} 4757 4758/** 4759 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages 4760 * @page: The page within the block of interest 4761 * @start_bitidx: The first bit of interest to retrieve 4762 * @end_bitidx: The last bit of interest 4763 * returns pageblock_bits flags 4764 */ 4765unsigned long get_pageblock_flags_group(struct page *page, 4766 int start_bitidx, int end_bitidx) 4767{ 4768 struct zone *zone; 4769 unsigned long *bitmap; 4770 unsigned long pfn, bitidx; 4771 unsigned long flags = 0; 4772 unsigned long value = 1; 4773 4774 zone = page_zone(page); 4775 pfn = page_to_pfn(page); 4776 bitmap = get_pageblock_bitmap(zone, pfn); 4777 bitidx = pfn_to_bitidx(zone, pfn); 4778 4779 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 4780 if (test_bit(bitidx + start_bitidx, bitmap)) 4781 flags |= value; 4782 4783 return flags; 4784} 4785 4786/** 4787 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages 4788 * @page: The page within the 
block of interest 4789 * @start_bitidx: The first bit of interest 4790 * @end_bitidx: The last bit of interest 4791 * @flags: The flags to set 4792 */ 4793void set_pageblock_flags_group(struct page *page, unsigned long flags, 4794 int start_bitidx, int end_bitidx) 4795{ 4796 struct zone *zone; 4797 unsigned long *bitmap; 4798 unsigned long pfn, bitidx; 4799 unsigned long value = 1; 4800 4801 zone = page_zone(page); 4802 pfn = page_to_pfn(page); 4803 bitmap = get_pageblock_bitmap(zone, pfn); 4804 bitidx = pfn_to_bitidx(zone, pfn); 4805 VM_BUG_ON(pfn < zone->zone_start_pfn); 4806 VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages); 4807 4808 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 4809 if (flags & value) 4810 __set_bit(bitidx + start_bitidx, bitmap); 4811 else 4812 __clear_bit(bitidx + start_bitidx, bitmap); 4813} 4814 4815/* 4816 * This is designed as a helper function; please see page_isolation.c as well. 4817 * Set/clear a page block's migratetype to/from MIGRATE_ISOLATE. 4818 * The page allocator never allocates memory from a MIGRATE_ISOLATE block. 4819 */ 4820 4821int set_migratetype_isolate(struct page *page) 4822{ 4823 struct zone *zone; 4824 unsigned long flags; 4825 int ret = -EBUSY; 4826 4827 zone = page_zone(page); 4828 spin_lock_irqsave(&zone->lock, flags); 4829 /* 4830 * In the future, more migrate types will be able to be isolation targets. 4831 */ 4832 if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE) 4833 goto out; 4834 set_pageblock_migratetype(page, MIGRATE_ISOLATE); 4835 move_freepages_block(zone, page, MIGRATE_ISOLATE); 4836 ret = 0; 4837out: 4838 spin_unlock_irqrestore(&zone->lock, flags); 4839 if (!ret) 4840 drain_all_pages(); 4841 return ret; 4842} 4843 4844void unset_migratetype_isolate(struct page *page) 4845{ 4846 struct zone *zone; 4847 unsigned long flags; 4848 zone = page_zone(page); 4849 spin_lock_irqsave(&zone->lock, flags); 4850 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) 4851 goto out; 4852 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4853 move_freepages_block(zone, page, MIGRATE_MOVABLE); 4854out: 4855 spin_unlock_irqrestore(&zone->lock, flags); 4856} 4857 4858#ifdef CONFIG_MEMORY_HOTREMOVE 4859/* 4860 * All pages in the range must be isolated before calling this. 4861 */ 4862void 4863__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 4864{ 4865 struct page *page; 4866 struct zone *zone; 4867 int order, i; 4868 unsigned long pfn; 4869 unsigned long flags; 4870 /* find the first valid pfn */ 4871 for (pfn = start_pfn; pfn < end_pfn; pfn++) 4872 if (pfn_valid(pfn)) 4873 break; 4874 if (pfn == end_pfn) 4875 return; 4876 zone = page_zone(pfn_to_page(pfn)); 4877 spin_lock_irqsave(&zone->lock, flags); 4878 pfn = start_pfn; 4879 while (pfn < end_pfn) { 4880 if (!pfn_valid(pfn)) { 4881 pfn++; 4882 continue; 4883 } 4884 page = pfn_to_page(pfn); 4885 BUG_ON(page_count(page)); 4886 BUG_ON(!PageBuddy(page)); 4887 order = page_order(page); 4888#ifdef CONFIG_DEBUG_VM 4889 printk(KERN_INFO "remove from free list %lx %d %lx\n", 4890 pfn, 1 << order, end_pfn); 4891#endif 4892 list_del(&page->lru); 4893 rmv_page_order(page); 4894 zone->free_area[order].nr_free--; 4895 __mod_zone_page_state(zone, NR_FREE_PAGES, 4896 - (1UL << order)); 4897 for (i = 0; i < (1 << order); i++) 4898 SetPageReserved((page+i)); 4899 pfn += (1 << order); 4900 } 4901 spin_unlock_irqrestore(&zone->lock, flags); 4902} 4903#endif 4904
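/*
 * Editor's illustrative note, not part of the original file: a typical
 * caller of alloc_large_system_hash() is the dentry cache setup in
 * fs/dcache.c, which does roughly
 *
 *	dentry_hashtable =
 *		alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_head),
 *					dhash_entries, 13, HASH_EARLY,
 *					&d_hash_shift, &d_hash_mask, 0);
 *
 * With dhash_entries == 0 the table is sized from nr_kernel_pages at one
 * bucket per 2^13 bytes of low memory, rounded up to a power of two and
 * capped at 1/16 of total memory.
 */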