page_alloc.c revision 11e33f6a55ed7847d9c8ffe185ef87faf7806abe
/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *	(lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;
int percpu_pagelist_fraction;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long
__meminitdata nr_all_pages; 126static unsigned long __meminitdata dma_reserve; 127 128#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 129 /* 130 * MAX_ACTIVE_REGIONS determines the maximum number of distinct 131 * ranges of memory (RAM) that may be registered with add_active_range(). 132 * Ranges passed to add_active_range() will be merged if possible 133 * so the number of times add_active_range() can be called is 134 * related to the number of nodes and the number of holes 135 */ 136 #ifdef CONFIG_MAX_ACTIVE_REGIONS 137 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ 138 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS 139 #else 140 #if MAX_NUMNODES >= 32 141 /* If there can be many nodes, allow up to 50 holes per node */ 142 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) 143 #else 144 /* By default, allow up to 256 distinct regions */ 145 #define MAX_ACTIVE_REGIONS 256 146 #endif 147 #endif 148 149 static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; 150 static int __meminitdata nr_nodemap_entries; 151 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; 152 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; 153 static unsigned long __initdata required_kernelcore; 154 static unsigned long __initdata required_movablecore; 155 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; 156 157 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 158 int movable_zone; 159 EXPORT_SYMBOL(movable_zone); 160#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 161 162#if MAX_NUMNODES > 1 163int nr_node_ids __read_mostly = MAX_NUMNODES; 164EXPORT_SYMBOL(nr_node_ids); 165#endif 166 167int page_group_by_mobility_disabled __read_mostly; 168 169static void set_pageblock_migratetype(struct page *page, int migratetype) 170{ 171 set_pageblock_flags_group(page, (unsigned long)migratetype, 172 PB_migrate, PB_migrate_end); 173} 174 175#ifdef CONFIG_DEBUG_VM 176static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 177{ 178 int ret = 0; 179 unsigned seq; 180 unsigned long pfn = page_to_pfn(page); 181 182 do { 183 seq = zone_span_seqbegin(zone); 184 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 185 ret = 1; 186 else if (pfn < zone->zone_start_pfn) 187 ret = 1; 188 } while (zone_span_seqretry(zone, seq)); 189 190 return ret; 191} 192 193static int page_is_consistent(struct zone *zone, struct page *page) 194{ 195 if (!pfn_valid_within(page_to_pfn(page))) 196 return 0; 197 if (zone != page_zone(page)) 198 return 0; 199 200 return 1; 201} 202/* 203 * Temporary debugging check for pages not lying within a given zone. 204 */ 205static int bad_range(struct zone *zone, struct page *page) 206{ 207 if (page_outside_zone_boundaries(zone, page)) 208 return 1; 209 if (!page_is_consistent(zone, page)) 210 return 1; 211 212 return 0; 213} 214#else 215static inline int bad_range(struct zone *zone, struct page *page) 216{ 217 return 0; 218} 219#endif 220 221static void bad_page(struct page *page) 222{ 223 static unsigned long resume; 224 static unsigned long nr_shown; 225 static unsigned long nr_unshown; 226 227 /* 228 * Allow a burst of 60 reports, then keep quiet for that minute; 229 * or allow a steady drip of one report per second. 
230 */ 231 if (nr_shown == 60) { 232 if (time_before(jiffies, resume)) { 233 nr_unshown++; 234 goto out; 235 } 236 if (nr_unshown) { 237 printk(KERN_ALERT 238 "BUG: Bad page state: %lu messages suppressed\n", 239 nr_unshown); 240 nr_unshown = 0; 241 } 242 nr_shown = 0; 243 } 244 if (nr_shown++ == 0) 245 resume = jiffies + 60 * HZ; 246 247 printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", 248 current->comm, page_to_pfn(page)); 249 printk(KERN_ALERT 250 "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n", 251 page, (void *)page->flags, page_count(page), 252 page_mapcount(page), page->mapping, page->index); 253 254 dump_stack(); 255out: 256 /* Leave bad fields for debug, except PageBuddy could make trouble */ 257 __ClearPageBuddy(page); 258 add_taint(TAINT_BAD_PAGE); 259} 260 261/* 262 * Higher-order pages are called "compound pages". They are structured thusly: 263 * 264 * The first PAGE_SIZE page is called the "head page". 265 * 266 * The remaining PAGE_SIZE pages are called "tail pages". 267 * 268 * All pages have PG_compound set. All pages have their ->private pointing at 269 * the head page (even the head page has this). 270 * 271 * The first tail page's ->lru.next holds the address of the compound page's 272 * put_page() function. Its ->lru.prev holds the order of allocation. 273 * This usage means that zero-order pages may not be compound. 274 */ 275 276static void free_compound_page(struct page *page) 277{ 278 __free_pages_ok(page, compound_order(page)); 279} 280 281void prep_compound_page(struct page *page, unsigned long order) 282{ 283 int i; 284 int nr_pages = 1 << order; 285 286 set_compound_page_dtor(page, free_compound_page); 287 set_compound_order(page, order); 288 __SetPageHead(page); 289 for (i = 1; i < nr_pages; i++) { 290 struct page *p = page + i; 291 292 __SetPageTail(p); 293 p->first_page = page; 294 } 295} 296 297#ifdef CONFIG_HUGETLBFS 298void prep_compound_gigantic_page(struct page *page, unsigned long order) 299{ 300 int i; 301 int nr_pages = 1 << order; 302 struct page *p = page + 1; 303 304 set_compound_page_dtor(page, free_compound_page); 305 set_compound_order(page, order); 306 __SetPageHead(page); 307 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { 308 __SetPageTail(p); 309 p->first_page = page; 310 } 311} 312#endif 313 314static int destroy_compound_page(struct page *page, unsigned long order) 315{ 316 int i; 317 int nr_pages = 1 << order; 318 int bad = 0; 319 320 if (unlikely(compound_order(page) != order) || 321 unlikely(!PageHead(page))) { 322 bad_page(page); 323 bad++; 324 } 325 326 __ClearPageHead(page); 327 328 for (i = 1; i < nr_pages; i++) { 329 struct page *p = page + i; 330 331 if (unlikely(!PageTail(p) || (p->first_page != page))) { 332 bad_page(page); 333 bad++; 334 } 335 __ClearPageTail(p); 336 } 337 338 return bad; 339} 340 341static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 342{ 343 int i; 344 345 /* 346 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 347 * and __GFP_HIGHMEM from hard or soft interrupt context. 
 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *	B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *	B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *	P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
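 *
 * For example (illustrative numbers): freeing the order-0 page at pfn 8
 * while pfn 9 is free merges them into an order-1 block at 8; if the
 * order-1 block at 10 is then free as well, the merge loop below repeats,
 * giving an order-2 block at 8, and so on up to MAX_ORDER-1.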
446 * 447 * -- wli 448 */ 449 450static inline void __free_one_page(struct page *page, 451 struct zone *zone, unsigned int order) 452{ 453 unsigned long page_idx; 454 int order_size = 1 << order; 455 int migratetype = get_pageblock_migratetype(page); 456 457 if (unlikely(PageCompound(page))) 458 if (unlikely(destroy_compound_page(page, order))) 459 return; 460 461 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 462 463 VM_BUG_ON(page_idx & (order_size - 1)); 464 VM_BUG_ON(bad_range(zone, page)); 465 466 __mod_zone_page_state(zone, NR_FREE_PAGES, order_size); 467 while (order < MAX_ORDER-1) { 468 unsigned long combined_idx; 469 struct page *buddy; 470 471 buddy = __page_find_buddy(page, page_idx, order); 472 if (!page_is_buddy(page, buddy, order)) 473 break; 474 475 /* Our buddy is free, merge with it and move up one order. */ 476 list_del(&buddy->lru); 477 zone->free_area[order].nr_free--; 478 rmv_page_order(buddy); 479 combined_idx = __find_combined_index(page_idx, order); 480 page = page + (combined_idx - page_idx); 481 page_idx = combined_idx; 482 order++; 483 } 484 set_page_order(page, order); 485 list_add(&page->lru, 486 &zone->free_area[order].free_list[migratetype]); 487 zone->free_area[order].nr_free++; 488} 489 490static inline int free_pages_check(struct page *page) 491{ 492 free_page_mlock(page); 493 if (unlikely(page_mapcount(page) | 494 (page->mapping != NULL) | 495 (page_count(page) != 0) | 496 (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) { 497 bad_page(page); 498 return 1; 499 } 500 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP) 501 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 502 return 0; 503} 504 505/* 506 * Frees a list of pages. 507 * Assumes all pages on list are in same zone, and of same order. 508 * count is the number of pages to free. 509 * 510 * If the zone was previously in an "all pages pinned" state then look to 511 * see if this freeing clears that state. 512 * 513 * And clear the zone's pages_scanned counter, to hold off the "all pages are 514 * pinned" detection logic. 
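 *
 * Callers are expected to have interrupts disabled already; only
 * zone->lock is taken here (see free_hot_cold_page() and drain_pages()).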
515 */ 516static void free_pages_bulk(struct zone *zone, int count, 517 struct list_head *list, int order) 518{ 519 spin_lock(&zone->lock); 520 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); 521 zone->pages_scanned = 0; 522 while (count--) { 523 struct page *page; 524 525 VM_BUG_ON(list_empty(list)); 526 page = list_entry(list->prev, struct page, lru); 527 /* have to delete it as __free_one_page list manipulates */ 528 list_del(&page->lru); 529 __free_one_page(page, zone, order); 530 } 531 spin_unlock(&zone->lock); 532} 533 534static void free_one_page(struct zone *zone, struct page *page, int order) 535{ 536 spin_lock(&zone->lock); 537 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); 538 zone->pages_scanned = 0; 539 __free_one_page(page, zone, order); 540 spin_unlock(&zone->lock); 541} 542 543static void __free_pages_ok(struct page *page, unsigned int order) 544{ 545 unsigned long flags; 546 int i; 547 int bad = 0; 548 549 for (i = 0 ; i < (1 << order) ; ++i) 550 bad += free_pages_check(page + i); 551 if (bad) 552 return; 553 554 if (!PageHighMem(page)) { 555 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); 556 debug_check_no_obj_freed(page_address(page), 557 PAGE_SIZE << order); 558 } 559 arch_free_page(page, order); 560 kernel_map_pages(page, 1 << order, 0); 561 562 local_irq_save(flags); 563 __count_vm_events(PGFREE, 1 << order); 564 free_one_page(page_zone(page), page, order); 565 local_irq_restore(flags); 566} 567 568/* 569 * permit the bootmem allocator to evade page validation on high-order frees 570 */ 571void __meminit __free_pages_bootmem(struct page *page, unsigned int order) 572{ 573 if (order == 0) { 574 __ClearPageReserved(page); 575 set_page_count(page, 0); 576 set_page_refcounted(page); 577 __free_page(page); 578 } else { 579 int loop; 580 581 prefetchw(page); 582 for (loop = 0; loop < BITS_PER_LONG; loop++) { 583 struct page *p = &page[loop]; 584 585 if (loop + 1 < BITS_PER_LONG) 586 prefetchw(p + 1); 587 __ClearPageReserved(p); 588 set_page_count(p, 0); 589 } 590 591 set_page_refcounted(page); 592 __free_pages(page, order); 593 } 594} 595 596 597/* 598 * The order of subdivision here is critical for the IO subsystem. 599 * Please do not alter this order without good reasons and regression 600 * testing. Specifically, as large blocks of memory are subdivided, 601 * the order in which smaller blocks are delivered depends on the order 602 * they're subdivided in this function. This is the primary factor 603 * influencing the order in which pages are delivered to the IO 604 * subsystem according to empirical testing, and this is also justified 605 * by considering the behavior of a buddy system containing a single 606 * large block of memory acted on by a series of small allocations. 607 * This behavior is a critical factor in sglist merging's success. 
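 *
 * Illustrative example: serving an order-0 request from an order-3 free
 * block hands back one page and leaves the remainder on the free lists
 * as one order-2, one order-1 and one order-0 block, which is what the
 * halving of "size" in the loop below produces.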
608 * 609 * -- wli 610 */ 611static inline void expand(struct zone *zone, struct page *page, 612 int low, int high, struct free_area *area, 613 int migratetype) 614{ 615 unsigned long size = 1 << high; 616 617 while (high > low) { 618 area--; 619 high--; 620 size >>= 1; 621 VM_BUG_ON(bad_range(zone, &page[size])); 622 list_add(&page[size].lru, &area->free_list[migratetype]); 623 area->nr_free++; 624 set_page_order(&page[size], high); 625 } 626} 627 628/* 629 * This page is about to be returned from the page allocator 630 */ 631static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 632{ 633 if (unlikely(page_mapcount(page) | 634 (page->mapping != NULL) | 635 (page_count(page) != 0) | 636 (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) { 637 bad_page(page); 638 return 1; 639 } 640 641 set_page_private(page, 0); 642 set_page_refcounted(page); 643 644 arch_alloc_page(page, order); 645 kernel_map_pages(page, 1 << order, 1); 646 647 if (gfp_flags & __GFP_ZERO) 648 prep_zero_page(page, order, gfp_flags); 649 650 if (order && (gfp_flags & __GFP_COMP)) 651 prep_compound_page(page, order); 652 653 return 0; 654} 655 656/* 657 * Go through the free lists for the given migratetype and remove 658 * the smallest available page from the freelists 659 */ 660static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 661 int migratetype) 662{ 663 unsigned int current_order; 664 struct free_area * area; 665 struct page *page; 666 667 /* Find a page of the appropriate size in the preferred list */ 668 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 669 area = &(zone->free_area[current_order]); 670 if (list_empty(&area->free_list[migratetype])) 671 continue; 672 673 page = list_entry(area->free_list[migratetype].next, 674 struct page, lru); 675 list_del(&page->lru); 676 rmv_page_order(page); 677 area->nr_free--; 678 __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order)); 679 expand(zone, page, order, current_order, area, migratetype); 680 return page; 681 } 682 683 return NULL; 684} 685 686 687/* 688 * This array describes the order lists are fallen back to when 689 * the free lists for the desirable migrate type are depleted 690 */ 691static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = { 692 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 693 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 694 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE }, 695 [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */ 696}; 697 698/* 699 * Move the free pages in a range to the free lists of the requested type. 700 * Note that start_page and end_pages are not aligned on a pageblock 701 * boundary. If alignment is required, use move_freepages_block() 702 */ 703static int move_freepages(struct zone *zone, 704 struct page *start_page, struct page *end_page, 705 int migratetype) 706{ 707 struct page *page; 708 unsigned long order; 709 int pages_moved = 0; 710 711#ifndef CONFIG_HOLES_IN_ZONE 712 /* 713 * page_zone is not safe to call in this context when 714 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant 715 * anyway as we check zone boundaries in move_freepages_block(). 
716 * Remove at a later date when no bug reports exist related to 717 * grouping pages by mobility 718 */ 719 BUG_ON(page_zone(start_page) != page_zone(end_page)); 720#endif 721 722 for (page = start_page; page <= end_page;) { 723 /* Make sure we are not inadvertently changing nodes */ 724 VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone)); 725 726 if (!pfn_valid_within(page_to_pfn(page))) { 727 page++; 728 continue; 729 } 730 731 if (!PageBuddy(page)) { 732 page++; 733 continue; 734 } 735 736 order = page_order(page); 737 list_del(&page->lru); 738 list_add(&page->lru, 739 &zone->free_area[order].free_list[migratetype]); 740 page += 1 << order; 741 pages_moved += 1 << order; 742 } 743 744 return pages_moved; 745} 746 747static int move_freepages_block(struct zone *zone, struct page *page, 748 int migratetype) 749{ 750 unsigned long start_pfn, end_pfn; 751 struct page *start_page, *end_page; 752 753 start_pfn = page_to_pfn(page); 754 start_pfn = start_pfn & ~(pageblock_nr_pages-1); 755 start_page = pfn_to_page(start_pfn); 756 end_page = start_page + pageblock_nr_pages - 1; 757 end_pfn = start_pfn + pageblock_nr_pages - 1; 758 759 /* Do not cross zone boundaries */ 760 if (start_pfn < zone->zone_start_pfn) 761 start_page = page; 762 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages) 763 return 0; 764 765 return move_freepages(zone, start_page, end_page, migratetype); 766} 767 768/* Remove an element from the buddy allocator from the fallback list */ 769static struct page *__rmqueue_fallback(struct zone *zone, int order, 770 int start_migratetype) 771{ 772 struct free_area * area; 773 int current_order; 774 struct page *page; 775 int migratetype, i; 776 777 /* Find the largest possible block of pages in the other list */ 778 for (current_order = MAX_ORDER-1; current_order >= order; 779 --current_order) { 780 for (i = 0; i < MIGRATE_TYPES - 1; i++) { 781 migratetype = fallbacks[start_migratetype][i]; 782 783 /* MIGRATE_RESERVE handled later if necessary */ 784 if (migratetype == MIGRATE_RESERVE) 785 continue; 786 787 area = &(zone->free_area[current_order]); 788 if (list_empty(&area->free_list[migratetype])) 789 continue; 790 791 page = list_entry(area->free_list[migratetype].next, 792 struct page, lru); 793 area->nr_free--; 794 795 /* 796 * If breaking a large block of pages, move all free 797 * pages to the preferred allocation list. If falling 798 * back for a reclaimable kernel allocation, be more 799 * agressive about taking ownership of free pages 800 */ 801 if (unlikely(current_order >= (pageblock_order >> 1)) || 802 start_migratetype == MIGRATE_RECLAIMABLE) { 803 unsigned long pages; 804 pages = move_freepages_block(zone, page, 805 start_migratetype); 806 807 /* Claim the whole block if over half of it is free */ 808 if (pages >= (1 << (pageblock_order-1))) 809 set_pageblock_migratetype(page, 810 start_migratetype); 811 812 migratetype = start_migratetype; 813 } 814 815 /* Remove the page from the freelists */ 816 list_del(&page->lru); 817 rmv_page_order(page); 818 __mod_zone_page_state(zone, NR_FREE_PAGES, 819 -(1UL << order)); 820 821 if (current_order == pageblock_order) 822 set_pageblock_migratetype(page, 823 start_migratetype); 824 825 expand(zone, page, order, current_order, area, migratetype); 826 return page; 827 } 828 } 829 830 /* Use MIGRATE_RESERVE rather than fail an allocation */ 831 return __rmqueue_smallest(zone, order, MIGRATE_RESERVE); 832} 833 834/* 835 * Do the hard work of removing an element from the buddy allocator. 
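 * __rmqueue_smallest() is tried first for the requested migratetype; only
 * if that fails does __rmqueue_fallback() raid the other migratetypes'
 * free lists (possibly claiming ownership of a whole pageblock).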
836 * Call me with the zone->lock already held. 837 */ 838static struct page *__rmqueue(struct zone *zone, unsigned int order, 839 int migratetype) 840{ 841 struct page *page; 842 843 page = __rmqueue_smallest(zone, order, migratetype); 844 845 if (unlikely(!page)) 846 page = __rmqueue_fallback(zone, order, migratetype); 847 848 return page; 849} 850 851/* 852 * Obtain a specified number of elements from the buddy allocator, all under 853 * a single hold of the lock, for efficiency. Add them to the supplied list. 854 * Returns the number of new pages which were placed at *list. 855 */ 856static int rmqueue_bulk(struct zone *zone, unsigned int order, 857 unsigned long count, struct list_head *list, 858 int migratetype) 859{ 860 int i; 861 862 spin_lock(&zone->lock); 863 for (i = 0; i < count; ++i) { 864 struct page *page = __rmqueue(zone, order, migratetype); 865 if (unlikely(page == NULL)) 866 break; 867 868 /* 869 * Split buddy pages returned by expand() are received here 870 * in physical page order. The page is added to the callers and 871 * list and the list head then moves forward. From the callers 872 * perspective, the linked list is ordered by page number in 873 * some conditions. This is useful for IO devices that can 874 * merge IO requests if the physical pages are ordered 875 * properly. 876 */ 877 list_add(&page->lru, list); 878 set_page_private(page, migratetype); 879 list = &page->lru; 880 } 881 spin_unlock(&zone->lock); 882 return i; 883} 884 885#ifdef CONFIG_NUMA 886/* 887 * Called from the vmstat counter updater to drain pagesets of this 888 * currently executing processor on remote nodes after they have 889 * expired. 890 * 891 * Note that this function must be called with the thread pinned to 892 * a single processor. 893 */ 894void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 895{ 896 unsigned long flags; 897 int to_drain; 898 899 local_irq_save(flags); 900 if (pcp->count >= pcp->batch) 901 to_drain = pcp->batch; 902 else 903 to_drain = pcp->count; 904 free_pages_bulk(zone, to_drain, &pcp->list, 0); 905 pcp->count -= to_drain; 906 local_irq_restore(flags); 907} 908#endif 909 910/* 911 * Drain pages of the indicated processor. 912 * 913 * The processor must either be the current processor and the 914 * thread pinned to the current processor or a processor that 915 * is not online. 916 */ 917static void drain_pages(unsigned int cpu) 918{ 919 unsigned long flags; 920 struct zone *zone; 921 922 for_each_populated_zone(zone) { 923 struct per_cpu_pageset *pset; 924 struct per_cpu_pages *pcp; 925 926 pset = zone_pcp(zone, cpu); 927 928 pcp = &pset->pcp; 929 local_irq_save(flags); 930 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 931 pcp->count = 0; 932 local_irq_restore(flags); 933 } 934} 935 936/* 937 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 
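 * (The void *arg is unused; it exists so this can be passed directly to
 * on_each_cpu(), as drain_all_pages() does below.)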
938 */ 939void drain_local_pages(void *arg) 940{ 941 drain_pages(smp_processor_id()); 942} 943 944/* 945 * Spill all the per-cpu pages from all CPUs back into the buddy allocator 946 */ 947void drain_all_pages(void) 948{ 949 on_each_cpu(drain_local_pages, NULL, 1); 950} 951 952#ifdef CONFIG_HIBERNATION 953 954void mark_free_pages(struct zone *zone) 955{ 956 unsigned long pfn, max_zone_pfn; 957 unsigned long flags; 958 int order, t; 959 struct list_head *curr; 960 961 if (!zone->spanned_pages) 962 return; 963 964 spin_lock_irqsave(&zone->lock, flags); 965 966 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 967 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 968 if (pfn_valid(pfn)) { 969 struct page *page = pfn_to_page(pfn); 970 971 if (!swsusp_page_is_forbidden(page)) 972 swsusp_unset_page_free(page); 973 } 974 975 for_each_migratetype_order(order, t) { 976 list_for_each(curr, &zone->free_area[order].free_list[t]) { 977 unsigned long i; 978 979 pfn = page_to_pfn(list_entry(curr, struct page, lru)); 980 for (i = 0; i < (1UL << order); i++) 981 swsusp_set_page_free(pfn_to_page(pfn + i)); 982 } 983 } 984 spin_unlock_irqrestore(&zone->lock, flags); 985} 986#endif /* CONFIG_PM */ 987 988/* 989 * Free a 0-order page 990 */ 991static void free_hot_cold_page(struct page *page, int cold) 992{ 993 struct zone *zone = page_zone(page); 994 struct per_cpu_pages *pcp; 995 unsigned long flags; 996 997 if (PageAnon(page)) 998 page->mapping = NULL; 999 if (free_pages_check(page)) 1000 return; 1001 1002 if (!PageHighMem(page)) { 1003 debug_check_no_locks_freed(page_address(page), PAGE_SIZE); 1004 debug_check_no_obj_freed(page_address(page), PAGE_SIZE); 1005 } 1006 arch_free_page(page, 0); 1007 kernel_map_pages(page, 1, 0); 1008 1009 pcp = &zone_pcp(zone, get_cpu())->pcp; 1010 local_irq_save(flags); 1011 __count_vm_event(PGFREE); 1012 if (cold) 1013 list_add_tail(&page->lru, &pcp->list); 1014 else 1015 list_add(&page->lru, &pcp->list); 1016 set_page_private(page, get_pageblock_migratetype(page)); 1017 pcp->count++; 1018 if (pcp->count >= pcp->high) { 1019 free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 1020 pcp->count -= pcp->batch; 1021 } 1022 local_irq_restore(flags); 1023 put_cpu(); 1024} 1025 1026void free_hot_page(struct page *page) 1027{ 1028 free_hot_cold_page(page, 0); 1029} 1030 1031void free_cold_page(struct page *page) 1032{ 1033 free_hot_cold_page(page, 1); 1034} 1035 1036/* 1037 * split_page takes a non-compound higher-order page, and splits it into 1038 * n (1<<order) sub-pages: page[0..n] 1039 * Each sub-page must be freed individually. 1040 * 1041 * Note: this is probably too low level an operation for use in drivers. 1042 * Please consult with lkml before using this in your driver. 1043 */ 1044void split_page(struct page *page, unsigned int order) 1045{ 1046 int i; 1047 1048 VM_BUG_ON(PageCompound(page)); 1049 VM_BUG_ON(!page_count(page)); 1050 for (i = 1; i < (1 << order); i++) 1051 set_page_refcounted(page + i); 1052} 1053 1054/* 1055 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 1056 * we cheat by calling it from here, in the order > 0 path. Saves a branch 1057 * or two. 
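 *
 * Order-0 requests are served from the per-cpu pcp list, refilled in bulk
 * by rmqueue_bulk() when it runs dry; higher orders take zone->lock and
 * call __rmqueue() directly.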
1058 */ 1059static struct page *buffered_rmqueue(struct zone *preferred_zone, 1060 struct zone *zone, int order, gfp_t gfp_flags) 1061{ 1062 unsigned long flags; 1063 struct page *page; 1064 int cold = !!(gfp_flags & __GFP_COLD); 1065 int cpu; 1066 int migratetype = allocflags_to_migratetype(gfp_flags); 1067 1068again: 1069 cpu = get_cpu(); 1070 if (likely(order == 0)) { 1071 struct per_cpu_pages *pcp; 1072 1073 pcp = &zone_pcp(zone, cpu)->pcp; 1074 local_irq_save(flags); 1075 if (!pcp->count) { 1076 pcp->count = rmqueue_bulk(zone, 0, 1077 pcp->batch, &pcp->list, migratetype); 1078 if (unlikely(!pcp->count)) 1079 goto failed; 1080 } 1081 1082 /* Find a page of the appropriate migrate type */ 1083 if (cold) { 1084 list_for_each_entry_reverse(page, &pcp->list, lru) 1085 if (page_private(page) == migratetype) 1086 break; 1087 } else { 1088 list_for_each_entry(page, &pcp->list, lru) 1089 if (page_private(page) == migratetype) 1090 break; 1091 } 1092 1093 /* Allocate more to the pcp list if necessary */ 1094 if (unlikely(&page->lru == &pcp->list)) { 1095 pcp->count += rmqueue_bulk(zone, 0, 1096 pcp->batch, &pcp->list, migratetype); 1097 page = list_entry(pcp->list.next, struct page, lru); 1098 } 1099 1100 list_del(&page->lru); 1101 pcp->count--; 1102 } else { 1103 spin_lock_irqsave(&zone->lock, flags); 1104 page = __rmqueue(zone, order, migratetype); 1105 spin_unlock(&zone->lock); 1106 if (!page) 1107 goto failed; 1108 } 1109 1110 __count_zone_vm_events(PGALLOC, zone, 1 << order); 1111 zone_statistics(preferred_zone, zone); 1112 local_irq_restore(flags); 1113 put_cpu(); 1114 1115 VM_BUG_ON(bad_range(zone, page)); 1116 if (prep_new_page(page, order, gfp_flags)) 1117 goto again; 1118 return page; 1119 1120failed: 1121 local_irq_restore(flags); 1122 put_cpu(); 1123 return NULL; 1124} 1125 1126#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 1127#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ 1128#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ 1129#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ 1130#define ALLOC_HARDER 0x10 /* try to alloc harder */ 1131#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 1132#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 1133 1134#ifdef CONFIG_FAIL_PAGE_ALLOC 1135 1136static struct fail_page_alloc_attr { 1137 struct fault_attr attr; 1138 1139 u32 ignore_gfp_highmem; 1140 u32 ignore_gfp_wait; 1141 u32 min_order; 1142 1143#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 1144 1145 struct dentry *ignore_gfp_highmem_file; 1146 struct dentry *ignore_gfp_wait_file; 1147 struct dentry *min_order_file; 1148 1149#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 1150 1151} fail_page_alloc = { 1152 .attr = FAULT_ATTR_INITIALIZER, 1153 .ignore_gfp_wait = 1, 1154 .ignore_gfp_highmem = 1, 1155 .min_order = 1, 1156}; 1157 1158static int __init setup_fail_page_alloc(char *str) 1159{ 1160 return setup_fault_attr(&fail_page_alloc.attr, str); 1161} 1162__setup("fail_page_alloc=", setup_fail_page_alloc); 1163 1164static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 1165{ 1166 if (order < fail_page_alloc.min_order) 1167 return 0; 1168 if (gfp_mask & __GFP_NOFAIL) 1169 return 0; 1170 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 1171 return 0; 1172 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) 1173 return 0; 1174 1175 return should_fail(&fail_page_alloc.attr, 1 << order); 1176} 1177 1178#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 1179 1180static int __init fail_page_alloc_debugfs(void) 1181{ 
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
	int err;

	err = init_fault_attr_dentries(&fail_page_alloc.attr,
				       "fail_page_alloc");
	if (err)
		return err;
	dir = fail_page_alloc.attr.dentries.dir;

	fail_page_alloc.ignore_gfp_wait_file =
		debugfs_create_bool("ignore-gfp-wait", mode, dir,
				    &fail_page_alloc.ignore_gfp_wait);

	fail_page_alloc.ignore_gfp_highmem_file =
		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				    &fail_page_alloc.ignore_gfp_highmem);
	fail_page_alloc.min_order_file =
		debugfs_create_u32("min-order", mode, dir,
				   &fail_page_alloc.min_order);

	if (!fail_page_alloc.ignore_gfp_wait_file ||
			!fail_page_alloc.ignore_gfp_highmem_file ||
			!fail_page_alloc.min_order_file) {
		err = -ENOMEM;
		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
		debugfs_remove(fail_page_alloc.min_order_file);
		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
	}

	return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}

#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full.  See further
 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed in zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
1283 */ 1284static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1285{ 1286 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1287 nodemask_t *allowednodes; /* zonelist_cache approximation */ 1288 1289 zlc = zonelist->zlcache_ptr; 1290 if (!zlc) 1291 return NULL; 1292 1293 if (time_after(jiffies, zlc->last_full_zap + HZ)) { 1294 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 1295 zlc->last_full_zap = jiffies; 1296 } 1297 1298 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ? 1299 &cpuset_current_mems_allowed : 1300 &node_states[N_HIGH_MEMORY]; 1301 return allowednodes; 1302} 1303 1304/* 1305 * Given 'z' scanning a zonelist, run a couple of quick checks to see 1306 * if it is worth looking at further for free memory: 1307 * 1) Check that the zone isn't thought to be full (doesn't have its 1308 * bit set in the zonelist_cache fullzones BITMAP). 1309 * 2) Check that the zones node (obtained from the zonelist_cache 1310 * z_to_n[] mapping) is allowed in the passed in allowednodes mask. 1311 * Return true (non-zero) if zone is worth looking at further, or 1312 * else return false (zero) if it is not. 1313 * 1314 * This check -ignores- the distinction between various watermarks, 1315 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is 1316 * found to be full for any variation of these watermarks, it will 1317 * be considered full for up to one second by all requests, unless 1318 * we are so low on memory on all allowed nodes that we are forced 1319 * into the second scan of the zonelist. 1320 * 1321 * In the second scan we ignore this zonelist cache and exactly 1322 * apply the watermarks to all zones, even it is slower to do so. 1323 * We are low on memory in the second scan, and should leave no stone 1324 * unturned looking for a free page. 1325 */ 1326static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, 1327 nodemask_t *allowednodes) 1328{ 1329 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1330 int i; /* index of *z in zonelist zones */ 1331 int n; /* node that zone *z is on */ 1332 1333 zlc = zonelist->zlcache_ptr; 1334 if (!zlc) 1335 return 1; 1336 1337 i = z - zonelist->_zonerefs; 1338 n = zlc->z_to_n[i]; 1339 1340 /* This zone is worth trying if it is allowed but not full */ 1341 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones); 1342} 1343 1344/* 1345 * Given 'z' scanning a zonelist, set the corresponding bit in 1346 * zlc->fullzones, so that subsequent attempts to allocate a page 1347 * from that zone don't waste time re-examining it. 1348 */ 1349static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 1350{ 1351 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1352 int i; /* index of *z in zonelist zones */ 1353 1354 zlc = zonelist->zlcache_ptr; 1355 if (!zlc) 1356 return; 1357 1358 i = z - zonelist->_zonerefs; 1359 1360 set_bit(i, zlc->fullzones); 1361} 1362 1363#else /* CONFIG_NUMA */ 1364 1365static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1366{ 1367 return NULL; 1368} 1369 1370static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, 1371 nodemask_t *allowednodes) 1372{ 1373 return 1; 1374} 1375 1376static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 1377{ 1378} 1379#endif /* CONFIG_NUMA */ 1380 1381/* 1382 * get_page_from_freelist goes through the zonelist trying to allocate 1383 * a page. 
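 * Each candidate zone may be skipped via the zonelist cache; otherwise
 * the cpuset softwall is checked, the requested watermark is verified
 * (with zone_reclaim() as a fallback when zone_reclaim_mode is set) and
 * buffered_rmqueue() then does the actual allocation.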
1384 */ 1385static struct page * 1386get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, 1387 struct zonelist *zonelist, int high_zoneidx, int alloc_flags) 1388{ 1389 struct zoneref *z; 1390 struct page *page = NULL; 1391 int classzone_idx; 1392 struct zone *zone, *preferred_zone; 1393 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ 1394 int zlc_active = 0; /* set if using zonelist_cache */ 1395 int did_zlc_setup = 0; /* just call zlc_setup() one time */ 1396 1397 (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask, 1398 &preferred_zone); 1399 if (!preferred_zone) 1400 return NULL; 1401 1402 classzone_idx = zone_idx(preferred_zone); 1403 1404 if (WARN_ON_ONCE(order >= MAX_ORDER)) 1405 return NULL; 1406 1407zonelist_scan: 1408 /* 1409 * Scan zonelist, looking for a zone with enough free. 1410 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1411 */ 1412 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1413 high_zoneidx, nodemask) { 1414 if (NUMA_BUILD && zlc_active && 1415 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 1416 continue; 1417 if ((alloc_flags & ALLOC_CPUSET) && 1418 !cpuset_zone_allowed_softwall(zone, gfp_mask)) 1419 goto try_next_zone; 1420 1421 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 1422 unsigned long mark; 1423 if (alloc_flags & ALLOC_WMARK_MIN) 1424 mark = zone->pages_min; 1425 else if (alloc_flags & ALLOC_WMARK_LOW) 1426 mark = zone->pages_low; 1427 else 1428 mark = zone->pages_high; 1429 if (!zone_watermark_ok(zone, order, mark, 1430 classzone_idx, alloc_flags)) { 1431 if (!zone_reclaim_mode || 1432 !zone_reclaim(zone, gfp_mask, order)) 1433 goto this_zone_full; 1434 } 1435 } 1436 1437 page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask); 1438 if (page) 1439 break; 1440this_zone_full: 1441 if (NUMA_BUILD) 1442 zlc_mark_zone_full(zonelist, z); 1443try_next_zone: 1444 if (NUMA_BUILD && !did_zlc_setup) { 1445 /* we do zlc_setup after the first zone is tried */ 1446 allowednodes = zlc_setup(zonelist, alloc_flags); 1447 zlc_active = 1; 1448 did_zlc_setup = 1; 1449 } 1450 } 1451 1452 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) { 1453 /* Disable zlc cache for second zonelist scan */ 1454 zlc_active = 0; 1455 goto zonelist_scan; 1456 } 1457 return page; 1458} 1459 1460static inline int 1461should_alloc_retry(gfp_t gfp_mask, unsigned int order, 1462 unsigned long pages_reclaimed) 1463{ 1464 /* Do not loop if specifically requested */ 1465 if (gfp_mask & __GFP_NORETRY) 1466 return 0; 1467 1468 /* 1469 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER 1470 * means __GFP_NOFAIL, but that may not be true in other 1471 * implementations. 1472 */ 1473 if (order <= PAGE_ALLOC_COSTLY_ORDER) 1474 return 1; 1475 1476 /* 1477 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is 1478 * specified, then we retry until we no longer reclaim any pages 1479 * (above), or we've reclaimed an order of pages at least as 1480 * large as the allocation's order. In both cases, if the 1481 * allocation still fails, we stop retrying. 1482 */ 1483 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order)) 1484 return 1; 1485 1486 /* 1487 * Don't let big-order allocations loop unless the caller 1488 * explicitly requests that. 
1489 */ 1490 if (gfp_mask & __GFP_NOFAIL) 1491 return 1; 1492 1493 return 0; 1494} 1495 1496static inline struct page * 1497__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 1498 struct zonelist *zonelist, enum zone_type high_zoneidx, 1499 nodemask_t *nodemask) 1500{ 1501 struct page *page; 1502 1503 /* Acquire the OOM killer lock for the zones in zonelist */ 1504 if (!try_set_zone_oom(zonelist, gfp_mask)) { 1505 schedule_timeout_uninterruptible(1); 1506 return NULL; 1507 } 1508 1509 /* 1510 * Go through the zonelist yet one more time, keep very high watermark 1511 * here, this is only to catch a parallel oom killing, we must fail if 1512 * we're still under heavy pressure. 1513 */ 1514 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, 1515 order, zonelist, high_zoneidx, 1516 ALLOC_WMARK_HIGH|ALLOC_CPUSET); 1517 if (page) 1518 goto out; 1519 1520 /* The OOM killer will not help higher order allocs */ 1521 if (order > PAGE_ALLOC_COSTLY_ORDER) 1522 goto out; 1523 1524 /* Exhausted what can be done so it's blamo time */ 1525 out_of_memory(zonelist, gfp_mask, order); 1526 1527out: 1528 clear_zonelist_oom(zonelist, gfp_mask); 1529 return page; 1530} 1531 1532/* The really slow allocator path where we enter direct reclaim */ 1533static inline struct page * 1534__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 1535 struct zonelist *zonelist, enum zone_type high_zoneidx, 1536 nodemask_t *nodemask, int alloc_flags, unsigned long *did_some_progress) 1537{ 1538 struct page *page = NULL; 1539 struct reclaim_state reclaim_state; 1540 struct task_struct *p = current; 1541 1542 cond_resched(); 1543 1544 /* We now go into synchronous reclaim */ 1545 cpuset_memory_pressure_bump(); 1546 1547 /* 1548 * The task's cpuset might have expanded its set of allowable nodes 1549 */ 1550 p->flags |= PF_MEMALLOC; 1551 lockdep_set_current_reclaim_state(gfp_mask); 1552 reclaim_state.reclaimed_slab = 0; 1553 p->reclaim_state = &reclaim_state; 1554 1555 *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask); 1556 1557 p->reclaim_state = NULL; 1558 lockdep_clear_current_reclaim_state(); 1559 p->flags &= ~PF_MEMALLOC; 1560 1561 cond_resched(); 1562 1563 if (order != 0) 1564 drain_all_pages(); 1565 1566 if (likely(*did_some_progress)) 1567 page = get_page_from_freelist(gfp_mask, nodemask, order, 1568 zonelist, high_zoneidx, alloc_flags); 1569 return page; 1570} 1571 1572static inline int 1573is_allocation_high_priority(struct task_struct *p, gfp_t gfp_mask) 1574{ 1575 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) 1576 && !in_interrupt()) 1577 return 1; 1578 return 0; 1579} 1580 1581/* 1582 * This is called in the allocator slow-path if the allocation request is of 1583 * sufficient urgency to ignore watermarks and take other desperate measures 1584 */ 1585static inline struct page * 1586__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, 1587 struct zonelist *zonelist, enum zone_type high_zoneidx, 1588 nodemask_t *nodemask) 1589{ 1590 struct page *page; 1591 1592 do { 1593 page = get_page_from_freelist(gfp_mask, nodemask, order, 1594 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS); 1595 1596 if (!page && gfp_mask & __GFP_NOFAIL) 1597 congestion_wait(WRITE, HZ/50); 1598 } while (!page && (gfp_mask & __GFP_NOFAIL)); 1599 1600 return page; 1601} 1602 1603static inline 1604void wake_all_kswapd(unsigned int order, struct zonelist *zonelist, 1605 enum zone_type high_zoneidx) 1606{ 1607 struct zoneref *z; 1608 struct zone *zone; 1609 1610 
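	/* Kick kswapd on every zone in the zonelist usable by this allocation */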
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) 1611 wakeup_kswapd(zone, order); 1612} 1613 1614static inline struct page * 1615__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 1616 struct zonelist *zonelist, enum zone_type high_zoneidx, 1617 nodemask_t *nodemask) 1618{ 1619 const gfp_t wait = gfp_mask & __GFP_WAIT; 1620 struct page *page = NULL; 1621 int alloc_flags; 1622 unsigned long pages_reclaimed = 0; 1623 unsigned long did_some_progress; 1624 struct task_struct *p = current; 1625 1626 /* 1627 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and 1628 * __GFP_NOWARN set) should not cause reclaim since the subsystem 1629 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim 1630 * using a larger set of nodes after it has established that the 1631 * allowed per node queues are empty and that nodes are 1632 * over allocated. 1633 */ 1634 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 1635 goto nopage; 1636 1637 wake_all_kswapd(order, zonelist, high_zoneidx); 1638 1639 /* 1640 * OK, we're below the kswapd watermark and have kicked background 1641 * reclaim. Now things get more complex, so set up alloc_flags according 1642 * to how we want to proceed. 1643 * 1644 * The caller may dip into page reserves a bit more if the caller 1645 * cannot run direct reclaim, or if the caller has realtime scheduling 1646 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 1647 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 1648 */ 1649 alloc_flags = ALLOC_WMARK_MIN; 1650 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) 1651 alloc_flags |= ALLOC_HARDER; 1652 if (gfp_mask & __GFP_HIGH) 1653 alloc_flags |= ALLOC_HIGH; 1654 if (wait) 1655 alloc_flags |= ALLOC_CPUSET; 1656 1657restart: 1658 /* 1659 * Go through the zonelist again. Let __GFP_HIGH and allocations 1660 * coming from realtime tasks go deeper into reserves. 1661 * 1662 * This is the last chance, in general, before the goto nopage. 1663 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 1664 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 
1665 */ 1666 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, 1667 high_zoneidx, alloc_flags); 1668 if (page) 1669 goto got_pg; 1670 1671rebalance: 1672 /* Allocate without watermarks if the context allows */ 1673 if (is_allocation_high_priority(p, gfp_mask)) { 1674 /* Do not dip into emergency reserves if specified */ 1675 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 1676 page = __alloc_pages_high_priority(gfp_mask, order, 1677 zonelist, high_zoneidx, nodemask); 1678 if (page) 1679 goto got_pg; 1680 } 1681 1682 /* Ensure no recursion into the allocator */ 1683 goto nopage; 1684 } 1685 1686 /* Atomic allocations - we can't balance anything */ 1687 if (!wait) 1688 goto nopage; 1689 1690 /* Try direct reclaim and then allocating */ 1691 page = __alloc_pages_direct_reclaim(gfp_mask, order, 1692 zonelist, high_zoneidx, 1693 nodemask, 1694 alloc_flags, &did_some_progress); 1695 if (page) 1696 goto got_pg; 1697 1698 /* 1699 * If we failed to make any progress reclaiming, then we are 1700 * running out of options and have to consider going OOM 1701 */ 1702 if (!did_some_progress) { 1703 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 1704 page = __alloc_pages_may_oom(gfp_mask, order, 1705 zonelist, high_zoneidx, 1706 nodemask); 1707 if (page) 1708 goto got_pg; 1709 1710 /* 1711 * The OOM killer does not trigger for high-order allocations 1712 * but if no progress is being made, there are no other 1713 * options and retrying is unlikely to help 1714 */ 1715 if (order > PAGE_ALLOC_COSTLY_ORDER) 1716 goto nopage; 1717 1718 goto restart; 1719 } 1720 } 1721 1722 /* Check if we should retry the allocation */ 1723 pages_reclaimed += did_some_progress; 1724 if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) { 1725 /* Wait for some write requests to complete then retry */ 1726 congestion_wait(WRITE, HZ/50); 1727 goto rebalance; 1728 } 1729 1730nopage: 1731 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 1732 printk(KERN_WARNING "%s: page allocation failure." 1733 " order:%d, mode:0x%x\n", 1734 p->comm, order, gfp_mask); 1735 dump_stack(); 1736 show_mem(); 1737 } 1738got_pg: 1739 return page; 1740 1741} 1742 1743/* 1744 * This is the 'heart' of the zoned buddy allocator. 1745 */ 1746struct page * 1747__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 1748 struct zonelist *zonelist, nodemask_t *nodemask) 1749{ 1750 enum zone_type high_zoneidx = gfp_zone(gfp_mask); 1751 struct page *page; 1752 1753 lockdep_trace_alloc(gfp_mask); 1754 1755 might_sleep_if(gfp_mask & __GFP_WAIT); 1756 1757 if (should_fail_alloc_page(gfp_mask, order)) 1758 return NULL; 1759 1760 /* 1761 * Check the zones suitable for the gfp_mask contain at least one 1762 * valid zone. It's possible to have an empty zonelist as a result 1763 * of GFP_THISNODE and a memoryless node 1764 */ 1765 if (unlikely(!zonelist->_zonerefs->zone)) 1766 return NULL; 1767 1768 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, 1769 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET); 1770 if (unlikely(!page)) 1771 page = __alloc_pages_slowpath(gfp_mask, order, 1772 zonelist, high_zoneidx, nodemask); 1773 1774 return page; 1775} 1776EXPORT_SYMBOL(__alloc_pages_nodemask); 1777 1778/* 1779 * Common helper functions. 
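 *
 * Illustrative usage (the returned address is a kernel virtual address,
 * so __GFP_HIGHMEM must not be passed here):
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);
 *	if (addr)
 *		free_pages(addr, 1);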
1780 */ 1781unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 1782{ 1783 struct page * page; 1784 page = alloc_pages(gfp_mask, order); 1785 if (!page) 1786 return 0; 1787 return (unsigned long) page_address(page); 1788} 1789 1790EXPORT_SYMBOL(__get_free_pages); 1791 1792unsigned long get_zeroed_page(gfp_t gfp_mask) 1793{ 1794 struct page * page; 1795 1796 /* 1797 * get_zeroed_page() returns a 32-bit address, which cannot represent 1798 * a highmem page 1799 */ 1800 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1801 1802 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1803 if (page) 1804 return (unsigned long) page_address(page); 1805 return 0; 1806} 1807 1808EXPORT_SYMBOL(get_zeroed_page); 1809 1810void __pagevec_free(struct pagevec *pvec) 1811{ 1812 int i = pagevec_count(pvec); 1813 1814 while (--i >= 0) 1815 free_hot_cold_page(pvec->pages[i], pvec->cold); 1816} 1817 1818void __free_pages(struct page *page, unsigned int order) 1819{ 1820 if (put_page_testzero(page)) { 1821 if (order == 0) 1822 free_hot_page(page); 1823 else 1824 __free_pages_ok(page, order); 1825 } 1826} 1827 1828EXPORT_SYMBOL(__free_pages); 1829 1830void free_pages(unsigned long addr, unsigned int order) 1831{ 1832 if (addr != 0) { 1833 VM_BUG_ON(!virt_addr_valid((void *)addr)); 1834 __free_pages(virt_to_page((void *)addr), order); 1835 } 1836} 1837 1838EXPORT_SYMBOL(free_pages); 1839 1840/** 1841 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 1842 * @size: the number of bytes to allocate 1843 * @gfp_mask: GFP flags for the allocation 1844 * 1845 * This function is similar to alloc_pages(), except that it allocates the 1846 * minimum number of pages to satisfy the request. alloc_pages() can only 1847 * allocate memory in power-of-two pages. 1848 * 1849 * This function is also limited by MAX_ORDER. 1850 * 1851 * Memory allocated by this function must be released by free_pages_exact(). 1852 */ 1853void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 1854{ 1855 unsigned int order = get_order(size); 1856 unsigned long addr; 1857 1858 addr = __get_free_pages(gfp_mask, order); 1859 if (addr) { 1860 unsigned long alloc_end = addr + (PAGE_SIZE << order); 1861 unsigned long used = addr + PAGE_ALIGN(size); 1862 1863 split_page(virt_to_page(addr), order); 1864 while (used < alloc_end) { 1865 free_page(used); 1866 used += PAGE_SIZE; 1867 } 1868 } 1869 1870 return (void *)addr; 1871} 1872EXPORT_SYMBOL(alloc_pages_exact); 1873 1874/** 1875 * free_pages_exact - release memory allocated via alloc_pages_exact() 1876 * @virt: the value returned by alloc_pages_exact. 1877 * @size: size of allocation, same value as passed to alloc_pages_exact(). 1878 * 1879 * Release the memory allocated by a previous call to alloc_pages_exact. 
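 *
 * Illustrative pairing with alloc_pages_exact():
 *
 *	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
 *	if (buf)
 *		free_pages_exact(buf, 5 * PAGE_SIZE);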
1880 */ 1881void free_pages_exact(void *virt, size_t size) 1882{ 1883 unsigned long addr = (unsigned long)virt; 1884 unsigned long end = addr + PAGE_ALIGN(size); 1885 1886 while (addr < end) { 1887 free_page(addr); 1888 addr += PAGE_SIZE; 1889 } 1890} 1891EXPORT_SYMBOL(free_pages_exact); 1892 1893static unsigned int nr_free_zone_pages(int offset) 1894{ 1895 struct zoneref *z; 1896 struct zone *zone; 1897 1898 /* Just pick one node, since fallback list is circular */ 1899 unsigned int sum = 0; 1900 1901 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 1902 1903 for_each_zone_zonelist(zone, z, zonelist, offset) { 1904 unsigned long size = zone->present_pages; 1905 unsigned long high = zone->pages_high; 1906 if (size > high) 1907 sum += size - high; 1908 } 1909 1910 return sum; 1911} 1912 1913/* 1914 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 1915 */ 1916unsigned int nr_free_buffer_pages(void) 1917{ 1918 return nr_free_zone_pages(gfp_zone(GFP_USER)); 1919} 1920EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 1921 1922/* 1923 * Amount of free RAM allocatable within all zones 1924 */ 1925unsigned int nr_free_pagecache_pages(void) 1926{ 1927 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 1928} 1929 1930static inline void show_node(struct zone *zone) 1931{ 1932 if (NUMA_BUILD) 1933 printk("Node %d ", zone_to_nid(zone)); 1934} 1935 1936void si_meminfo(struct sysinfo *val) 1937{ 1938 val->totalram = totalram_pages; 1939 val->sharedram = 0; 1940 val->freeram = global_page_state(NR_FREE_PAGES); 1941 val->bufferram = nr_blockdev_pages(); 1942 val->totalhigh = totalhigh_pages; 1943 val->freehigh = nr_free_highpages(); 1944 val->mem_unit = PAGE_SIZE; 1945} 1946 1947EXPORT_SYMBOL(si_meminfo); 1948 1949#ifdef CONFIG_NUMA 1950void si_meminfo_node(struct sysinfo *val, int nid) 1951{ 1952 pg_data_t *pgdat = NODE_DATA(nid); 1953 1954 val->totalram = pgdat->node_present_pages; 1955 val->freeram = node_page_state(nid, NR_FREE_PAGES); 1956#ifdef CONFIG_HIGHMEM 1957 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 1958 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 1959 NR_FREE_PAGES); 1960#else 1961 val->totalhigh = 0; 1962 val->freehigh = 0; 1963#endif 1964 val->mem_unit = PAGE_SIZE; 1965} 1966#endif 1967 1968#define K(x) ((x) << (PAGE_SHIFT-10)) 1969 1970/* 1971 * Show free area list (used inside shift_scroll-lock stuff) 1972 * We also calculate the percentage fragmentation. We do this by counting the 1973 * memory on each free list with the exception of the first item on the list. 
1974 */ 1975void show_free_areas(void) 1976{ 1977 int cpu; 1978 struct zone *zone; 1979 1980 for_each_populated_zone(zone) { 1981 show_node(zone); 1982 printk("%s per-cpu:\n", zone->name); 1983 1984 for_each_online_cpu(cpu) { 1985 struct per_cpu_pageset *pageset; 1986 1987 pageset = zone_pcp(zone, cpu); 1988 1989 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n", 1990 cpu, pageset->pcp.high, 1991 pageset->pcp.batch, pageset->pcp.count); 1992 } 1993 } 1994 1995 printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n" 1996 " inactive_file:%lu" 1997//TODO: check/adjust line lengths 1998#ifdef CONFIG_UNEVICTABLE_LRU 1999 " unevictable:%lu" 2000#endif 2001 " dirty:%lu writeback:%lu unstable:%lu\n" 2002 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n", 2003 global_page_state(NR_ACTIVE_ANON), 2004 global_page_state(NR_ACTIVE_FILE), 2005 global_page_state(NR_INACTIVE_ANON), 2006 global_page_state(NR_INACTIVE_FILE), 2007#ifdef CONFIG_UNEVICTABLE_LRU 2008 global_page_state(NR_UNEVICTABLE), 2009#endif 2010 global_page_state(NR_FILE_DIRTY), 2011 global_page_state(NR_WRITEBACK), 2012 global_page_state(NR_UNSTABLE_NFS), 2013 global_page_state(NR_FREE_PAGES), 2014 global_page_state(NR_SLAB_RECLAIMABLE) + 2015 global_page_state(NR_SLAB_UNRECLAIMABLE), 2016 global_page_state(NR_FILE_MAPPED), 2017 global_page_state(NR_PAGETABLE), 2018 global_page_state(NR_BOUNCE)); 2019 2020 for_each_populated_zone(zone) { 2021 int i; 2022 2023 show_node(zone); 2024 printk("%s" 2025 " free:%lukB" 2026 " min:%lukB" 2027 " low:%lukB" 2028 " high:%lukB" 2029 " active_anon:%lukB" 2030 " inactive_anon:%lukB" 2031 " active_file:%lukB" 2032 " inactive_file:%lukB" 2033#ifdef CONFIG_UNEVICTABLE_LRU 2034 " unevictable:%lukB" 2035#endif 2036 " present:%lukB" 2037 " pages_scanned:%lu" 2038 " all_unreclaimable? %s" 2039 "\n", 2040 zone->name, 2041 K(zone_page_state(zone, NR_FREE_PAGES)), 2042 K(zone->pages_min), 2043 K(zone->pages_low), 2044 K(zone->pages_high), 2045 K(zone_page_state(zone, NR_ACTIVE_ANON)), 2046 K(zone_page_state(zone, NR_INACTIVE_ANON)), 2047 K(zone_page_state(zone, NR_ACTIVE_FILE)), 2048 K(zone_page_state(zone, NR_INACTIVE_FILE)), 2049#ifdef CONFIG_UNEVICTABLE_LRU 2050 K(zone_page_state(zone, NR_UNEVICTABLE)), 2051#endif 2052 K(zone->present_pages), 2053 zone->pages_scanned, 2054 (zone_is_all_unreclaimable(zone) ? "yes" : "no") 2055 ); 2056 printk("lowmem_reserve[]:"); 2057 for (i = 0; i < MAX_NR_ZONES; i++) 2058 printk(" %lu", zone->lowmem_reserve[i]); 2059 printk("\n"); 2060 } 2061 2062 for_each_populated_zone(zone) { 2063 unsigned long nr[MAX_ORDER], flags, order, total = 0; 2064 2065 show_node(zone); 2066 printk("%s: ", zone->name); 2067 2068 spin_lock_irqsave(&zone->lock, flags); 2069 for (order = 0; order < MAX_ORDER; order++) { 2070 nr[order] = zone->free_area[order].nr_free; 2071 total += nr[order] << order; 2072 } 2073 spin_unlock_irqrestore(&zone->lock, flags); 2074 for (order = 0; order < MAX_ORDER; order++) 2075 printk("%lu*%lukB ", nr[order], K(1UL) << order); 2076 printk("= %lukB\n", K(total)); 2077 } 2078 2079 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); 2080 2081 show_swap_cache_info(); 2082} 2083 2084static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 2085{ 2086 zoneref->zone = zone; 2087 zoneref->zone_idx = zone_idx(zone); 2088} 2089 2090/* 2091 * Builds allocation fallback zone lists. 2092 * 2093 * Add all populated zones of a node to the zonelist. 
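 *
 * For example, when called with zone_type == ZONE_HIGHMEM on a node whose
 * DMA, Normal and HighMem zones are all populated, the zonerefs are
 * appended as HighMem, then Normal, then DMA, i.e. highest zone first.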
2094 */ 2095static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, 2096 int nr_zones, enum zone_type zone_type) 2097{ 2098 struct zone *zone; 2099 2100 BUG_ON(zone_type >= MAX_NR_ZONES); 2101 zone_type++; 2102 2103 do { 2104 zone_type--; 2105 zone = pgdat->node_zones + zone_type; 2106 if (populated_zone(zone)) { 2107 zoneref_set_zone(zone, 2108 &zonelist->_zonerefs[nr_zones++]); 2109 check_highest_zone(zone_type); 2110 } 2111 2112 } while (zone_type); 2113 return nr_zones; 2114} 2115 2116 2117/* 2118 * zonelist_order: 2119 * 0 = automatic detection of better ordering. 2120 * 1 = order by ([node] distance, -zonetype) 2121 * 2 = order by (-zonetype, [node] distance) 2122 * 2123 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create 2124 * the same zonelist. So only NUMA can configure this param. 2125 */ 2126#define ZONELIST_ORDER_DEFAULT 0 2127#define ZONELIST_ORDER_NODE 1 2128#define ZONELIST_ORDER_ZONE 2 2129 2130/* zonelist order in the kernel. 2131 * set_zonelist_order() will set this to NODE or ZONE. 2132 */ 2133static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; 2134static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; 2135 2136 2137#ifdef CONFIG_NUMA 2138/* The value user specified ....changed by config */ 2139static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; 2140/* string for sysctl */ 2141#define NUMA_ZONELIST_ORDER_LEN 16 2142char numa_zonelist_order[16] = "default"; 2143 2144/* 2145 * interface for configure zonelist ordering. 2146 * command line option "numa_zonelist_order" 2147 * = "[dD]efault - default, automatic configuration. 2148 * = "[nN]ode - order by node locality, then by zone within node 2149 * = "[zZ]one - order by zone, then by locality within zone 2150 */ 2151 2152static int __parse_numa_zonelist_order(char *s) 2153{ 2154 if (*s == 'd' || *s == 'D') { 2155 user_zonelist_order = ZONELIST_ORDER_DEFAULT; 2156 } else if (*s == 'n' || *s == 'N') { 2157 user_zonelist_order = ZONELIST_ORDER_NODE; 2158 } else if (*s == 'z' || *s == 'Z') { 2159 user_zonelist_order = ZONELIST_ORDER_ZONE; 2160 } else { 2161 printk(KERN_WARNING 2162 "Ignoring invalid numa_zonelist_order value: " 2163 "%s\n", s); 2164 return -EINVAL; 2165 } 2166 return 0; 2167} 2168 2169static __init int setup_numa_zonelist_order(char *s) 2170{ 2171 if (s) 2172 return __parse_numa_zonelist_order(s); 2173 return 0; 2174} 2175early_param("numa_zonelist_order", setup_numa_zonelist_order); 2176 2177/* 2178 * sysctl handler for numa_zonelist_order 2179 */ 2180int numa_zonelist_order_handler(ctl_table *table, int write, 2181 struct file *file, void __user *buffer, size_t *length, 2182 loff_t *ppos) 2183{ 2184 char saved_string[NUMA_ZONELIST_ORDER_LEN]; 2185 int ret; 2186 2187 if (write) 2188 strncpy(saved_string, (char*)table->data, 2189 NUMA_ZONELIST_ORDER_LEN); 2190 ret = proc_dostring(table, write, file, buffer, length, ppos); 2191 if (ret) 2192 return ret; 2193 if (write) { 2194 int oldval = user_zonelist_order; 2195 if (__parse_numa_zonelist_order((char*)table->data)) { 2196 /* 2197 * bogus value. 
restore saved string 2198 */ 2199 strncpy((char*)table->data, saved_string, 2200 NUMA_ZONELIST_ORDER_LEN); 2201 user_zonelist_order = oldval; 2202 } else if (oldval != user_zonelist_order) 2203 build_all_zonelists(); 2204 } 2205 return 0; 2206} 2207 2208 2209#define MAX_NODE_LOAD (num_online_nodes()) 2210static int node_load[MAX_NUMNODES]; 2211 2212/** 2213 * find_next_best_node - find the next node that should appear in a given node's fallback list 2214 * @node: node whose fallback list we're appending 2215 * @used_node_mask: nodemask_t of already used nodes 2216 * 2217 * We use a number of factors to determine which is the next node that should 2218 * appear on a given node's fallback list. The node should not have appeared 2219 * already in @node's fallback list, and it should be the next closest node 2220 * according to the distance array (which contains arbitrary distance values 2221 * from each node to each node in the system), and should also prefer nodes 2222 * with no CPUs, since presumably they'll have very little allocation pressure 2223 * on them otherwise. 2224 * It returns -1 if no node is found. 2225 */ 2226static int find_next_best_node(int node, nodemask_t *used_node_mask) 2227{ 2228 int n, val; 2229 int min_val = INT_MAX; 2230 int best_node = -1; 2231 const struct cpumask *tmp = cpumask_of_node(0); 2232 2233 /* Use the local node if we haven't already */ 2234 if (!node_isset(node, *used_node_mask)) { 2235 node_set(node, *used_node_mask); 2236 return node; 2237 } 2238 2239 for_each_node_state(n, N_HIGH_MEMORY) { 2240 2241 /* Don't want a node to appear more than once */ 2242 if (node_isset(n, *used_node_mask)) 2243 continue; 2244 2245 /* Use the distance array to find the distance */ 2246 val = node_distance(node, n); 2247 2248 /* Penalize nodes under us ("prefer the next node") */ 2249 val += (n < node); 2250 2251 /* Give preference to headless and unused nodes */ 2252 tmp = cpumask_of_node(n); 2253 if (!cpumask_empty(tmp)) 2254 val += PENALTY_FOR_NODE_WITH_CPUS; 2255 2256 /* Slight preference for less loaded node */ 2257 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 2258 val += node_load[n]; 2259 2260 if (val < min_val) { 2261 min_val = val; 2262 best_node = n; 2263 } 2264 } 2265 2266 if (best_node >= 0) 2267 node_set(best_node, *used_node_mask); 2268 2269 return best_node; 2270} 2271 2272 2273/* 2274 * Build zonelists ordered by node and zones within node. 2275 * This results in maximum locality--normal zone overflows into local 2276 * DMA zone, if any--but risks exhausting DMA zone. 2277 */ 2278static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 2279{ 2280 int j; 2281 struct zonelist *zonelist; 2282 2283 zonelist = &pgdat->node_zonelists[0]; 2284 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) 2285 ; 2286 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2287 MAX_NR_ZONES - 1); 2288 zonelist->_zonerefs[j].zone = NULL; 2289 zonelist->_zonerefs[j].zone_idx = 0; 2290} 2291 2292/* 2293 * Build gfp_thisnode zonelists 2294 */ 2295static void build_thisnode_zonelists(pg_data_t *pgdat) 2296{ 2297 int j; 2298 struct zonelist *zonelist; 2299 2300 zonelist = &pgdat->node_zonelists[1]; 2301 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); 2302 zonelist->_zonerefs[j].zone = NULL; 2303 zonelist->_zonerefs[j].zone_idx = 0; 2304} 2305 2306/* 2307 * Build zonelists ordered by zone and nodes within zones. 
2308 * This results in conserving DMA zone[s] until all Normal memory is
2309 * exhausted, but results in overflowing to remote node while memory
2310 * may still exist in local DMA zone.
2311 */
2312static int node_order[MAX_NUMNODES];
2313
2314static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2315{
2316 int pos, j, node;
2317 int zone_type; /* needs to be signed */
2318 struct zone *z;
2319 struct zonelist *zonelist;
2320
2321 zonelist = &pgdat->node_zonelists[0];
2322 pos = 0;
2323 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2324 for (j = 0; j < nr_nodes; j++) {
2325 node = node_order[j];
2326 z = &NODE_DATA(node)->node_zones[zone_type];
2327 if (populated_zone(z)) {
2328 zoneref_set_zone(z,
2329 &zonelist->_zonerefs[pos++]);
2330 check_highest_zone(zone_type);
2331 }
2332 }
2333 }
2334 zonelist->_zonerefs[pos].zone = NULL;
2335 zonelist->_zonerefs[pos].zone_idx = 0;
2336}
2337
2338static int default_zonelist_order(void)
2339{
2340 int nid, zone_type;
2341 unsigned long low_kmem_size, total_size;
2342 struct zone *z;
2343 int average_size;
2344 /*
2345 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
2346 * If they are really small and used heavily, the system can fall
2347 * into OOM very easily.
2348 * This function detects the ZONE_DMA/DMA32 size and configures zone order.
2349 */
2350 /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
2351 low_kmem_size = 0;
2352 total_size = 0;
2353 for_each_online_node(nid) {
2354 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2355 z = &NODE_DATA(nid)->node_zones[zone_type];
2356 if (populated_zone(z)) {
2357 if (zone_type < ZONE_NORMAL)
2358 low_kmem_size += z->present_pages;
2359 total_size += z->present_pages;
2360 }
2361 }
2362 }
2363 if (!low_kmem_size || /* there is no DMA area. */
2364 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2365 return ZONELIST_ORDER_NODE;
2366 /*
2367 * look into each node's config.
2368 * If there is a node whose DMA/DMA32 memory is a very large share of
2369 * its local memory, NODE_ORDER may be suitable.
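 *
 * For illustration: a node holding 1GB of memory of which 800MB sits in
 * ZONE_DMA32 exceeds the 70% threshold checked below, so (provided the
 * node is not smaller than average) the default becomes node ordering.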
2370 */ 2371 average_size = total_size / 2372 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1); 2373 for_each_online_node(nid) { 2374 low_kmem_size = 0; 2375 total_size = 0; 2376 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 2377 z = &NODE_DATA(nid)->node_zones[zone_type]; 2378 if (populated_zone(z)) { 2379 if (zone_type < ZONE_NORMAL) 2380 low_kmem_size += z->present_pages; 2381 total_size += z->present_pages; 2382 } 2383 } 2384 if (low_kmem_size && 2385 total_size > average_size && /* ignore small node */ 2386 low_kmem_size > total_size * 70/100) 2387 return ZONELIST_ORDER_NODE; 2388 } 2389 return ZONELIST_ORDER_ZONE; 2390} 2391 2392static void set_zonelist_order(void) 2393{ 2394 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) 2395 current_zonelist_order = default_zonelist_order(); 2396 else 2397 current_zonelist_order = user_zonelist_order; 2398} 2399 2400static void build_zonelists(pg_data_t *pgdat) 2401{ 2402 int j, node, load; 2403 enum zone_type i; 2404 nodemask_t used_mask; 2405 int local_node, prev_node; 2406 struct zonelist *zonelist; 2407 int order = current_zonelist_order; 2408 2409 /* initialize zonelists */ 2410 for (i = 0; i < MAX_ZONELISTS; i++) { 2411 zonelist = pgdat->node_zonelists + i; 2412 zonelist->_zonerefs[0].zone = NULL; 2413 zonelist->_zonerefs[0].zone_idx = 0; 2414 } 2415 2416 /* NUMA-aware ordering of nodes */ 2417 local_node = pgdat->node_id; 2418 load = num_online_nodes(); 2419 prev_node = local_node; 2420 nodes_clear(used_mask); 2421 2422 memset(node_load, 0, sizeof(node_load)); 2423 memset(node_order, 0, sizeof(node_order)); 2424 j = 0; 2425 2426 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 2427 int distance = node_distance(local_node, node); 2428 2429 /* 2430 * If another node is sufficiently far away then it is better 2431 * to reclaim pages in a zone before going off node. 2432 */ 2433 if (distance > RECLAIM_DISTANCE) 2434 zone_reclaim_mode = 1; 2435 2436 /* 2437 * We don't want to pressure a particular node. 2438 * So adding penalty to the first node in same 2439 * distance group to make it round-robin. 2440 */ 2441 if (distance != node_distance(local_node, prev_node)) 2442 node_load[node] = load; 2443 2444 prev_node = node; 2445 load--; 2446 if (order == ZONELIST_ORDER_NODE) 2447 build_zonelists_in_node_order(pgdat, node); 2448 else 2449 node_order[j++] = node; /* remember order */ 2450 } 2451 2452 if (order == ZONELIST_ORDER_ZONE) { 2453 /* calculate node order -- i.e., DMA last! 
*/ 2454 build_zonelists_in_zone_order(pgdat, j); 2455 } 2456 2457 build_thisnode_zonelists(pgdat); 2458} 2459 2460/* Construct the zonelist performance cache - see further mmzone.h */ 2461static void build_zonelist_cache(pg_data_t *pgdat) 2462{ 2463 struct zonelist *zonelist; 2464 struct zonelist_cache *zlc; 2465 struct zoneref *z; 2466 2467 zonelist = &pgdat->node_zonelists[0]; 2468 zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 2469 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 2470 for (z = zonelist->_zonerefs; z->zone; z++) 2471 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z); 2472} 2473 2474 2475#else /* CONFIG_NUMA */ 2476 2477static void set_zonelist_order(void) 2478{ 2479 current_zonelist_order = ZONELIST_ORDER_ZONE; 2480} 2481 2482static void build_zonelists(pg_data_t *pgdat) 2483{ 2484 int node, local_node; 2485 enum zone_type j; 2486 struct zonelist *zonelist; 2487 2488 local_node = pgdat->node_id; 2489 2490 zonelist = &pgdat->node_zonelists[0]; 2491 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); 2492 2493 /* 2494 * Now we build the zonelist so that it contains the zones 2495 * of all the other nodes. 2496 * We don't want to pressure a particular node, so when 2497 * building the zones for node N, we make sure that the 2498 * zones coming right after the local ones are those from 2499 * node N+1 (modulo N) 2500 */ 2501 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 2502 if (!node_online(node)) 2503 continue; 2504 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2505 MAX_NR_ZONES - 1); 2506 } 2507 for (node = 0; node < local_node; node++) { 2508 if (!node_online(node)) 2509 continue; 2510 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2511 MAX_NR_ZONES - 1); 2512 } 2513 2514 zonelist->_zonerefs[j].zone = NULL; 2515 zonelist->_zonerefs[j].zone_idx = 0; 2516} 2517 2518/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ 2519static void build_zonelist_cache(pg_data_t *pgdat) 2520{ 2521 pgdat->node_zonelists[0].zlcache_ptr = NULL; 2522} 2523 2524#endif /* CONFIG_NUMA */ 2525 2526/* return values int ....just for stop_machine() */ 2527static int __build_all_zonelists(void *dummy) 2528{ 2529 int nid; 2530 2531 for_each_online_node(nid) { 2532 pg_data_t *pgdat = NODE_DATA(nid); 2533 2534 build_zonelists(pgdat); 2535 build_zonelist_cache(pgdat); 2536 } 2537 return 0; 2538} 2539 2540void build_all_zonelists(void) 2541{ 2542 set_zonelist_order(); 2543 2544 if (system_state == SYSTEM_BOOTING) { 2545 __build_all_zonelists(NULL); 2546 mminit_verify_zonelist(); 2547 cpuset_init_current_mems_allowed(); 2548 } else { 2549 /* we have to stop all cpus to guarantee there is no user 2550 of zonelist */ 2551 stop_machine(__build_all_zonelists, NULL, NULL); 2552 /* cpuset refresh routine should be here */ 2553 } 2554 vm_total_pages = nr_free_pagecache_pages(); 2555 /* 2556 * Disable grouping by mobility if the number of pages in the 2557 * system is too low to allow the mechanism to work. It would be 2558 * more accurate, but expensive to check per-zone. This check is 2559 * made on memory-hotadd so a system can start with mobility 2560 * disabled and enable it later 2561 */ 2562 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 2563 page_group_by_mobility_disabled = 1; 2564 else 2565 page_group_by_mobility_disabled = 0; 2566 2567 printk("Built %i zonelists in %s order, mobility grouping %s. 
" 2568 "Total pages: %ld\n", 2569 num_online_nodes(), 2570 zonelist_order_name[current_zonelist_order], 2571 page_group_by_mobility_disabled ? "off" : "on", 2572 vm_total_pages); 2573#ifdef CONFIG_NUMA 2574 printk("Policy zone: %s\n", zone_names[policy_zone]); 2575#endif 2576} 2577 2578/* 2579 * Helper functions to size the waitqueue hash table. 2580 * Essentially these want to choose hash table sizes sufficiently 2581 * large so that collisions trying to wait on pages are rare. 2582 * But in fact, the number of active page waitqueues on typical 2583 * systems is ridiculously low, less than 200. So this is even 2584 * conservative, even though it seems large. 2585 * 2586 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 2587 * waitqueues, i.e. the size of the waitq table given the number of pages. 2588 */ 2589#define PAGES_PER_WAITQUEUE 256 2590 2591#ifndef CONFIG_MEMORY_HOTPLUG 2592static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 2593{ 2594 unsigned long size = 1; 2595 2596 pages /= PAGES_PER_WAITQUEUE; 2597 2598 while (size < pages) 2599 size <<= 1; 2600 2601 /* 2602 * Once we have dozens or even hundreds of threads sleeping 2603 * on IO we've got bigger problems than wait queue collision. 2604 * Limit the size of the wait table to a reasonable size. 2605 */ 2606 size = min(size, 4096UL); 2607 2608 return max(size, 4UL); 2609} 2610#else 2611/* 2612 * A zone's size might be changed by hot-add, so it is not possible to determine 2613 * a suitable size for its wait_table. So we use the maximum size now. 2614 * 2615 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 2616 * 2617 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 2618 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 2619 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 2620 * 2621 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 2622 * or more by the traditional way. (See above). It equals: 2623 * 2624 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 2625 * ia64(16K page size) : = ( 8G + 4M)byte. 2626 * powerpc (64K page size) : = (32G +16M)byte. 2627 */ 2628static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 2629{ 2630 return 4096UL; 2631} 2632#endif 2633 2634/* 2635 * This is an integer logarithm so that shifts can be used later 2636 * to extract the more random high bits from the multiplicative 2637 * hash function before the remainder is taken. 2638 */ 2639static inline unsigned long wait_table_bits(unsigned long size) 2640{ 2641 return ffz(~size); 2642} 2643 2644#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 2645 2646/* 2647 * Mark a number of pageblocks as MIGRATE_RESERVE. The number 2648 * of blocks reserved is based on zone->pages_min. The memory within the 2649 * reserve will tend to store contiguous free pages. 
Setting min_free_kbytes 2650 * higher will lead to a bigger reserve which will get freed as contiguous 2651 * blocks as reclaim kicks in 2652 */ 2653static void setup_zone_migrate_reserve(struct zone *zone) 2654{ 2655 unsigned long start_pfn, pfn, end_pfn; 2656 struct page *page; 2657 unsigned long reserve, block_migratetype; 2658 2659 /* Get the start pfn, end pfn and the number of blocks to reserve */ 2660 start_pfn = zone->zone_start_pfn; 2661 end_pfn = start_pfn + zone->spanned_pages; 2662 reserve = roundup(zone->pages_min, pageblock_nr_pages) >> 2663 pageblock_order; 2664 2665 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 2666 if (!pfn_valid(pfn)) 2667 continue; 2668 page = pfn_to_page(pfn); 2669 2670 /* Watch out for overlapping nodes */ 2671 if (page_to_nid(page) != zone_to_nid(zone)) 2672 continue; 2673 2674 /* Blocks with reserved pages will never free, skip them. */ 2675 if (PageReserved(page)) 2676 continue; 2677 2678 block_migratetype = get_pageblock_migratetype(page); 2679 2680 /* If this block is reserved, account for it */ 2681 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) { 2682 reserve--; 2683 continue; 2684 } 2685 2686 /* Suitable for reserving if this block is movable */ 2687 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) { 2688 set_pageblock_migratetype(page, MIGRATE_RESERVE); 2689 move_freepages_block(zone, page, MIGRATE_RESERVE); 2690 reserve--; 2691 continue; 2692 } 2693 2694 /* 2695 * If the reserve is met and this is a previous reserved block, 2696 * take it back 2697 */ 2698 if (block_migratetype == MIGRATE_RESERVE) { 2699 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 2700 move_freepages_block(zone, page, MIGRATE_MOVABLE); 2701 } 2702 } 2703} 2704 2705/* 2706 * Initially all pages are reserved - free ones are freed 2707 * up by free_all_bootmem() once the early boot process is 2708 * done. Non-atomic initialization, single-pass. 2709 */ 2710void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 2711 unsigned long start_pfn, enum memmap_context context) 2712{ 2713 struct page *page; 2714 unsigned long end_pfn = start_pfn + size; 2715 unsigned long pfn; 2716 struct zone *z; 2717 2718 if (highest_memmap_pfn < end_pfn - 1) 2719 highest_memmap_pfn = end_pfn - 1; 2720 2721 z = &NODE_DATA(nid)->node_zones[zone]; 2722 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 2723 /* 2724 * There can be holes in boot-time mem_map[]s 2725 * handed to this function. They do not 2726 * exist on hotplugged memory. 2727 */ 2728 if (context == MEMMAP_EARLY) { 2729 if (!early_pfn_valid(pfn)) 2730 continue; 2731 if (!early_pfn_in_nid(pfn, nid)) 2732 continue; 2733 } 2734 page = pfn_to_page(pfn); 2735 set_page_links(page, zone, nid, pfn); 2736 mminit_verify_page_links(page, zone, nid, pfn); 2737 init_page_count(page); 2738 reset_page_mapcount(page); 2739 SetPageReserved(page); 2740 /* 2741 * Mark the block movable so that blocks are reserved for 2742 * movable at startup. This will force kernel allocations 2743 * to reserve their blocks rather than leaking throughout 2744 * the address space during boot when many long-lived 2745 * kernel allocations are made. Later some blocks near 2746 * the start are marked MIGRATE_RESERVE by 2747 * setup_zone_migrate_reserve() 2748 * 2749 * bitmap is created for zone's valid pfn range. but memmap 2750 * can be created for invalid pages (for alignment) 2751 * check here not to call set_pageblock_migratetype() against 2752 * pfn out of zone. 
2753 */ 2754 if ((z->zone_start_pfn <= pfn) 2755 && (pfn < z->zone_start_pfn + z->spanned_pages) 2756 && !(pfn & (pageblock_nr_pages - 1))) 2757 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 2758 2759 INIT_LIST_HEAD(&page->lru); 2760#ifdef WANT_PAGE_VIRTUAL 2761 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 2762 if (!is_highmem_idx(zone)) 2763 set_page_address(page, __va(pfn << PAGE_SHIFT)); 2764#endif 2765 } 2766} 2767 2768static void __meminit zone_init_free_lists(struct zone *zone) 2769{ 2770 int order, t; 2771 for_each_migratetype_order(order, t) { 2772 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 2773 zone->free_area[order].nr_free = 0; 2774 } 2775} 2776 2777#ifndef __HAVE_ARCH_MEMMAP_INIT 2778#define memmap_init(size, nid, zone, start_pfn) \ 2779 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 2780#endif 2781 2782static int zone_batchsize(struct zone *zone) 2783{ 2784#ifdef CONFIG_MMU 2785 int batch; 2786 2787 /* 2788 * The per-cpu-pages pools are set to around 1000th of the 2789 * size of the zone. But no more than 1/2 of a meg. 2790 * 2791 * OK, so we don't know how big the cache is. So guess. 2792 */ 2793 batch = zone->present_pages / 1024; 2794 if (batch * PAGE_SIZE > 512 * 1024) 2795 batch = (512 * 1024) / PAGE_SIZE; 2796 batch /= 4; /* We effectively *= 4 below */ 2797 if (batch < 1) 2798 batch = 1; 2799 2800 /* 2801 * Clamp the batch to a 2^n - 1 value. Having a power 2802 * of 2 value was found to be more likely to have 2803 * suboptimal cache aliasing properties in some cases. 2804 * 2805 * For example if 2 tasks are alternately allocating 2806 * batches of pages, one task can end up with a lot 2807 * of pages of one half of the possible page colors 2808 * and the other with pages of the other colors. 2809 */ 2810 batch = rounddown_pow_of_two(batch + batch/2) - 1; 2811 2812 return batch; 2813 2814#else 2815 /* The deferral and batching of frees should be suppressed under NOMMU 2816 * conditions. 2817 * 2818 * The problem is that NOMMU needs to be able to allocate large chunks 2819 * of contiguous memory as there's no hardware page translation to 2820 * assemble apparent contiguous memory from discontiguous pages. 2821 * 2822 * Queueing large contiguous runs of pages for batching, however, 2823 * causes the pages to actually be freed in smaller chunks. As there 2824 * can be a significant delay between the individual batches being 2825 * recycled, this leads to the once large chunks of space being 2826 * fragmented and becoming unavailable for high-order allocations. 2827 */ 2828 return 0; 2829#endif 2830} 2831 2832static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 2833{ 2834 struct per_cpu_pages *pcp; 2835 2836 memset(p, 0, sizeof(*p)); 2837 2838 pcp = &p->pcp; 2839 pcp->count = 0; 2840 pcp->high = 6 * batch; 2841 pcp->batch = max(1UL, 1 * batch); 2842 INIT_LIST_HEAD(&pcp->list); 2843} 2844 2845/* 2846 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist 2847 * to the value high for the pageset p. 2848 */ 2849 2850static void setup_pagelist_highmark(struct per_cpu_pageset *p, 2851 unsigned long high) 2852{ 2853 struct per_cpu_pages *pcp; 2854 2855 pcp = &p->pcp; 2856 pcp->high = high; 2857 pcp->batch = max(1UL, high/4); 2858 if ((high/4) > (PAGE_SHIFT * 8)) 2859 pcp->batch = PAGE_SHIFT * 8; 2860} 2861 2862 2863#ifdef CONFIG_NUMA 2864/* 2865 * Boot pageset table. One per cpu which is going to be used for all 2866 * zones and all nodes. 
The parameters will be set in such a way 2867 * that an item put on a list will immediately be handed over to 2868 * the buddy list. This is safe since pageset manipulation is done 2869 * with interrupts disabled. 2870 * 2871 * Some NUMA counter updates may also be caught by the boot pagesets. 2872 * 2873 * The boot_pagesets must be kept even after bootup is complete for 2874 * unused processors and/or zones. They do play a role for bootstrapping 2875 * hotplugged processors. 2876 * 2877 * zoneinfo_show() and maybe other functions do 2878 * not check if the processor is online before following the pageset pointer. 2879 * Other parts of the kernel may not check if the zone is available. 2880 */ 2881static struct per_cpu_pageset boot_pageset[NR_CPUS]; 2882 2883/* 2884 * Dynamically allocate memory for the 2885 * per cpu pageset array in struct zone. 2886 */ 2887static int __cpuinit process_zones(int cpu) 2888{ 2889 struct zone *zone, *dzone; 2890 int node = cpu_to_node(cpu); 2891 2892 node_set_state(node, N_CPU); /* this node has a cpu */ 2893 2894 for_each_populated_zone(zone) { 2895 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 2896 GFP_KERNEL, node); 2897 if (!zone_pcp(zone, cpu)) 2898 goto bad; 2899 2900 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); 2901 2902 if (percpu_pagelist_fraction) 2903 setup_pagelist_highmark(zone_pcp(zone, cpu), 2904 (zone->present_pages / percpu_pagelist_fraction)); 2905 } 2906 2907 return 0; 2908bad: 2909 for_each_zone(dzone) { 2910 if (!populated_zone(dzone)) 2911 continue; 2912 if (dzone == zone) 2913 break; 2914 kfree(zone_pcp(dzone, cpu)); 2915 zone_pcp(dzone, cpu) = NULL; 2916 } 2917 return -ENOMEM; 2918} 2919 2920static inline void free_zone_pagesets(int cpu) 2921{ 2922 struct zone *zone; 2923 2924 for_each_zone(zone) { 2925 struct per_cpu_pageset *pset = zone_pcp(zone, cpu); 2926 2927 /* Free per_cpu_pageset if it is slab allocated */ 2928 if (pset != &boot_pageset[cpu]) 2929 kfree(pset); 2930 zone_pcp(zone, cpu) = NULL; 2931 } 2932} 2933 2934static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, 2935 unsigned long action, 2936 void *hcpu) 2937{ 2938 int cpu = (long)hcpu; 2939 int ret = NOTIFY_OK; 2940 2941 switch (action) { 2942 case CPU_UP_PREPARE: 2943 case CPU_UP_PREPARE_FROZEN: 2944 if (process_zones(cpu)) 2945 ret = NOTIFY_BAD; 2946 break; 2947 case CPU_UP_CANCELED: 2948 case CPU_UP_CANCELED_FROZEN: 2949 case CPU_DEAD: 2950 case CPU_DEAD_FROZEN: 2951 free_zone_pagesets(cpu); 2952 break; 2953 default: 2954 break; 2955 } 2956 return ret; 2957} 2958 2959static struct notifier_block __cpuinitdata pageset_notifier = 2960 { &pageset_cpuup_callback, NULL, 0 }; 2961 2962void __init setup_per_cpu_pageset(void) 2963{ 2964 int err; 2965 2966 /* Initialize per_cpu_pageset for cpu 0. 2967 * A cpuup callback will do this for every cpu 2968 * as it comes online 2969 */ 2970 err = process_zones(smp_processor_id()); 2971 BUG_ON(err); 2972 register_cpu_notifier(&pageset_notifier); 2973} 2974 2975#endif 2976 2977static noinline __init_refok 2978int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 2979{ 2980 int i; 2981 struct pglist_data *pgdat = zone->zone_pgdat; 2982 size_t alloc_size; 2983 2984 /* 2985 * The per-page waitqueue mechanism uses hashed waitqueues 2986 * per zone. 
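 *
 * For illustration (without memory hotplug): a 1GB zone of 4K pages holds
 * 262144 pages, so the table gets 262144 / PAGES_PER_WAITQUEUE = 1024
 * hashed waitqueues and wait_table_bits ends up as 10.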
2987 */ 2988 zone->wait_table_hash_nr_entries = 2989 wait_table_hash_nr_entries(zone_size_pages); 2990 zone->wait_table_bits = 2991 wait_table_bits(zone->wait_table_hash_nr_entries); 2992 alloc_size = zone->wait_table_hash_nr_entries 2993 * sizeof(wait_queue_head_t); 2994 2995 if (!slab_is_available()) { 2996 zone->wait_table = (wait_queue_head_t *) 2997 alloc_bootmem_node(pgdat, alloc_size); 2998 } else { 2999 /* 3000 * This case means that a zone whose size was 0 gets new memory 3001 * via memory hot-add. 3002 * But it may be the case that a new node was hot-added. In 3003 * this case vmalloc() will not be able to use this new node's 3004 * memory - this wait_table must be initialized to use this new 3005 * node itself as well. 3006 * To use this new node's memory, further consideration will be 3007 * necessary. 3008 */ 3009 zone->wait_table = vmalloc(alloc_size); 3010 } 3011 if (!zone->wait_table) 3012 return -ENOMEM; 3013 3014 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) 3015 init_waitqueue_head(zone->wait_table + i); 3016 3017 return 0; 3018} 3019 3020static __meminit void zone_pcp_init(struct zone *zone) 3021{ 3022 int cpu; 3023 unsigned long batch = zone_batchsize(zone); 3024 3025 for (cpu = 0; cpu < NR_CPUS; cpu++) { 3026#ifdef CONFIG_NUMA 3027 /* Early boot. Slab allocator not functional yet */ 3028 zone_pcp(zone, cpu) = &boot_pageset[cpu]; 3029 setup_pageset(&boot_pageset[cpu],0); 3030#else 3031 setup_pageset(zone_pcp(zone,cpu), batch); 3032#endif 3033 } 3034 if (zone->present_pages) 3035 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 3036 zone->name, zone->present_pages, batch); 3037} 3038 3039__meminit int init_currently_empty_zone(struct zone *zone, 3040 unsigned long zone_start_pfn, 3041 unsigned long size, 3042 enum memmap_context context) 3043{ 3044 struct pglist_data *pgdat = zone->zone_pgdat; 3045 int ret; 3046 ret = zone_wait_table_init(zone, size); 3047 if (ret) 3048 return ret; 3049 pgdat->nr_zones = zone_idx(zone) + 1; 3050 3051 zone->zone_start_pfn = zone_start_pfn; 3052 3053 mminit_dprintk(MMINIT_TRACE, "memmap_init", 3054 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 3055 pgdat->node_id, 3056 (unsigned long)zone_idx(zone), 3057 zone_start_pfn, (zone_start_pfn + size)); 3058 3059 zone_init_free_lists(zone); 3060 3061 return 0; 3062} 3063 3064#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 3065/* 3066 * Basic iterator support. Return the first range of PFNs for a node 3067 * Note: nid == MAX_NUMNODES returns first region regardless of node 3068 */ 3069static int __meminit first_active_region_index_in_nid(int nid) 3070{ 3071 int i; 3072 3073 for (i = 0; i < nr_nodemap_entries; i++) 3074 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) 3075 return i; 3076 3077 return -1; 3078} 3079 3080/* 3081 * Basic iterator support. Return the next active range of PFNs for a node 3082 * Note: nid == MAX_NUMNODES returns next region regardless of node 3083 */ 3084static int __meminit next_active_region_index_in_nid(int index, int nid) 3085{ 3086 for (index = index + 1; index < nr_nodemap_entries; index++) 3087 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) 3088 return index; 3089 3090 return -1; 3091} 3092 3093#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 3094/* 3095 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 
3096 * Architectures may implement their own version but if add_active_range() 3097 * was used and there are no special requirements, this is a convenient 3098 * alternative 3099 */ 3100int __meminit __early_pfn_to_nid(unsigned long pfn) 3101{ 3102 int i; 3103 3104 for (i = 0; i < nr_nodemap_entries; i++) { 3105 unsigned long start_pfn = early_node_map[i].start_pfn; 3106 unsigned long end_pfn = early_node_map[i].end_pfn; 3107 3108 if (start_pfn <= pfn && pfn < end_pfn) 3109 return early_node_map[i].nid; 3110 } 3111 /* This is a memory hole */ 3112 return -1; 3113} 3114#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 3115 3116int __meminit early_pfn_to_nid(unsigned long pfn) 3117{ 3118 int nid; 3119 3120 nid = __early_pfn_to_nid(pfn); 3121 if (nid >= 0) 3122 return nid; 3123 /* just returns 0 */ 3124 return 0; 3125} 3126 3127#ifdef CONFIG_NODES_SPAN_OTHER_NODES 3128bool __meminit early_pfn_in_nid(unsigned long pfn, int node) 3129{ 3130 int nid; 3131 3132 nid = __early_pfn_to_nid(pfn); 3133 if (nid >= 0 && nid != node) 3134 return false; 3135 return true; 3136} 3137#endif 3138 3139/* Basic iterator support to walk early_node_map[] */ 3140#define for_each_active_range_index_in_nid(i, nid) \ 3141 for (i = first_active_region_index_in_nid(nid); i != -1; \ 3142 i = next_active_region_index_in_nid(i, nid)) 3143 3144/** 3145 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range 3146 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 3147 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node 3148 * 3149 * If an architecture guarantees that all ranges registered with 3150 * add_active_ranges() contain no holes and may be freed, this 3151 * this function may be used instead of calling free_bootmem() manually. 3152 */ 3153void __init free_bootmem_with_active_regions(int nid, 3154 unsigned long max_low_pfn) 3155{ 3156 int i; 3157 3158 for_each_active_range_index_in_nid(i, nid) { 3159 unsigned long size_pages = 0; 3160 unsigned long end_pfn = early_node_map[i].end_pfn; 3161 3162 if (early_node_map[i].start_pfn >= max_low_pfn) 3163 continue; 3164 3165 if (end_pfn > max_low_pfn) 3166 end_pfn = max_low_pfn; 3167 3168 size_pages = end_pfn - early_node_map[i].start_pfn; 3169 free_bootmem_node(NODE_DATA(early_node_map[i].nid), 3170 PFN_PHYS(early_node_map[i].start_pfn), 3171 size_pages << PAGE_SHIFT); 3172 } 3173} 3174 3175void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data) 3176{ 3177 int i; 3178 int ret; 3179 3180 for_each_active_range_index_in_nid(i, nid) { 3181 ret = work_fn(early_node_map[i].start_pfn, 3182 early_node_map[i].end_pfn, data); 3183 if (ret) 3184 break; 3185 } 3186} 3187/** 3188 * sparse_memory_present_with_active_regions - Call memory_present for each active range 3189 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 3190 * 3191 * If an architecture guarantees that all ranges registered with 3192 * add_active_ranges() contain no holes and may be freed, this 3193 * function may be used instead of calling memory_present() manually. 3194 */ 3195void __init sparse_memory_present_with_active_regions(int nid) 3196{ 3197 int i; 3198 3199 for_each_active_range_index_in_nid(i, nid) 3200 memory_present(early_node_map[i].nid, 3201 early_node_map[i].start_pfn, 3202 early_node_map[i].end_pfn); 3203} 3204 3205/** 3206 * get_pfn_range_for_nid - Return the start and end page frames for a node 3207 * @nid: The nid to return the range for. 
If MAX_NUMNODES, the min and max PFN are returned. 3208 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 3209 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 3210 * 3211 * It returns the start and end page frame of a node based on information 3212 * provided by an arch calling add_active_range(). If called for a node 3213 * with no available memory, a warning is printed and the start and end 3214 * PFNs will be 0. 3215 */ 3216void __meminit get_pfn_range_for_nid(unsigned int nid, 3217 unsigned long *start_pfn, unsigned long *end_pfn) 3218{ 3219 int i; 3220 *start_pfn = -1UL; 3221 *end_pfn = 0; 3222 3223 for_each_active_range_index_in_nid(i, nid) { 3224 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn); 3225 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn); 3226 } 3227 3228 if (*start_pfn == -1UL) 3229 *start_pfn = 0; 3230} 3231 3232/* 3233 * This finds a zone that can be used for ZONE_MOVABLE pages. The 3234 * assumption is made that zones within a node are ordered in monotonic 3235 * increasing memory addresses so that the "highest" populated zone is used 3236 */ 3237static void __init find_usable_zone_for_movable(void) 3238{ 3239 int zone_index; 3240 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 3241 if (zone_index == ZONE_MOVABLE) 3242 continue; 3243 3244 if (arch_zone_highest_possible_pfn[zone_index] > 3245 arch_zone_lowest_possible_pfn[zone_index]) 3246 break; 3247 } 3248 3249 VM_BUG_ON(zone_index == -1); 3250 movable_zone = zone_index; 3251} 3252 3253/* 3254 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 3255 * because it is sized independant of architecture. Unlike the other zones, 3256 * the starting point for ZONE_MOVABLE is not fixed. It may be different 3257 * in each node depending on the size of each node and how evenly kernelcore 3258 * is distributed. This helper function adjusts the zone ranges 3259 * provided by the architecture for a given node by using the end of the 3260 * highest usable zone for ZONE_MOVABLE. 
This preserves the assumption that 3261 * zones within a node are in order of monotonic increases memory addresses 3262 */ 3263static void __meminit adjust_zone_range_for_zone_movable(int nid, 3264 unsigned long zone_type, 3265 unsigned long node_start_pfn, 3266 unsigned long node_end_pfn, 3267 unsigned long *zone_start_pfn, 3268 unsigned long *zone_end_pfn) 3269{ 3270 /* Only adjust if ZONE_MOVABLE is on this node */ 3271 if (zone_movable_pfn[nid]) { 3272 /* Size ZONE_MOVABLE */ 3273 if (zone_type == ZONE_MOVABLE) { 3274 *zone_start_pfn = zone_movable_pfn[nid]; 3275 *zone_end_pfn = min(node_end_pfn, 3276 arch_zone_highest_possible_pfn[movable_zone]); 3277 3278 /* Adjust for ZONE_MOVABLE starting within this range */ 3279 } else if (*zone_start_pfn < zone_movable_pfn[nid] && 3280 *zone_end_pfn > zone_movable_pfn[nid]) { 3281 *zone_end_pfn = zone_movable_pfn[nid]; 3282 3283 /* Check if this whole range is within ZONE_MOVABLE */ 3284 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 3285 *zone_start_pfn = *zone_end_pfn; 3286 } 3287} 3288 3289/* 3290 * Return the number of pages a zone spans in a node, including holes 3291 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 3292 */ 3293static unsigned long __meminit zone_spanned_pages_in_node(int nid, 3294 unsigned long zone_type, 3295 unsigned long *ignored) 3296{ 3297 unsigned long node_start_pfn, node_end_pfn; 3298 unsigned long zone_start_pfn, zone_end_pfn; 3299 3300 /* Get the start and end of the node and zone */ 3301 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 3302 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 3303 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 3304 adjust_zone_range_for_zone_movable(nid, zone_type, 3305 node_start_pfn, node_end_pfn, 3306 &zone_start_pfn, &zone_end_pfn); 3307 3308 /* Check that this node has pages within the zone's required range */ 3309 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) 3310 return 0; 3311 3312 /* Move the zone boundaries inside the node if necessary */ 3313 zone_end_pfn = min(zone_end_pfn, node_end_pfn); 3314 zone_start_pfn = max(zone_start_pfn, node_start_pfn); 3315 3316 /* Return the spanned pages */ 3317 return zone_end_pfn - zone_start_pfn; 3318} 3319 3320/* 3321 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 3322 * then all holes in the requested range will be accounted for. 
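 *
 * For illustration: with active ranges [0, 100) and [200, 300) and a
 * requested range of [0, 300), the 100 pfns between the two ranges are
 * counted as holes.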
3323 */
3324static unsigned long __meminit __absent_pages_in_range(int nid,
3325 unsigned long range_start_pfn,
3326 unsigned long range_end_pfn)
3327{
3328 int i = 0;
3329 unsigned long prev_end_pfn = 0, hole_pages = 0;
3330 unsigned long start_pfn;
3331
3332 /* Find the end_pfn of the first active range of pfns in the node */
3333 i = first_active_region_index_in_nid(nid);
3334 if (i == -1)
3335 return 0;
3336
3337 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3338
3339 /* Account for ranges before physical memory on this node */
3340 if (early_node_map[i].start_pfn > range_start_pfn)
3341 hole_pages = prev_end_pfn - range_start_pfn;
3342
3343 /* Find all holes for the zone within the node */
3344 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3345
3346 /* No need to continue if prev_end_pfn is outside the zone */
3347 if (prev_end_pfn >= range_end_pfn)
3348 break;
3349
3350 /* Make sure the end of the zone is not within the hole */
3351 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3352 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3353
3354 /* Update the hole size count and move on */
3355 if (start_pfn > range_start_pfn) {
3356 BUG_ON(prev_end_pfn > start_pfn);
3357 hole_pages += start_pfn - prev_end_pfn;
3358 }
3359 prev_end_pfn = early_node_map[i].end_pfn;
3360 }
3361
3362 /* Account for ranges past physical memory on this node */
3363 if (range_end_pfn > prev_end_pfn)
3364 hole_pages += range_end_pfn -
3365 max(range_start_pfn, prev_end_pfn);
3366
3367 return hole_pages;
3368}
3369
3370/**
3371 * absent_pages_in_range - Return number of page frames in holes within a range
3372 * @start_pfn: The start PFN to start searching for holes
3373 * @end_pfn: The end PFN to stop searching for holes
3374 *
3375 * It returns the number of page frames in memory holes within a range.
3376 */ 3377unsigned long __init absent_pages_in_range(unsigned long start_pfn, 3378 unsigned long end_pfn) 3379{ 3380 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 3381} 3382 3383/* Return the number of page frames in holes in a zone on a node */ 3384static unsigned long __meminit zone_absent_pages_in_node(int nid, 3385 unsigned long zone_type, 3386 unsigned long *ignored) 3387{ 3388 unsigned long node_start_pfn, node_end_pfn; 3389 unsigned long zone_start_pfn, zone_end_pfn; 3390 3391 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 3392 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type], 3393 node_start_pfn); 3394 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type], 3395 node_end_pfn); 3396 3397 adjust_zone_range_for_zone_movable(nid, zone_type, 3398 node_start_pfn, node_end_pfn, 3399 &zone_start_pfn, &zone_end_pfn); 3400 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 3401} 3402 3403#else 3404static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, 3405 unsigned long zone_type, 3406 unsigned long *zones_size) 3407{ 3408 return zones_size[zone_type]; 3409} 3410 3411static inline unsigned long __meminit zone_absent_pages_in_node(int nid, 3412 unsigned long zone_type, 3413 unsigned long *zholes_size) 3414{ 3415 if (!zholes_size) 3416 return 0; 3417 3418 return zholes_size[zone_type]; 3419} 3420 3421#endif 3422 3423static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 3424 unsigned long *zones_size, unsigned long *zholes_size) 3425{ 3426 unsigned long realtotalpages, totalpages = 0; 3427 enum zone_type i; 3428 3429 for (i = 0; i < MAX_NR_ZONES; i++) 3430 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, 3431 zones_size); 3432 pgdat->node_spanned_pages = totalpages; 3433 3434 realtotalpages = totalpages; 3435 for (i = 0; i < MAX_NR_ZONES; i++) 3436 realtotalpages -= 3437 zone_absent_pages_in_node(pgdat->node_id, i, 3438 zholes_size); 3439 pgdat->node_present_pages = realtotalpages; 3440 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 3441 realtotalpages); 3442} 3443 3444#ifndef CONFIG_SPARSEMEM 3445/* 3446 * Calculate the size of the zone->blockflags rounded to an unsigned long 3447 * Start by making sure zonesize is a multiple of pageblock_order by rounding 3448 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 3449 * round what is now in bits to nearest long in bits, then return it in 3450 * bytes. 3451 */ 3452static unsigned long __init usemap_size(unsigned long zonesize) 3453{ 3454 unsigned long usemapsize; 3455 3456 usemapsize = roundup(zonesize, pageblock_nr_pages); 3457 usemapsize = usemapsize >> pageblock_order; 3458 usemapsize *= NR_PAGEBLOCK_BITS; 3459 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 3460 3461 return usemapsize / 8; 3462} 3463 3464static void __init setup_usemap(struct pglist_data *pgdat, 3465 struct zone *zone, unsigned long zonesize) 3466{ 3467 unsigned long usemapsize = usemap_size(zonesize); 3468 zone->pageblock_flags = NULL; 3469 if (usemapsize) 3470 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); 3471} 3472#else 3473static void inline setup_usemap(struct pglist_data *pgdat, 3474 struct zone *zone, unsigned long zonesize) {} 3475#endif /* CONFIG_SPARSEMEM */ 3476 3477#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 3478 3479/* Return a sensible default order for the pageblock size. 
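 *
 * For example, if the boot-selected huge page is 2MB on a system with 4K
 * base pages, HPAGE_SHIFT is 21 and HUGETLB_PAGE_ORDER is 9, so a
 * pageblock defaults to 512 pages; when no huge page is larger than a
 * base page, MAX_ORDER-1 is used instead.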
*/ 3480static inline int pageblock_default_order(void) 3481{ 3482 if (HPAGE_SHIFT > PAGE_SHIFT) 3483 return HUGETLB_PAGE_ORDER; 3484 3485 return MAX_ORDER-1; 3486} 3487 3488/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 3489static inline void __init set_pageblock_order(unsigned int order) 3490{ 3491 /* Check that pageblock_nr_pages has not already been setup */ 3492 if (pageblock_order) 3493 return; 3494 3495 /* 3496 * Assume the largest contiguous order of interest is a huge page. 3497 * This value may be variable depending on boot parameters on IA64 3498 */ 3499 pageblock_order = order; 3500} 3501#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 3502 3503/* 3504 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 3505 * and pageblock_default_order() are unused as pageblock_order is set 3506 * at compile-time. See include/linux/pageblock-flags.h for the values of 3507 * pageblock_order based on the kernel config 3508 */ 3509static inline int pageblock_default_order(unsigned int order) 3510{ 3511 return MAX_ORDER-1; 3512} 3513#define set_pageblock_order(x) do {} while (0) 3514 3515#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 3516 3517/* 3518 * Set up the zone data structures: 3519 * - mark all pages reserved 3520 * - mark all memory queues empty 3521 * - clear the memory bitmaps 3522 */ 3523static void __paginginit free_area_init_core(struct pglist_data *pgdat, 3524 unsigned long *zones_size, unsigned long *zholes_size) 3525{ 3526 enum zone_type j; 3527 int nid = pgdat->node_id; 3528 unsigned long zone_start_pfn = pgdat->node_start_pfn; 3529 int ret; 3530 3531 pgdat_resize_init(pgdat); 3532 pgdat->nr_zones = 0; 3533 init_waitqueue_head(&pgdat->kswapd_wait); 3534 pgdat->kswapd_max_order = 0; 3535 pgdat_page_cgroup_init(pgdat); 3536 3537 for (j = 0; j < MAX_NR_ZONES; j++) { 3538 struct zone *zone = pgdat->node_zones + j; 3539 unsigned long size, realsize, memmap_pages; 3540 enum lru_list l; 3541 3542 size = zone_spanned_pages_in_node(nid, j, zones_size); 3543 realsize = size - zone_absent_pages_in_node(nid, j, 3544 zholes_size); 3545 3546 /* 3547 * Adjust realsize so that it accounts for how much memory 3548 * is used by this zone for memmap. 
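 *
 * For illustration (assuming 4K pages and a 56-byte struct page): a zone
 * spanning 262144 pages needs PAGE_ALIGN(262144 * 56) >> PAGE_SHIFT =
 * 3584 pages of memmap, which are subtracted from realsize here.
 *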
This affects the watermark 3549 * and per-cpu initialisations 3550 */ 3551 memmap_pages = 3552 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT; 3553 if (realsize >= memmap_pages) { 3554 realsize -= memmap_pages; 3555 if (memmap_pages) 3556 printk(KERN_DEBUG 3557 " %s zone: %lu pages used for memmap\n", 3558 zone_names[j], memmap_pages); 3559 } else 3560 printk(KERN_WARNING 3561 " %s zone: %lu pages exceeds realsize %lu\n", 3562 zone_names[j], memmap_pages, realsize); 3563 3564 /* Account for reserved pages */ 3565 if (j == 0 && realsize > dma_reserve) { 3566 realsize -= dma_reserve; 3567 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 3568 zone_names[0], dma_reserve); 3569 } 3570 3571 if (!is_highmem_idx(j)) 3572 nr_kernel_pages += realsize; 3573 nr_all_pages += realsize; 3574 3575 zone->spanned_pages = size; 3576 zone->present_pages = realsize; 3577#ifdef CONFIG_NUMA 3578 zone->node = nid; 3579 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio) 3580 / 100; 3581 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100; 3582#endif 3583 zone->name = zone_names[j]; 3584 spin_lock_init(&zone->lock); 3585 spin_lock_init(&zone->lru_lock); 3586 zone_seqlock_init(zone); 3587 zone->zone_pgdat = pgdat; 3588 3589 zone->prev_priority = DEF_PRIORITY; 3590 3591 zone_pcp_init(zone); 3592 for_each_lru(l) { 3593 INIT_LIST_HEAD(&zone->lru[l].list); 3594 zone->lru[l].nr_scan = 0; 3595 } 3596 zone->reclaim_stat.recent_rotated[0] = 0; 3597 zone->reclaim_stat.recent_rotated[1] = 0; 3598 zone->reclaim_stat.recent_scanned[0] = 0; 3599 zone->reclaim_stat.recent_scanned[1] = 0; 3600 zap_zone_vm_stats(zone); 3601 zone->flags = 0; 3602 if (!size) 3603 continue; 3604 3605 set_pageblock_order(pageblock_default_order()); 3606 setup_usemap(pgdat, zone, size); 3607 ret = init_currently_empty_zone(zone, zone_start_pfn, 3608 size, MEMMAP_EARLY); 3609 BUG_ON(ret); 3610 memmap_init(size, nid, j, zone_start_pfn); 3611 zone_start_pfn += size; 3612 } 3613} 3614 3615static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) 3616{ 3617 /* Skip empty nodes */ 3618 if (!pgdat->node_spanned_pages) 3619 return; 3620 3621#ifdef CONFIG_FLAT_NODE_MEM_MAP 3622 /* ia64 gets its own node_mem_map, before this, without bootmem */ 3623 if (!pgdat->node_mem_map) { 3624 unsigned long size, start, end; 3625 struct page *map; 3626 3627 /* 3628 * The zone's endpoints aren't required to be MAX_ORDER 3629 * aligned but the node_mem_map endpoints must be in order 3630 * for the buddy allocator to function correctly. 
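 *
 * For example, with the default MAX_ORDER of 11 (so MAX_ORDER_NR_PAGES is
 * 1024), a node starting at pfn 1500 has its map allocated from pfn 1024
 * onwards and node_mem_map set to map + 476, so the entry for
 * node_start_pfn is still the first one reachable via node_mem_map.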
3631 */ 3632 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 3633 end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 3634 end = ALIGN(end, MAX_ORDER_NR_PAGES); 3635 size = (end - start) * sizeof(struct page); 3636 map = alloc_remap(pgdat->node_id, size); 3637 if (!map) 3638 map = alloc_bootmem_node(pgdat, size); 3639 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 3640 } 3641#ifndef CONFIG_NEED_MULTIPLE_NODES 3642 /* 3643 * With no DISCONTIG, the global mem_map is just set as node 0's 3644 */ 3645 if (pgdat == NODE_DATA(0)) { 3646 mem_map = NODE_DATA(0)->node_mem_map; 3647#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 3648 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 3649 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET); 3650#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 3651 } 3652#endif 3653#endif /* CONFIG_FLAT_NODE_MEM_MAP */ 3654} 3655 3656void __paginginit free_area_init_node(int nid, unsigned long *zones_size, 3657 unsigned long node_start_pfn, unsigned long *zholes_size) 3658{ 3659 pg_data_t *pgdat = NODE_DATA(nid); 3660 3661 pgdat->node_id = nid; 3662 pgdat->node_start_pfn = node_start_pfn; 3663 calculate_node_totalpages(pgdat, zones_size, zholes_size); 3664 3665 alloc_node_mem_map(pgdat); 3666#ifdef CONFIG_FLAT_NODE_MEM_MAP 3667 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", 3668 nid, (unsigned long)pgdat, 3669 (unsigned long)pgdat->node_mem_map); 3670#endif 3671 3672 free_area_init_core(pgdat, zones_size, zholes_size); 3673} 3674 3675#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 3676 3677#if MAX_NUMNODES > 1 3678/* 3679 * Figure out the number of possible node ids. 3680 */ 3681static void __init setup_nr_node_ids(void) 3682{ 3683 unsigned int node; 3684 unsigned int highest = 0; 3685 3686 for_each_node_mask(node, node_possible_map) 3687 highest = node; 3688 nr_node_ids = highest + 1; 3689} 3690#else 3691static inline void setup_nr_node_ids(void) 3692{ 3693} 3694#endif 3695 3696/** 3697 * add_active_range - Register a range of PFNs backed by physical memory 3698 * @nid: The node ID the range resides on 3699 * @start_pfn: The start PFN of the available physical memory 3700 * @end_pfn: The end PFN of the available physical memory 3701 * 3702 * These ranges are stored in an early_node_map[] and later used by 3703 * free_area_init_nodes() to calculate zone sizes and holes. If the 3704 * range spans a memory hole, it is up to the architecture to ensure 3705 * the memory is not freed by the bootmem allocator. If possible 3706 * the range being registered will be merged with existing ranges. 
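 *
 * For example, registering pfns [100, 200) on a node that already has
 * [150, 250) recorded simply extends that entry backwards to [100, 250)
 * rather than consuming a new early_node_map[] slot.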
3707 */ 3708void __init add_active_range(unsigned int nid, unsigned long start_pfn, 3709 unsigned long end_pfn) 3710{ 3711 int i; 3712 3713 mminit_dprintk(MMINIT_TRACE, "memory_register", 3714 "Entering add_active_range(%d, %#lx, %#lx) " 3715 "%d entries of %d used\n", 3716 nid, start_pfn, end_pfn, 3717 nr_nodemap_entries, MAX_ACTIVE_REGIONS); 3718 3719 mminit_validate_memmodel_limits(&start_pfn, &end_pfn); 3720 3721 /* Merge with existing active regions if possible */ 3722 for (i = 0; i < nr_nodemap_entries; i++) { 3723 if (early_node_map[i].nid != nid) 3724 continue; 3725 3726 /* Skip if an existing region covers this new one */ 3727 if (start_pfn >= early_node_map[i].start_pfn && 3728 end_pfn <= early_node_map[i].end_pfn) 3729 return; 3730 3731 /* Merge forward if suitable */ 3732 if (start_pfn <= early_node_map[i].end_pfn && 3733 end_pfn > early_node_map[i].end_pfn) { 3734 early_node_map[i].end_pfn = end_pfn; 3735 return; 3736 } 3737 3738 /* Merge backward if suitable */ 3739 if (start_pfn < early_node_map[i].end_pfn && 3740 end_pfn >= early_node_map[i].start_pfn) { 3741 early_node_map[i].start_pfn = start_pfn; 3742 return; 3743 } 3744 } 3745 3746 /* Check that early_node_map is large enough */ 3747 if (i >= MAX_ACTIVE_REGIONS) { 3748 printk(KERN_CRIT "More than %d memory regions, truncating\n", 3749 MAX_ACTIVE_REGIONS); 3750 return; 3751 } 3752 3753 early_node_map[i].nid = nid; 3754 early_node_map[i].start_pfn = start_pfn; 3755 early_node_map[i].end_pfn = end_pfn; 3756 nr_nodemap_entries = i + 1; 3757} 3758 3759/** 3760 * remove_active_range - Shrink an existing registered range of PFNs 3761 * @nid: The node id the range is on that should be shrunk 3762 * @start_pfn: The new PFN of the range 3763 * @end_pfn: The new PFN of the range 3764 * 3765 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node. 3766 * The map is kept near the end physical page range that has already been 3767 * registered. This function allows an arch to shrink an existing registered 3768 * range. 
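 *
 * For example, removing [50, 150) from a node that registered [0, 200)
 * leaves [0, 50) in place and re-registers [150, 200) as a separate
 * range via add_active_range().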
3769 */ 3770void __init remove_active_range(unsigned int nid, unsigned long start_pfn, 3771 unsigned long end_pfn) 3772{ 3773 int i, j; 3774 int removed = 0; 3775 3776 printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n", 3777 nid, start_pfn, end_pfn); 3778 3779 /* Find the old active region end and shrink */ 3780 for_each_active_range_index_in_nid(i, nid) { 3781 if (early_node_map[i].start_pfn >= start_pfn && 3782 early_node_map[i].end_pfn <= end_pfn) { 3783 /* clear it */ 3784 early_node_map[i].start_pfn = 0; 3785 early_node_map[i].end_pfn = 0; 3786 removed = 1; 3787 continue; 3788 } 3789 if (early_node_map[i].start_pfn < start_pfn && 3790 early_node_map[i].end_pfn > start_pfn) { 3791 unsigned long temp_end_pfn = early_node_map[i].end_pfn; 3792 early_node_map[i].end_pfn = start_pfn; 3793 if (temp_end_pfn > end_pfn) 3794 add_active_range(nid, end_pfn, temp_end_pfn); 3795 continue; 3796 } 3797 if (early_node_map[i].start_pfn >= start_pfn && 3798 early_node_map[i].end_pfn > end_pfn && 3799 early_node_map[i].start_pfn < end_pfn) { 3800 early_node_map[i].start_pfn = end_pfn; 3801 continue; 3802 } 3803 } 3804 3805 if (!removed) 3806 return; 3807 3808 /* remove the blank ones */ 3809 for (i = nr_nodemap_entries - 1; i > 0; i--) { 3810 if (early_node_map[i].nid != nid) 3811 continue; 3812 if (early_node_map[i].end_pfn) 3813 continue; 3814 /* we found it, get rid of it */ 3815 for (j = i; j < nr_nodemap_entries - 1; j++) 3816 memcpy(&early_node_map[j], &early_node_map[j+1], 3817 sizeof(early_node_map[j])); 3818 j = nr_nodemap_entries - 1; 3819 memset(&early_node_map[j], 0, sizeof(early_node_map[j])); 3820 nr_nodemap_entries--; 3821 } 3822} 3823 3824/** 3825 * remove_all_active_ranges - Remove all currently registered regions 3826 * 3827 * During discovery, it may be found that a table like SRAT is invalid 3828 * and an alternative discovery method must be used. This function removes 3829 * all currently registered regions. 3830 */ 3831void __init remove_all_active_ranges(void) 3832{ 3833 memset(early_node_map, 0, sizeof(early_node_map)); 3834 nr_nodemap_entries = 0; 3835} 3836 3837/* Compare two active node_active_regions */ 3838static int __init cmp_node_active_region(const void *a, const void *b) 3839{ 3840 struct node_active_region *arange = (struct node_active_region *)a; 3841 struct node_active_region *brange = (struct node_active_region *)b; 3842 3843 /* Done this way to avoid overflows */ 3844 if (arange->start_pfn > brange->start_pfn) 3845 return 1; 3846 if (arange->start_pfn < brange->start_pfn) 3847 return -1; 3848 3849 return 0; 3850} 3851 3852/* sort the node_map by start_pfn */ 3853static void __init sort_node_map(void) 3854{ 3855 sort(early_node_map, (size_t)nr_nodemap_entries, 3856 sizeof(struct node_active_region), 3857 cmp_node_active_region, NULL); 3858} 3859 3860/* Find the lowest pfn for a node */ 3861static unsigned long __init find_min_pfn_for_node(int nid) 3862{ 3863 int i; 3864 unsigned long min_pfn = ULONG_MAX; 3865 3866 /* Assuming a sorted map, the first range found has the starting pfn */ 3867 for_each_active_range_index_in_nid(i, nid) 3868 min_pfn = min(min_pfn, early_node_map[i].start_pfn); 3869 3870 if (min_pfn == ULONG_MAX) { 3871 printk(KERN_WARNING 3872 "Could not find start_pfn for node %d\n", nid); 3873 return 0; 3874 } 3875 3876 return min_pfn; 3877} 3878 3879/** 3880 * find_min_pfn_with_active_regions - Find the minimum PFN registered 3881 * 3882 * It returns the minimum PFN based on information provided via 3883 * add_active_range(). 
3884 */ 3885unsigned long __init find_min_pfn_with_active_regions(void) 3886{ 3887 return find_min_pfn_for_node(MAX_NUMNODES); 3888} 3889 3890/* 3891 * early_calculate_totalpages() 3892 * Sum pages in active regions for movable zone. 3893 * Populate N_HIGH_MEMORY for calculating usable_nodes. 3894 */ 3895static unsigned long __init early_calculate_totalpages(void) 3896{ 3897 int i; 3898 unsigned long totalpages = 0; 3899 3900 for (i = 0; i < nr_nodemap_entries; i++) { 3901 unsigned long pages = early_node_map[i].end_pfn - 3902 early_node_map[i].start_pfn; 3903 totalpages += pages; 3904 if (pages) 3905 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY); 3906 } 3907 return totalpages; 3908} 3909 3910/* 3911 * Find the PFN the Movable zone begins in each node. Kernel memory 3912 * is spread evenly between nodes as long as the nodes have enough 3913 * memory. When they don't, some nodes will have more kernelcore than 3914 * others 3915 */ 3916static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) 3917{ 3918 int i, nid; 3919 unsigned long usable_startpfn; 3920 unsigned long kernelcore_node, kernelcore_remaining; 3921 unsigned long totalpages = early_calculate_totalpages(); 3922 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); 3923 3924 /* 3925 * If movablecore was specified, calculate what size of 3926 * kernelcore that corresponds so that memory usable for 3927 * any allocation type is evenly spread. If both kernelcore 3928 * and movablecore are specified, then the value of kernelcore 3929 * will be used for required_kernelcore if it's greater than 3930 * what movablecore would have allowed. 3931 */ 3932 if (required_movablecore) { 3933 unsigned long corepages; 3934 3935 /* 3936 * Round-up so that ZONE_MOVABLE is at least as large as what 3937 * was requested by the user 3938 */ 3939 required_movablecore = 3940 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 3941 corepages = totalpages - required_movablecore; 3942 3943 required_kernelcore = max(required_kernelcore, corepages); 3944 } 3945 3946 /* If kernelcore was not specified, there is no ZONE_MOVABLE */ 3947 if (!required_kernelcore) 3948 return; 3949 3950 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 3951 find_usable_zone_for_movable(); 3952 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 3953 3954restart: 3955 /* Spread kernelcore memory as evenly as possible throughout nodes */ 3956 kernelcore_node = required_kernelcore / usable_nodes; 3957 for_each_node_state(nid, N_HIGH_MEMORY) { 3958 /* 3959 * Recalculate kernelcore_node if the division per node 3960 * now exceeds what is necessary to satisfy the requested 3961 * amount of memory for the kernel 3962 */ 3963 if (required_kernelcore < kernelcore_node) 3964 kernelcore_node = required_kernelcore / usable_nodes; 3965 3966 /* 3967 * As the map is walked, we track how much memory is usable 3968 * by the kernel using kernelcore_remaining. 
When it is 3969 * 0, the rest of the node is usable by ZONE_MOVABLE 3970 */ 3971 kernelcore_remaining = kernelcore_node; 3972 3973 /* Go through each range of PFNs within this node */ 3974 for_each_active_range_index_in_nid(i, nid) { 3975 unsigned long start_pfn, end_pfn; 3976 unsigned long size_pages; 3977 3978 start_pfn = max(early_node_map[i].start_pfn, 3979 zone_movable_pfn[nid]); 3980 end_pfn = early_node_map[i].end_pfn; 3981 if (start_pfn >= end_pfn) 3982 continue; 3983 3984 /* Account for what is only usable for kernelcore */ 3985 if (start_pfn < usable_startpfn) { 3986 unsigned long kernel_pages; 3987 kernel_pages = min(end_pfn, usable_startpfn) 3988 - start_pfn; 3989 3990 kernelcore_remaining -= min(kernel_pages, 3991 kernelcore_remaining); 3992 required_kernelcore -= min(kernel_pages, 3993 required_kernelcore); 3994 3995 /* Continue if range is now fully accounted */ 3996 if (end_pfn <= usable_startpfn) { 3997 3998 /* 3999 * Push zone_movable_pfn to the end so 4000 * that if we have to rebalance 4001 * kernelcore across nodes, we will 4002 * not double account here 4003 */ 4004 zone_movable_pfn[nid] = end_pfn; 4005 continue; 4006 } 4007 start_pfn = usable_startpfn; 4008 } 4009 4010 /* 4011 * The usable PFN range for ZONE_MOVABLE is from 4012 * start_pfn->end_pfn. Calculate size_pages as the 4013 * number of pages used as kernelcore 4014 */ 4015 size_pages = end_pfn - start_pfn; 4016 if (size_pages > kernelcore_remaining) 4017 size_pages = kernelcore_remaining; 4018 zone_movable_pfn[nid] = start_pfn + size_pages; 4019 4020 /* 4021 * Some kernelcore has been met, update counts and 4022 * break if the kernelcore for this node has been 4023 * satisified 4024 */ 4025 required_kernelcore -= min(required_kernelcore, 4026 size_pages); 4027 kernelcore_remaining -= size_pages; 4028 if (!kernelcore_remaining) 4029 break; 4030 } 4031 } 4032 4033 /* 4034 * If there is still required_kernelcore, we do another pass with one 4035 * less node in the count. This will push zone_movable_pfn[nid] further 4036 * along on the nodes that still have memory until kernelcore is 4037 * satisified 4038 */ 4039 usable_nodes--; 4040 if (usable_nodes && required_kernelcore > usable_nodes) 4041 goto restart; 4042 4043 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 4044 for (nid = 0; nid < MAX_NUMNODES; nid++) 4045 zone_movable_pfn[nid] = 4046 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 4047} 4048 4049/* Any regular memory on that node ? */ 4050static void check_for_regular_memory(pg_data_t *pgdat) 4051{ 4052#ifdef CONFIG_HIGHMEM 4053 enum zone_type zone_type; 4054 4055 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) { 4056 struct zone *zone = &pgdat->node_zones[zone_type]; 4057 if (zone->present_pages) 4058 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY); 4059 } 4060#endif 4061} 4062 4063/** 4064 * free_area_init_nodes - Initialise all pg_data_t and zone data 4065 * @max_zone_pfn: an array of max PFNs for each zone 4066 * 4067 * This will call free_area_init_node() for each active node in the system. 4068 * Using the page ranges provided by add_active_range(), the size of each 4069 * zone in each node and their holes is calculated. If the maximum PFN 4070 * between two adjacent zones match, it is assumed that the zone is empty. 4071 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 4072 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 4073 * starts where the previous one ended. 
For example, ZONE_DMA32 starts 4074 * at arch_max_dma_pfn. 4075 */ 4076void __init free_area_init_nodes(unsigned long *max_zone_pfn) 4077{ 4078 unsigned long nid; 4079 int i; 4080 4081 /* Sort early_node_map as initialisation assumes it is sorted */ 4082 sort_node_map(); 4083 4084 /* Record where the zone boundaries are */ 4085 memset(arch_zone_lowest_possible_pfn, 0, 4086 sizeof(arch_zone_lowest_possible_pfn)); 4087 memset(arch_zone_highest_possible_pfn, 0, 4088 sizeof(arch_zone_highest_possible_pfn)); 4089 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 4090 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 4091 for (i = 1; i < MAX_NR_ZONES; i++) { 4092 if (i == ZONE_MOVABLE) 4093 continue; 4094 arch_zone_lowest_possible_pfn[i] = 4095 arch_zone_highest_possible_pfn[i-1]; 4096 arch_zone_highest_possible_pfn[i] = 4097 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 4098 } 4099 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; 4100 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; 4101 4102 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 4103 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 4104 find_zone_movable_pfns_for_nodes(zone_movable_pfn); 4105 4106 /* Print out the zone ranges */ 4107 printk("Zone PFN ranges:\n"); 4108 for (i = 0; i < MAX_NR_ZONES; i++) { 4109 if (i == ZONE_MOVABLE) 4110 continue; 4111 printk(" %-8s %0#10lx -> %0#10lx\n", 4112 zone_names[i], 4113 arch_zone_lowest_possible_pfn[i], 4114 arch_zone_highest_possible_pfn[i]); 4115 } 4116 4117 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 4118 printk("Movable zone start PFN for each node\n"); 4119 for (i = 0; i < MAX_NUMNODES; i++) { 4120 if (zone_movable_pfn[i]) 4121 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]); 4122 } 4123 4124 /* Print out the early_node_map[] */ 4125 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); 4126 for (i = 0; i < nr_nodemap_entries; i++) 4127 printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid, 4128 early_node_map[i].start_pfn, 4129 early_node_map[i].end_pfn); 4130 4131 /* Initialise every node */ 4132 mminit_verify_pageflags_layout(); 4133 setup_nr_node_ids(); 4134 for_each_online_node(nid) { 4135 pg_data_t *pgdat = NODE_DATA(nid); 4136 free_area_init_node(nid, NULL, 4137 find_min_pfn_for_node(nid), NULL); 4138 4139 /* Any memory on that node */ 4140 if (pgdat->node_present_pages) 4141 node_set_state(nid, N_HIGH_MEMORY); 4142 check_for_regular_memory(pgdat); 4143 } 4144} 4145 4146static int __init cmdline_parse_core(char *p, unsigned long *core) 4147{ 4148 unsigned long long coremem; 4149 if (!p) 4150 return -EINVAL; 4151 4152 coremem = memparse(p, &p); 4153 *core = coremem >> PAGE_SHIFT; 4154 4155 /* Paranoid check that UL is enough for the coremem value */ 4156 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 4157 4158 return 0; 4159} 4160 4161/* 4162 * kernelcore=size sets the amount of memory for use for allocations that 4163 * cannot be reclaimed or migrated. 4164 */ 4165static int __init cmdline_parse_kernelcore(char *p) 4166{ 4167 return cmdline_parse_core(p, &required_kernelcore); 4168} 4169 4170/* 4171 * movablecore=size sets the amount of memory for use for allocations that 4172 * can be reclaimed or migrated. 
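 * For example (hypothetical sizes): booting a 16GB machine with
 * movablecore=4G makes roughly 12GB of memory kernelcore, spread as evenly
 * as possible across the nodes that have memory; the remainder of each
 * node then becomes ZONE_MOVABLE.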
4173 */ 4174static int __init cmdline_parse_movablecore(char *p) 4175{ 4176 return cmdline_parse_core(p, &required_movablecore); 4177} 4178 4179early_param("kernelcore", cmdline_parse_kernelcore); 4180early_param("movablecore", cmdline_parse_movablecore); 4181 4182#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 4183 4184/** 4185 * set_dma_reserve - set the specified number of pages reserved in the first zone 4186 * @new_dma_reserve: The number of pages to mark reserved 4187 * 4188 * The per-cpu batchsize and zone watermarks are determined by present_pages. 4189 * In the DMA zone, a significant percentage may be consumed by kernel image 4190 * and other unfreeable allocations which can skew the watermarks badly. This 4191 * function may optionally be used to account for unfreeable pages in the 4192 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 4193 * smaller per-cpu batchsize. 4194 */ 4195void __init set_dma_reserve(unsigned long new_dma_reserve) 4196{ 4197 dma_reserve = new_dma_reserve; 4198} 4199 4200#ifndef CONFIG_NEED_MULTIPLE_NODES 4201struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] }; 4202EXPORT_SYMBOL(contig_page_data); 4203#endif 4204 4205void __init free_area_init(unsigned long *zones_size) 4206{ 4207 free_area_init_node(0, zones_size, 4208 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 4209} 4210 4211static int page_alloc_cpu_notify(struct notifier_block *self, 4212 unsigned long action, void *hcpu) 4213{ 4214 int cpu = (unsigned long)hcpu; 4215 4216 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 4217 drain_pages(cpu); 4218 4219 /* 4220 * Spill the event counters of the dead processor 4221 * into the current processors event counters. 4222 * This artificially elevates the count of the current 4223 * processor. 4224 */ 4225 vm_events_fold_cpu(cpu); 4226 4227 /* 4228 * Zero the differential counters of the dead processor 4229 * so that the vm statistics are consistent. 4230 * 4231 * This is only okay since the processor is dead and cannot 4232 * race with what we are doing. 4233 */ 4234 refresh_cpu_vm_stats(cpu); 4235 } 4236 return NOTIFY_OK; 4237} 4238 4239void __init page_alloc_init(void) 4240{ 4241 hotcpu_notifier(page_alloc_cpu_notify, 0); 4242} 4243 4244/* 4245 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio 4246 * or min_free_kbytes changes. 4247 */ 4248static void calculate_totalreserve_pages(void) 4249{ 4250 struct pglist_data *pgdat; 4251 unsigned long reserve_pages = 0; 4252 enum zone_type i, j; 4253 4254 for_each_online_pgdat(pgdat) { 4255 for (i = 0; i < MAX_NR_ZONES; i++) { 4256 struct zone *zone = pgdat->node_zones + i; 4257 unsigned long max = 0; 4258 4259 /* Find valid and maximum lowmem_reserve in the zone */ 4260 for (j = i; j < MAX_NR_ZONES; j++) { 4261 if (zone->lowmem_reserve[j] > max) 4262 max = zone->lowmem_reserve[j]; 4263 } 4264 4265 /* we treat pages_high as reserved pages. */ 4266 max += zone->pages_high; 4267 4268 if (max > zone->present_pages) 4269 max = zone->present_pages; 4270 reserve_pages += max; 4271 } 4272 } 4273 totalreserve_pages = reserve_pages; 4274} 4275 4276/* 4277 * setup_per_zone_lowmem_reserve - called whenever 4278 * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone 4279 * has a correct pages reserved value, so an adequate number of 4280 * pages are left in the zone after a successful __alloc_pages(). 
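 * As a rough rule derived from the loop below, the reserve that a lower
 * zone idx holds against allocations targeted at a higher zone j is the
 * number of pages present in zones idx+1 through j divided by
 * sysctl_lowmem_reserve_ratio[idx].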
4281 */ 4282static void setup_per_zone_lowmem_reserve(void) 4283{ 4284 struct pglist_data *pgdat; 4285 enum zone_type j, idx; 4286 4287 for_each_online_pgdat(pgdat) { 4288 for (j = 0; j < MAX_NR_ZONES; j++) { 4289 struct zone *zone = pgdat->node_zones + j; 4290 unsigned long present_pages = zone->present_pages; 4291 4292 zone->lowmem_reserve[j] = 0; 4293 4294 idx = j; 4295 while (idx) { 4296 struct zone *lower_zone; 4297 4298 idx--; 4299 4300 if (sysctl_lowmem_reserve_ratio[idx] < 1) 4301 sysctl_lowmem_reserve_ratio[idx] = 1; 4302 4303 lower_zone = pgdat->node_zones + idx; 4304 lower_zone->lowmem_reserve[j] = present_pages / 4305 sysctl_lowmem_reserve_ratio[idx]; 4306 present_pages += lower_zone->present_pages; 4307 } 4308 } 4309 } 4310 4311 /* update totalreserve_pages */ 4312 calculate_totalreserve_pages(); 4313} 4314 4315/** 4316 * setup_per_zone_pages_min - called when min_free_kbytes changes. 4317 * 4318 * Ensures that the pages_{min,low,high} values for each zone are set correctly 4319 * with respect to min_free_kbytes. 4320 */ 4321void setup_per_zone_pages_min(void) 4322{ 4323 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 4324 unsigned long lowmem_pages = 0; 4325 struct zone *zone; 4326 unsigned long flags; 4327 4328 /* Calculate total number of !ZONE_HIGHMEM pages */ 4329 for_each_zone(zone) { 4330 if (!is_highmem(zone)) 4331 lowmem_pages += zone->present_pages; 4332 } 4333 4334 for_each_zone(zone) { 4335 u64 tmp; 4336 4337 spin_lock_irqsave(&zone->lock, flags); 4338 tmp = (u64)pages_min * zone->present_pages; 4339 do_div(tmp, lowmem_pages); 4340 if (is_highmem(zone)) { 4341 /* 4342 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 4343 * need highmem pages, so cap pages_min to a small 4344 * value here. 4345 * 4346 * The (pages_high-pages_low) and (pages_low-pages_min) 4347 * deltas controls asynch page reclaim, and so should 4348 * not be capped for highmem. 4349 */ 4350 int min_pages; 4351 4352 min_pages = zone->present_pages / 1024; 4353 if (min_pages < SWAP_CLUSTER_MAX) 4354 min_pages = SWAP_CLUSTER_MAX; 4355 if (min_pages > 128) 4356 min_pages = 128; 4357 zone->pages_min = min_pages; 4358 } else { 4359 /* 4360 * If it's a lowmem zone, reserve a number of pages 4361 * proportionate to the zone's size. 4362 */ 4363 zone->pages_min = tmp; 4364 } 4365 4366 zone->pages_low = zone->pages_min + (tmp >> 2); 4367 zone->pages_high = zone->pages_min + (tmp >> 1); 4368 setup_zone_migrate_reserve(zone); 4369 spin_unlock_irqrestore(&zone->lock, flags); 4370 } 4371 4372 /* update totalreserve_pages */ 4373 calculate_totalreserve_pages(); 4374} 4375 4376/** 4377 * setup_per_zone_inactive_ratio - called when min_free_kbytes changes. 4378 * 4379 * The inactive anon list should be small enough that the VM never has to 4380 * do too much work, but large enough that each inactive page has a chance 4381 * to be referenced again before it is swapped out. 4382 * 4383 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to 4384 * INACTIVE_ANON pages on this zone's LRU, maintained by the 4385 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of 4386 * the anonymous pages are kept on the inactive list. 
4387 * 4388 * total target max 4389 * memory ratio inactive anon 4390 * ------------------------------------- 4391 * 10MB 1 5MB 4392 * 100MB 1 50MB 4393 * 1GB 3 250MB 4394 * 10GB 10 0.9GB 4395 * 100GB 31 3GB 4396 * 1TB 101 10GB 4397 * 10TB 320 32GB 4398 */ 4399static void setup_per_zone_inactive_ratio(void) 4400{ 4401 struct zone *zone; 4402 4403 for_each_zone(zone) { 4404 unsigned int gb, ratio; 4405 4406 /* Zone size in gigabytes */ 4407 gb = zone->present_pages >> (30 - PAGE_SHIFT); 4408 ratio = int_sqrt(10 * gb); 4409 if (!ratio) 4410 ratio = 1; 4411 4412 zone->inactive_ratio = ratio; 4413 } 4414} 4415 4416/* 4417 * Initialise min_free_kbytes. 4418 * 4419 * For small machines we want it small (128k min). For large machines 4420 * we want it large (64MB max). But it is not linear, because network 4421 * bandwidth does not increase linearly with machine size. We use 4422 * 4423 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 4424 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 4425 * 4426 * which yields 4427 * 4428 * 16MB: 512k 4429 * 32MB: 724k 4430 * 64MB: 1024k 4431 * 128MB: 1448k 4432 * 256MB: 2048k 4433 * 512MB: 2896k 4434 * 1024MB: 4096k 4435 * 2048MB: 5792k 4436 * 4096MB: 8192k 4437 * 8192MB: 11584k 4438 * 16384MB: 16384k 4439 */ 4440static int __init init_per_zone_pages_min(void) 4441{ 4442 unsigned long lowmem_kbytes; 4443 4444 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 4445 4446 min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 4447 if (min_free_kbytes < 128) 4448 min_free_kbytes = 128; 4449 if (min_free_kbytes > 65536) 4450 min_free_kbytes = 65536; 4451 setup_per_zone_pages_min(); 4452 setup_per_zone_lowmem_reserve(); 4453 setup_per_zone_inactive_ratio(); 4454 return 0; 4455} 4456module_init(init_per_zone_pages_min) 4457 4458/* 4459 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 4460 * that we can call two helper functions whenever min_free_kbytes 4461 * changes. 4462 */ 4463int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 4464 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4465{ 4466 proc_dointvec(table, write, file, buffer, length, ppos); 4467 if (write) 4468 setup_per_zone_pages_min(); 4469 return 0; 4470} 4471 4472#ifdef CONFIG_NUMA 4473int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, 4474 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4475{ 4476 struct zone *zone; 4477 int rc; 4478 4479 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4480 if (rc) 4481 return rc; 4482 4483 for_each_zone(zone) 4484 zone->min_unmapped_pages = (zone->present_pages * 4485 sysctl_min_unmapped_ratio) / 100; 4486 return 0; 4487} 4488 4489int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write, 4490 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4491{ 4492 struct zone *zone; 4493 int rc; 4494 4495 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4496 if (rc) 4497 return rc; 4498 4499 for_each_zone(zone) 4500 zone->min_slab_pages = (zone->present_pages * 4501 sysctl_min_slab_ratio) / 100; 4502 return 0; 4503} 4504#endif 4505 4506/* 4507 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 4508 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 4509 * whenever sysctl_lowmem_reserve_ratio changes. 4510 * 4511 * The reserve ratio obviously has absolutely no relation with the 4512 * pages_min watermarks. 
The lowmem reserve ratio can only make sense 4513 * if in function of the boot time zone sizes. 4514 */ 4515int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 4516 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4517{ 4518 proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4519 setup_per_zone_lowmem_reserve(); 4520 return 0; 4521} 4522 4523/* 4524 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 4525 * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist 4526 * can have before it gets flushed back to buddy allocator. 4527 */ 4528 4529int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 4530 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4531{ 4532 struct zone *zone; 4533 unsigned int cpu; 4534 int ret; 4535 4536 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4537 if (!write || (ret == -EINVAL)) 4538 return ret; 4539 for_each_zone(zone) { 4540 for_each_online_cpu(cpu) { 4541 unsigned long high; 4542 high = zone->present_pages / percpu_pagelist_fraction; 4543 setup_pagelist_highmark(zone_pcp(zone, cpu), high); 4544 } 4545 } 4546 return 0; 4547} 4548 4549int hashdist = HASHDIST_DEFAULT; 4550 4551#ifdef CONFIG_NUMA 4552static int __init set_hashdist(char *str) 4553{ 4554 if (!str) 4555 return 0; 4556 hashdist = simple_strtoul(str, &str, 0); 4557 return 1; 4558} 4559__setup("hashdist=", set_hashdist); 4560#endif 4561 4562/* 4563 * allocate a large system hash table from bootmem 4564 * - it is assumed that the hash table must contain an exact power-of-2 4565 * quantity of entries 4566 * - limit is the number of hash buckets, not the total allocation size 4567 */ 4568void *__init alloc_large_system_hash(const char *tablename, 4569 unsigned long bucketsize, 4570 unsigned long numentries, 4571 int scale, 4572 int flags, 4573 unsigned int *_hash_shift, 4574 unsigned int *_hash_mask, 4575 unsigned long limit) 4576{ 4577 unsigned long long max = limit; 4578 unsigned long log2qty, size; 4579 void *table = NULL; 4580 4581 /* allow the kernel cmdline to have a say */ 4582 if (!numentries) { 4583 /* round applicable memory size up to nearest megabyte */ 4584 numentries = nr_kernel_pages; 4585 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 4586 numentries >>= 20 - PAGE_SHIFT; 4587 numentries <<= 20 - PAGE_SHIFT; 4588 4589 /* limit to 1 bucket per 2^scale bytes of low memory */ 4590 if (scale > PAGE_SHIFT) 4591 numentries >>= (scale - PAGE_SHIFT); 4592 else 4593 numentries <<= (PAGE_SHIFT - scale); 4594 4595 /* Make sure we've got at least a 0-order allocation.. 
*/ 4596 if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 4597 numentries = PAGE_SIZE / bucketsize; 4598 } 4599 numentries = roundup_pow_of_two(numentries); 4600 4601 /* limit allocation size to 1/16 total memory by default */ 4602 if (max == 0) { 4603 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 4604 do_div(max, bucketsize); 4605 } 4606 4607 if (numentries > max) 4608 numentries = max; 4609 4610 log2qty = ilog2(numentries); 4611 4612 do { 4613 size = bucketsize << log2qty; 4614 if (flags & HASH_EARLY) 4615 table = alloc_bootmem_nopanic(size); 4616 else if (hashdist) 4617 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 4618 else { 4619 unsigned long order = get_order(size); 4620 4621 if (order < MAX_ORDER) 4622 table = (void *)__get_free_pages(GFP_ATOMIC, 4623 order); 4624 /* 4625 * If bucketsize is not a power-of-two, we may free 4626 * some pages at the end of hash table. 4627 */ 4628 if (table) { 4629 unsigned long alloc_end = (unsigned long)table + 4630 (PAGE_SIZE << order); 4631 unsigned long used = (unsigned long)table + 4632 PAGE_ALIGN(size); 4633 split_page(virt_to_page(table), order); 4634 while (used < alloc_end) { 4635 free_page(used); 4636 used += PAGE_SIZE; 4637 } 4638 } 4639 } 4640 } while (!table && size > PAGE_SIZE && --log2qty); 4641 4642 if (!table) 4643 panic("Failed to allocate %s hash table\n", tablename); 4644 4645 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n", 4646 tablename, 4647 (1U << log2qty), 4648 ilog2(size) - PAGE_SHIFT, 4649 size); 4650 4651 if (_hash_shift) 4652 *_hash_shift = log2qty; 4653 if (_hash_mask) 4654 *_hash_mask = (1 << log2qty) - 1; 4655 4656 /* 4657 * If hashdist is set, the table allocation is done with __vmalloc() 4658 * which invokes the kmemleak_alloc() callback. This function may also 4659 * be called before the slab and kmemleak are initialised when 4660 * kmemleak simply buffers the request to be executed later 4661 * (GFP_ATOMIC flag ignored in this case). 
4662 */ 4663 if (!hashdist) 4664 kmemleak_alloc(table, size, 1, GFP_ATOMIC); 4665 4666 return table; 4667} 4668 4669/* Return a pointer to the bitmap storing bits affecting a block of pages */ 4670static inline unsigned long *get_pageblock_bitmap(struct zone *zone, 4671 unsigned long pfn) 4672{ 4673#ifdef CONFIG_SPARSEMEM 4674 return __pfn_to_section(pfn)->pageblock_flags; 4675#else 4676 return zone->pageblock_flags; 4677#endif /* CONFIG_SPARSEMEM */ 4678} 4679 4680static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) 4681{ 4682#ifdef CONFIG_SPARSEMEM 4683 pfn &= (PAGES_PER_SECTION-1); 4684 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 4685#else 4686 pfn = pfn - zone->zone_start_pfn; 4687 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 4688#endif /* CONFIG_SPARSEMEM */ 4689} 4690 4691/** 4692 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages 4693 * @page: The page within the block of interest 4694 * @start_bitidx: The first bit of interest to retrieve 4695 * @end_bitidx: The last bit of interest 4696 * returns pageblock_bits flags 4697 */ 4698unsigned long get_pageblock_flags_group(struct page *page, 4699 int start_bitidx, int end_bitidx) 4700{ 4701 struct zone *zone; 4702 unsigned long *bitmap; 4703 unsigned long pfn, bitidx; 4704 unsigned long flags = 0; 4705 unsigned long value = 1; 4706 4707 zone = page_zone(page); 4708 pfn = page_to_pfn(page); 4709 bitmap = get_pageblock_bitmap(zone, pfn); 4710 bitidx = pfn_to_bitidx(zone, pfn); 4711 4712 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 4713 if (test_bit(bitidx + start_bitidx, bitmap)) 4714 flags |= value; 4715 4716 return flags; 4717} 4718 4719/** 4720 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages 4721 * @page: The page within the block of interest 4722 * @start_bitidx: The first bit of interest 4723 * @end_bitidx: The last bit of interest 4724 * @flags: The flags to set 4725 */ 4726void set_pageblock_flags_group(struct page *page, unsigned long flags, 4727 int start_bitidx, int end_bitidx) 4728{ 4729 struct zone *zone; 4730 unsigned long *bitmap; 4731 unsigned long pfn, bitidx; 4732 unsigned long value = 1; 4733 4734 zone = page_zone(page); 4735 pfn = page_to_pfn(page); 4736 bitmap = get_pageblock_bitmap(zone, pfn); 4737 bitidx = pfn_to_bitidx(zone, pfn); 4738 VM_BUG_ON(pfn < zone->zone_start_pfn); 4739 VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages); 4740 4741 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 4742 if (flags & value) 4743 __set_bit(bitidx + start_bitidx, bitmap); 4744 else 4745 __clear_bit(bitidx + start_bitidx, bitmap); 4746} 4747 4748/* 4749 * This is designed as sub function...plz see page_isolation.c also. 4750 * set/clear page block's type to be ISOLATE. 4751 * page allocater never alloc memory from ISOLATE block. 4752 */ 4753 4754int set_migratetype_isolate(struct page *page) 4755{ 4756 struct zone *zone; 4757 unsigned long flags; 4758 int ret = -EBUSY; 4759 4760 zone = page_zone(page); 4761 spin_lock_irqsave(&zone->lock, flags); 4762 /* 4763 * In future, more migrate types will be able to be isolation target. 
4764 */ 4765 if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE) 4766 goto out; 4767 set_pageblock_migratetype(page, MIGRATE_ISOLATE); 4768 move_freepages_block(zone, page, MIGRATE_ISOLATE); 4769 ret = 0; 4770out: 4771 spin_unlock_irqrestore(&zone->lock, flags); 4772 if (!ret) 4773 drain_all_pages(); 4774 return ret; 4775} 4776 4777void unset_migratetype_isolate(struct page *page) 4778{ 4779 struct zone *zone; 4780 unsigned long flags; 4781 zone = page_zone(page); 4782 spin_lock_irqsave(&zone->lock, flags); 4783 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) 4784 goto out; 4785 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4786 move_freepages_block(zone, page, MIGRATE_MOVABLE); 4787out: 4788 spin_unlock_irqrestore(&zone->lock, flags); 4789} 4790 4791#ifdef CONFIG_MEMORY_HOTREMOVE 4792/* 4793 * All pages in the range must be isolated before calling this. 4794 */ 4795void 4796__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 4797{ 4798 struct page *page; 4799 struct zone *zone; 4800 int order, i; 4801 unsigned long pfn; 4802 unsigned long flags; 4803 /* find the first valid pfn */ 4804 for (pfn = start_pfn; pfn < end_pfn; pfn++) 4805 if (pfn_valid(pfn)) 4806 break; 4807 if (pfn == end_pfn) 4808 return; 4809 zone = page_zone(pfn_to_page(pfn)); 4810 spin_lock_irqsave(&zone->lock, flags); 4811 pfn = start_pfn; 4812 while (pfn < end_pfn) { 4813 if (!pfn_valid(pfn)) { 4814 pfn++; 4815 continue; 4816 } 4817 page = pfn_to_page(pfn); 4818 BUG_ON(page_count(page)); 4819 BUG_ON(!PageBuddy(page)); 4820 order = page_order(page); 4821#ifdef CONFIG_DEBUG_VM 4822 printk(KERN_INFO "remove from free list %lx %d %lx\n", 4823 pfn, 1 << order, end_pfn); 4824#endif 4825 list_del(&page->lru); 4826 rmv_page_order(page); 4827 zone->free_area[order].nr_free--; 4828 __mod_zone_page_state(zone, NR_FREE_PAGES, 4829 - (1UL << order)); 4830 for (i = 0; i < (1 << order); i++) 4831 SetPageReserved((page+i)); 4832 pfn += (1 << order); 4833 } 4834 spin_unlock_irqrestore(&zone->lock, flags); 4835} 4836#endif 4837
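/*
 * Illustrative sketch, not part of page_alloc.c: a typical way for an early
 * __init routine to size a hash table with alloc_large_system_hash() as
 * defined above. The "example" identifiers are hypothetical.
 *
 *	static struct hlist_head *example_hash;
 *	static unsigned int example_hash_shift;
 *	static unsigned int example_hash_mask;
 *
 *	static void __init example_hash_init(void)
 *	{
 *		example_hash = alloc_large_system_hash("Example cache",
 *					sizeof(struct hlist_head),
 *					0,		(0: size from nr_kernel_pages)
 *					14,		(1 bucket per 16KB of low memory)
 *					HASH_EARLY,	(use the bootmem allocator)
 *					&example_hash_shift,
 *					&example_hash_mask,
 *					0);		(0: default 1/16-of-memory limit)
 *	}
 *
 * On return the table holds 1 << example_hash_shift buckets and
 * example_hash_mask is (1 << example_hash_shift) - 1, ready for indexing
 * as "hash_value & example_hash_mask".
 */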