page_alloc.c revision b2e185384f534781fd22f5ce170b2ad26f97df70
/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/memcontrol.h>
#include <linux/debugobjects.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata
dma_reserve; 126 127#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 128 /* 129 * MAX_ACTIVE_REGIONS determines the maximum number of distinct 130 * ranges of memory (RAM) that may be registered with add_active_range(). 131 * Ranges passed to add_active_range() will be merged if possible 132 * so the number of times add_active_range() can be called is 133 * related to the number of nodes and the number of holes 134 */ 135 #ifdef CONFIG_MAX_ACTIVE_REGIONS 136 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ 137 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS 138 #else 139 #if MAX_NUMNODES >= 32 140 /* If there can be many nodes, allow up to 50 holes per node */ 141 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) 142 #else 143 /* By default, allow up to 256 distinct regions */ 144 #define MAX_ACTIVE_REGIONS 256 145 #endif 146 #endif 147 148 static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; 149 static int __meminitdata nr_nodemap_entries; 150 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; 151 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; 152#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 153 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES]; 154 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES]; 155#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 156 static unsigned long __initdata required_kernelcore; 157 static unsigned long __initdata required_movablecore; 158 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; 159 160 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 161 int movable_zone; 162 EXPORT_SYMBOL(movable_zone); 163#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 164 165#if MAX_NUMNODES > 1 166int nr_node_ids __read_mostly = MAX_NUMNODES; 167EXPORT_SYMBOL(nr_node_ids); 168#endif 169 170int page_group_by_mobility_disabled __read_mostly; 171 172static void set_pageblock_migratetype(struct page *page, int migratetype) 173{ 174 set_pageblock_flags_group(page, (unsigned long)migratetype, 175 PB_migrate, PB_migrate_end); 176} 177 178#ifdef CONFIG_DEBUG_VM 179static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 180{ 181 int ret = 0; 182 unsigned seq; 183 unsigned long pfn = page_to_pfn(page); 184 185 do { 186 seq = zone_span_seqbegin(zone); 187 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 188 ret = 1; 189 else if (pfn < zone->zone_start_pfn) 190 ret = 1; 191 } while (zone_span_seqretry(zone, seq)); 192 193 return ret; 194} 195 196static int page_is_consistent(struct zone *zone, struct page *page) 197{ 198 if (!pfn_valid_within(page_to_pfn(page))) 199 return 0; 200 if (zone != page_zone(page)) 201 return 0; 202 203 return 1; 204} 205/* 206 * Temporary debugging check for pages not lying within a given zone. 
207 */ 208static int bad_range(struct zone *zone, struct page *page) 209{ 210 if (page_outside_zone_boundaries(zone, page)) 211 return 1; 212 if (!page_is_consistent(zone, page)) 213 return 1; 214 215 return 0; 216} 217#else 218static inline int bad_range(struct zone *zone, struct page *page) 219{ 220 return 0; 221} 222#endif 223 224static void bad_page(struct page *page) 225{ 226 void *pc = page_get_page_cgroup(page); 227 228 printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG 229 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n", 230 current->comm, page, (int)(2*sizeof(unsigned long)), 231 (unsigned long)page->flags, page->mapping, 232 page_mapcount(page), page_count(page)); 233 if (pc) { 234 printk(KERN_EMERG "cgroup:%p\n", pc); 235 page_reset_bad_cgroup(page); 236 } 237 printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n" 238 KERN_EMERG "Backtrace:\n"); 239 dump_stack(); 240 page->flags &= ~PAGE_FLAGS_CLEAR_WHEN_BAD; 241 set_page_count(page, 0); 242 reset_page_mapcount(page); 243 page->mapping = NULL; 244 add_taint(TAINT_BAD_PAGE); 245} 246 247/* 248 * Higher-order pages are called "compound pages". They are structured thusly: 249 * 250 * The first PAGE_SIZE page is called the "head page". 251 * 252 * The remaining PAGE_SIZE pages are called "tail pages". 253 * 254 * All pages have PG_compound set. All pages have their ->private pointing at 255 * the head page (even the head page has this). 256 * 257 * The first tail page's ->lru.next holds the address of the compound page's 258 * put_page() function. Its ->lru.prev holds the order of allocation. 259 * This usage means that zero-order pages may not be compound. 260 */ 261 262static void free_compound_page(struct page *page) 263{ 264 __free_pages_ok(page, compound_order(page)); 265} 266 267void prep_compound_page(struct page *page, unsigned long order) 268{ 269 int i; 270 int nr_pages = 1 << order; 271 struct page *p = page + 1; 272 273 set_compound_page_dtor(page, free_compound_page); 274 set_compound_order(page, order); 275 __SetPageHead(page); 276 for (i = 1; i < nr_pages; i++, p++) { 277 if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0)) 278 p = pfn_to_page(page_to_pfn(page) + i); 279 __SetPageTail(p); 280 p->first_page = page; 281 } 282} 283 284static void destroy_compound_page(struct page *page, unsigned long order) 285{ 286 int i; 287 int nr_pages = 1 << order; 288 struct page *p = page + 1; 289 290 if (unlikely(compound_order(page) != order)) 291 bad_page(page); 292 293 if (unlikely(!PageHead(page))) 294 bad_page(page); 295 __ClearPageHead(page); 296 for (i = 1; i < nr_pages; i++, p++) { 297 if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0)) 298 p = pfn_to_page(page_to_pfn(page) + i); 299 300 if (unlikely(!PageTail(p) | 301 (p->first_page != page))) 302 bad_page(page); 303 __ClearPageTail(p); 304 } 305} 306 307static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 308{ 309 int i; 310 311 /* 312 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 313 * and __GFP_HIGHMEM from hard or soft interrupt context. 
 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}

/*
 * This function checks whether a page is free && is the buddy;
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with PG_buddy. Page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
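 *
 * A worked example of the merge loop in __free_one_page() below, using
 * the index arithmetic documented above: freeing index 12 at order 0
 * finds buddy 12 ^ 1 = 13; if that page is free at order 0 the pair
 * merges at combined index 12 & ~1 = 12, order 1.  The next buddy is
 * 12 ^ 2 = 14; if that order-1 block is free the merge continues at
 * index 12, order 2, whose buddy is 12 ^ 4 = 8, and so on until a buddy
 * is found busy (or is not a buddy at all) or order MAX_ORDER-1 is
 * reached.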
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;
	int migratetype = get_pageblock_migratetype(page);

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;

		/* Our buddy is free, merge with it and move up one order. */
		list_del(&buddy->lru);
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru,
		&zone->free_area[order].free_list[migratetype]);
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_get_page_cgroup(page) != NULL) |
		(page_count(page) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	if (PageSwapBacked(page))
		__ClearPageSwapBacked(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
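 *
 * zone->lock is taken once here for the whole batch, which is what makes
 * bulk freeing from the per-cpu lists cheaper than freeing page by page.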
485 */ 486static void free_pages_bulk(struct zone *zone, int count, 487 struct list_head *list, int order) 488{ 489 spin_lock(&zone->lock); 490 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); 491 zone->pages_scanned = 0; 492 while (count--) { 493 struct page *page; 494 495 VM_BUG_ON(list_empty(list)); 496 page = list_entry(list->prev, struct page, lru); 497 /* have to delete it as __free_one_page list manipulates */ 498 list_del(&page->lru); 499 __free_one_page(page, zone, order); 500 } 501 spin_unlock(&zone->lock); 502} 503 504static void free_one_page(struct zone *zone, struct page *page, int order) 505{ 506 spin_lock(&zone->lock); 507 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); 508 zone->pages_scanned = 0; 509 __free_one_page(page, zone, order); 510 spin_unlock(&zone->lock); 511} 512 513static void __free_pages_ok(struct page *page, unsigned int order) 514{ 515 unsigned long flags; 516 int i; 517 int reserved = 0; 518 519 for (i = 0 ; i < (1 << order) ; ++i) 520 reserved += free_pages_check(page + i); 521 if (reserved) 522 return; 523 524 if (!PageHighMem(page)) { 525 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); 526 debug_check_no_obj_freed(page_address(page), 527 PAGE_SIZE << order); 528 } 529 arch_free_page(page, order); 530 kernel_map_pages(page, 1 << order, 0); 531 532 local_irq_save(flags); 533 __count_vm_events(PGFREE, 1 << order); 534 free_one_page(page_zone(page), page, order); 535 local_irq_restore(flags); 536} 537 538/* 539 * permit the bootmem allocator to evade page validation on high-order frees 540 */ 541void __meminit __free_pages_bootmem(struct page *page, unsigned int order) 542{ 543 if (order == 0) { 544 __ClearPageReserved(page); 545 set_page_count(page, 0); 546 set_page_refcounted(page); 547 __free_page(page); 548 } else { 549 int loop; 550 551 prefetchw(page); 552 for (loop = 0; loop < BITS_PER_LONG; loop++) { 553 struct page *p = &page[loop]; 554 555 if (loop + 1 < BITS_PER_LONG) 556 prefetchw(p + 1); 557 __ClearPageReserved(p); 558 set_page_count(p, 0); 559 } 560 561 set_page_refcounted(page); 562 __free_pages(page, order); 563 } 564} 565 566 567/* 568 * The order of subdivision here is critical for the IO subsystem. 569 * Please do not alter this order without good reasons and regression 570 * testing. Specifically, as large blocks of memory are subdivided, 571 * the order in which smaller blocks are delivered depends on the order 572 * they're subdivided in this function. This is the primary factor 573 * influencing the order in which pages are delivered to the IO 574 * subsystem according to empirical testing, and this is also justified 575 * by considering the behavior of a buddy system containing a single 576 * large block of memory acted on by a series of small allocations. 577 * This behavior is a critical factor in sglist merging's success. 
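 *
 * For example, satisfying an order-0 request from an order-3 block splits
 * off the upper order-2, order-1 and order-0 halves in turn, putting each
 * back on its free list, and hands the lowest page of the block to the
 * caller.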
578 * 579 * -- wli 580 */ 581static inline void expand(struct zone *zone, struct page *page, 582 int low, int high, struct free_area *area, 583 int migratetype) 584{ 585 unsigned long size = 1 << high; 586 587 while (high > low) { 588 area--; 589 high--; 590 size >>= 1; 591 VM_BUG_ON(bad_range(zone, &page[size])); 592 list_add(&page[size].lru, &area->free_list[migratetype]); 593 area->nr_free++; 594 set_page_order(&page[size], high); 595 } 596} 597 598/* 599 * This page is about to be returned from the page allocator 600 */ 601static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 602{ 603 if (unlikely(page_mapcount(page) | 604 (page->mapping != NULL) | 605 (page_get_page_cgroup(page) != NULL) | 606 (page_count(page) != 0) | 607 (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) 608 bad_page(page); 609 610 /* 611 * For now, we report if PG_reserved was found set, but do not 612 * clear it, and do not allocate the page: as a safety net. 613 */ 614 if (PageReserved(page)) 615 return 1; 616 617 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim | 618 1 << PG_referenced | 1 << PG_arch_1 | 619 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk); 620 set_page_private(page, 0); 621 set_page_refcounted(page); 622 623 arch_alloc_page(page, order); 624 kernel_map_pages(page, 1 << order, 1); 625 626 if (gfp_flags & __GFP_ZERO) 627 prep_zero_page(page, order, gfp_flags); 628 629 if (order && (gfp_flags & __GFP_COMP)) 630 prep_compound_page(page, order); 631 632 return 0; 633} 634 635/* 636 * Go through the free lists for the given migratetype and remove 637 * the smallest available page from the freelists 638 */ 639static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 640 int migratetype) 641{ 642 unsigned int current_order; 643 struct free_area * area; 644 struct page *page; 645 646 /* Find a page of the appropriate size in the preferred list */ 647 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 648 area = &(zone->free_area[current_order]); 649 if (list_empty(&area->free_list[migratetype])) 650 continue; 651 652 page = list_entry(area->free_list[migratetype].next, 653 struct page, lru); 654 list_del(&page->lru); 655 rmv_page_order(page); 656 area->nr_free--; 657 __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order)); 658 expand(zone, page, order, current_order, area, migratetype); 659 return page; 660 } 661 662 return NULL; 663} 664 665 666/* 667 * This array describes the order lists are fallen back to when 668 * the free lists for the desirable migrate type are depleted 669 */ 670static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = { 671 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 672 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 673 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE }, 674 [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */ 675}; 676 677/* 678 * Move the free pages in a range to the free lists of the requested type. 679 * Note that start_page and end_pages are not aligned on a pageblock 680 * boundary. 
If alignment is required, use move_freepages_block() 681 */ 682static int move_freepages(struct zone *zone, 683 struct page *start_page, struct page *end_page, 684 int migratetype) 685{ 686 struct page *page; 687 unsigned long order; 688 int pages_moved = 0; 689 690#ifndef CONFIG_HOLES_IN_ZONE 691 /* 692 * page_zone is not safe to call in this context when 693 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant 694 * anyway as we check zone boundaries in move_freepages_block(). 695 * Remove at a later date when no bug reports exist related to 696 * grouping pages by mobility 697 */ 698 BUG_ON(page_zone(start_page) != page_zone(end_page)); 699#endif 700 701 for (page = start_page; page <= end_page;) { 702 /* Make sure we are not inadvertently changing nodes */ 703 VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone)); 704 705 if (!pfn_valid_within(page_to_pfn(page))) { 706 page++; 707 continue; 708 } 709 710 if (!PageBuddy(page)) { 711 page++; 712 continue; 713 } 714 715 order = page_order(page); 716 list_del(&page->lru); 717 list_add(&page->lru, 718 &zone->free_area[order].free_list[migratetype]); 719 page += 1 << order; 720 pages_moved += 1 << order; 721 } 722 723 return pages_moved; 724} 725 726static int move_freepages_block(struct zone *zone, struct page *page, 727 int migratetype) 728{ 729 unsigned long start_pfn, end_pfn; 730 struct page *start_page, *end_page; 731 732 start_pfn = page_to_pfn(page); 733 start_pfn = start_pfn & ~(pageblock_nr_pages-1); 734 start_page = pfn_to_page(start_pfn); 735 end_page = start_page + pageblock_nr_pages - 1; 736 end_pfn = start_pfn + pageblock_nr_pages - 1; 737 738 /* Do not cross zone boundaries */ 739 if (start_pfn < zone->zone_start_pfn) 740 start_page = page; 741 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages) 742 return 0; 743 744 return move_freepages(zone, start_page, end_page, migratetype); 745} 746 747/* Remove an element from the buddy allocator from the fallback list */ 748static struct page *__rmqueue_fallback(struct zone *zone, int order, 749 int start_migratetype) 750{ 751 struct free_area * area; 752 int current_order; 753 struct page *page; 754 int migratetype, i; 755 756 /* Find the largest possible block of pages in the other list */ 757 for (current_order = MAX_ORDER-1; current_order >= order; 758 --current_order) { 759 for (i = 0; i < MIGRATE_TYPES - 1; i++) { 760 migratetype = fallbacks[start_migratetype][i]; 761 762 /* MIGRATE_RESERVE handled later if necessary */ 763 if (migratetype == MIGRATE_RESERVE) 764 continue; 765 766 area = &(zone->free_area[current_order]); 767 if (list_empty(&area->free_list[migratetype])) 768 continue; 769 770 page = list_entry(area->free_list[migratetype].next, 771 struct page, lru); 772 area->nr_free--; 773 774 /* 775 * If breaking a large block of pages, move all free 776 * pages to the preferred allocation list. 
If falling 777 * back for a reclaimable kernel allocation, be more 778 * agressive about taking ownership of free pages 779 */ 780 if (unlikely(current_order >= (pageblock_order >> 1)) || 781 start_migratetype == MIGRATE_RECLAIMABLE) { 782 unsigned long pages; 783 pages = move_freepages_block(zone, page, 784 start_migratetype); 785 786 /* Claim the whole block if over half of it is free */ 787 if (pages >= (1 << (pageblock_order-1))) 788 set_pageblock_migratetype(page, 789 start_migratetype); 790 791 migratetype = start_migratetype; 792 } 793 794 /* Remove the page from the freelists */ 795 list_del(&page->lru); 796 rmv_page_order(page); 797 __mod_zone_page_state(zone, NR_FREE_PAGES, 798 -(1UL << order)); 799 800 if (current_order == pageblock_order) 801 set_pageblock_migratetype(page, 802 start_migratetype); 803 804 expand(zone, page, order, current_order, area, migratetype); 805 return page; 806 } 807 } 808 809 /* Use MIGRATE_RESERVE rather than fail an allocation */ 810 return __rmqueue_smallest(zone, order, MIGRATE_RESERVE); 811} 812 813/* 814 * Do the hard work of removing an element from the buddy allocator. 815 * Call me with the zone->lock already held. 816 */ 817static struct page *__rmqueue(struct zone *zone, unsigned int order, 818 int migratetype) 819{ 820 struct page *page; 821 822 page = __rmqueue_smallest(zone, order, migratetype); 823 824 if (unlikely(!page)) 825 page = __rmqueue_fallback(zone, order, migratetype); 826 827 return page; 828} 829 830/* 831 * Obtain a specified number of elements from the buddy allocator, all under 832 * a single hold of the lock, for efficiency. Add them to the supplied list. 833 * Returns the number of new pages which were placed at *list. 834 */ 835static int rmqueue_bulk(struct zone *zone, unsigned int order, 836 unsigned long count, struct list_head *list, 837 int migratetype) 838{ 839 int i; 840 841 spin_lock(&zone->lock); 842 for (i = 0; i < count; ++i) { 843 struct page *page = __rmqueue(zone, order, migratetype); 844 if (unlikely(page == NULL)) 845 break; 846 847 /* 848 * Split buddy pages returned by expand() are received here 849 * in physical page order. The page is added to the callers and 850 * list and the list head then moves forward. From the callers 851 * perspective, the linked list is ordered by page number in 852 * some conditions. This is useful for IO devices that can 853 * merge IO requests if the physical pages are ordered 854 * properly. 855 */ 856 list_add(&page->lru, list); 857 set_page_private(page, migratetype); 858 list = &page->lru; 859 } 860 spin_unlock(&zone->lock); 861 return i; 862} 863 864#ifdef CONFIG_NUMA 865/* 866 * Called from the vmstat counter updater to drain pagesets of this 867 * currently executing processor on remote nodes after they have 868 * expired. 869 * 870 * Note that this function must be called with the thread pinned to 871 * a single processor. 872 */ 873void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 874{ 875 unsigned long flags; 876 int to_drain; 877 878 local_irq_save(flags); 879 if (pcp->count >= pcp->batch) 880 to_drain = pcp->batch; 881 else 882 to_drain = pcp->count; 883 free_pages_bulk(zone, to_drain, &pcp->list, 0); 884 pcp->count -= to_drain; 885 local_irq_restore(flags); 886} 887#endif 888 889/* 890 * Drain pages of the indicated processor. 891 * 892 * The processor must either be the current processor and the 893 * thread pinned to the current processor or a processor that 894 * is not online. 
895 */ 896static void drain_pages(unsigned int cpu) 897{ 898 unsigned long flags; 899 struct zone *zone; 900 901 for_each_zone(zone) { 902 struct per_cpu_pageset *pset; 903 struct per_cpu_pages *pcp; 904 905 if (!populated_zone(zone)) 906 continue; 907 908 pset = zone_pcp(zone, cpu); 909 910 pcp = &pset->pcp; 911 local_irq_save(flags); 912 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 913 pcp->count = 0; 914 local_irq_restore(flags); 915 } 916} 917 918/* 919 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 920 */ 921void drain_local_pages(void *arg) 922{ 923 drain_pages(smp_processor_id()); 924} 925 926/* 927 * Spill all the per-cpu pages from all CPUs back into the buddy allocator 928 */ 929void drain_all_pages(void) 930{ 931 on_each_cpu(drain_local_pages, NULL, 1); 932} 933 934#ifdef CONFIG_HIBERNATION 935 936void mark_free_pages(struct zone *zone) 937{ 938 unsigned long pfn, max_zone_pfn; 939 unsigned long flags; 940 int order, t; 941 struct list_head *curr; 942 943 if (!zone->spanned_pages) 944 return; 945 946 spin_lock_irqsave(&zone->lock, flags); 947 948 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 949 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 950 if (pfn_valid(pfn)) { 951 struct page *page = pfn_to_page(pfn); 952 953 if (!swsusp_page_is_forbidden(page)) 954 swsusp_unset_page_free(page); 955 } 956 957 for_each_migratetype_order(order, t) { 958 list_for_each(curr, &zone->free_area[order].free_list[t]) { 959 unsigned long i; 960 961 pfn = page_to_pfn(list_entry(curr, struct page, lru)); 962 for (i = 0; i < (1UL << order); i++) 963 swsusp_set_page_free(pfn_to_page(pfn + i)); 964 } 965 } 966 spin_unlock_irqrestore(&zone->lock, flags); 967} 968#endif /* CONFIG_PM */ 969 970/* 971 * Free a 0-order page 972 */ 973static void free_hot_cold_page(struct page *page, int cold) 974{ 975 struct zone *zone = page_zone(page); 976 struct per_cpu_pages *pcp; 977 unsigned long flags; 978 979 if (PageAnon(page)) 980 page->mapping = NULL; 981 if (free_pages_check(page)) 982 return; 983 984 if (!PageHighMem(page)) { 985 debug_check_no_locks_freed(page_address(page), PAGE_SIZE); 986 debug_check_no_obj_freed(page_address(page), PAGE_SIZE); 987 } 988 arch_free_page(page, 0); 989 kernel_map_pages(page, 1, 0); 990 991 pcp = &zone_pcp(zone, get_cpu())->pcp; 992 local_irq_save(flags); 993 __count_vm_event(PGFREE); 994 if (cold) 995 list_add_tail(&page->lru, &pcp->list); 996 else 997 list_add(&page->lru, &pcp->list); 998 set_page_private(page, get_pageblock_migratetype(page)); 999 pcp->count++; 1000 if (pcp->count >= pcp->high) { 1001 free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 1002 pcp->count -= pcp->batch; 1003 } 1004 local_irq_restore(flags); 1005 put_cpu(); 1006} 1007 1008void free_hot_page(struct page *page) 1009{ 1010 free_hot_cold_page(page, 0); 1011} 1012 1013void free_cold_page(struct page *page) 1014{ 1015 free_hot_cold_page(page, 1); 1016} 1017 1018/* 1019 * split_page takes a non-compound higher-order page, and splits it into 1020 * n (1<<order) sub-pages: page[0..n] 1021 * Each sub-page must be freed individually. 1022 * 1023 * Note: this is probably too low level an operation for use in drivers. 1024 * Please consult with lkml before using this in your driver. 
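 *
 * For example, after split_page(page, 2) the four pages page[0]..page[3]
 * are independent order-0 pages, each holding its own reference, and each
 * must be freed separately (e.g. with __free_page()).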
1025 */ 1026void split_page(struct page *page, unsigned int order) 1027{ 1028 int i; 1029 1030 VM_BUG_ON(PageCompound(page)); 1031 VM_BUG_ON(!page_count(page)); 1032 for (i = 1; i < (1 << order); i++) 1033 set_page_refcounted(page + i); 1034} 1035 1036/* 1037 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 1038 * we cheat by calling it from here, in the order > 0 path. Saves a branch 1039 * or two. 1040 */ 1041static struct page *buffered_rmqueue(struct zone *preferred_zone, 1042 struct zone *zone, int order, gfp_t gfp_flags) 1043{ 1044 unsigned long flags; 1045 struct page *page; 1046 int cold = !!(gfp_flags & __GFP_COLD); 1047 int cpu; 1048 int migratetype = allocflags_to_migratetype(gfp_flags); 1049 1050again: 1051 cpu = get_cpu(); 1052 if (likely(order == 0)) { 1053 struct per_cpu_pages *pcp; 1054 1055 pcp = &zone_pcp(zone, cpu)->pcp; 1056 local_irq_save(flags); 1057 if (!pcp->count) { 1058 pcp->count = rmqueue_bulk(zone, 0, 1059 pcp->batch, &pcp->list, migratetype); 1060 if (unlikely(!pcp->count)) 1061 goto failed; 1062 } 1063 1064 /* Find a page of the appropriate migrate type */ 1065 if (cold) { 1066 list_for_each_entry_reverse(page, &pcp->list, lru) 1067 if (page_private(page) == migratetype) 1068 break; 1069 } else { 1070 list_for_each_entry(page, &pcp->list, lru) 1071 if (page_private(page) == migratetype) 1072 break; 1073 } 1074 1075 /* Allocate more to the pcp list if necessary */ 1076 if (unlikely(&page->lru == &pcp->list)) { 1077 pcp->count += rmqueue_bulk(zone, 0, 1078 pcp->batch, &pcp->list, migratetype); 1079 page = list_entry(pcp->list.next, struct page, lru); 1080 } 1081 1082 list_del(&page->lru); 1083 pcp->count--; 1084 } else { 1085 spin_lock_irqsave(&zone->lock, flags); 1086 page = __rmqueue(zone, order, migratetype); 1087 spin_unlock(&zone->lock); 1088 if (!page) 1089 goto failed; 1090 } 1091 1092 __count_zone_vm_events(PGALLOC, zone, 1 << order); 1093 zone_statistics(preferred_zone, zone); 1094 local_irq_restore(flags); 1095 put_cpu(); 1096 1097 VM_BUG_ON(bad_range(zone, page)); 1098 if (prep_new_page(page, order, gfp_flags)) 1099 goto again; 1100 return page; 1101 1102failed: 1103 local_irq_restore(flags); 1104 put_cpu(); 1105 return NULL; 1106} 1107 1108#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 1109#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ 1110#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ 1111#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ 1112#define ALLOC_HARDER 0x10 /* try to alloc harder */ 1113#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 1114#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 1115 1116#ifdef CONFIG_FAIL_PAGE_ALLOC 1117 1118static struct fail_page_alloc_attr { 1119 struct fault_attr attr; 1120 1121 u32 ignore_gfp_highmem; 1122 u32 ignore_gfp_wait; 1123 u32 min_order; 1124 1125#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 1126 1127 struct dentry *ignore_gfp_highmem_file; 1128 struct dentry *ignore_gfp_wait_file; 1129 struct dentry *min_order_file; 1130 1131#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 1132 1133} fail_page_alloc = { 1134 .attr = FAULT_ATTR_INITIALIZER, 1135 .ignore_gfp_wait = 1, 1136 .ignore_gfp_highmem = 1, 1137 .min_order = 1, 1138}; 1139 1140static int __init setup_fail_page_alloc(char *str) 1141{ 1142 return setup_fault_attr(&fail_page_alloc.attr, str); 1143} 1144__setup("fail_page_alloc=", setup_fail_page_alloc); 1145 1146static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 1147{ 1148 if (order < 
fail_page_alloc.min_order)
		return 0;
	if (gfp_mask & __GFP_NOFAIL)
		return 0;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return 0;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return 0;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
	int err;

	err = init_fault_attr_dentries(&fail_page_alloc.attr,
				       "fail_page_alloc");
	if (err)
		return err;
	dir = fail_page_alloc.attr.dentries.dir;

	fail_page_alloc.ignore_gfp_wait_file =
		debugfs_create_bool("ignore-gfp-wait", mode, dir,
				      &fail_page_alloc.ignore_gfp_wait);

	fail_page_alloc.ignore_gfp_highmem_file =
		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				      &fail_page_alloc.ignore_gfp_highmem);
	fail_page_alloc.min_order_file =
		debugfs_create_u32("min-order", mode, dir,
				   &fail_page_alloc.min_order);

	if (!fail_page_alloc.ignore_gfp_wait_file ||
			!fail_page_alloc.ignore_gfp_highmem_file ||
			!fail_page_alloc.min_order_file) {
		err = -ENOMEM;
		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
		debugfs_remove(fail_page_alloc.min_order_file);
		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
	}

	return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}

#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full.  See further
 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed in zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
1257 * 1258 * If the fullzones BITMAP in the zonelist cache is stale (more than 1259 * a second since last zap'd) then we zap it out (clear its bits.) 1260 * 1261 * We hold off even calling zlc_setup, until after we've checked the 1262 * first zone in the zonelist, on the theory that most allocations will 1263 * be satisfied from that first zone, so best to examine that zone as 1264 * quickly as we can. 1265 */ 1266static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1267{ 1268 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1269 nodemask_t *allowednodes; /* zonelist_cache approximation */ 1270 1271 zlc = zonelist->zlcache_ptr; 1272 if (!zlc) 1273 return NULL; 1274 1275 if (time_after(jiffies, zlc->last_full_zap + HZ)) { 1276 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 1277 zlc->last_full_zap = jiffies; 1278 } 1279 1280 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ? 1281 &cpuset_current_mems_allowed : 1282 &node_states[N_HIGH_MEMORY]; 1283 return allowednodes; 1284} 1285 1286/* 1287 * Given 'z' scanning a zonelist, run a couple of quick checks to see 1288 * if it is worth looking at further for free memory: 1289 * 1) Check that the zone isn't thought to be full (doesn't have its 1290 * bit set in the zonelist_cache fullzones BITMAP). 1291 * 2) Check that the zones node (obtained from the zonelist_cache 1292 * z_to_n[] mapping) is allowed in the passed in allowednodes mask. 1293 * Return true (non-zero) if zone is worth looking at further, or 1294 * else return false (zero) if it is not. 1295 * 1296 * This check -ignores- the distinction between various watermarks, 1297 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is 1298 * found to be full for any variation of these watermarks, it will 1299 * be considered full for up to one second by all requests, unless 1300 * we are so low on memory on all allowed nodes that we are forced 1301 * into the second scan of the zonelist. 1302 * 1303 * In the second scan we ignore this zonelist cache and exactly 1304 * apply the watermarks to all zones, even it is slower to do so. 1305 * We are low on memory in the second scan, and should leave no stone 1306 * unturned looking for a free page. 1307 */ 1308static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, 1309 nodemask_t *allowednodes) 1310{ 1311 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1312 int i; /* index of *z in zonelist zones */ 1313 int n; /* node that zone *z is on */ 1314 1315 zlc = zonelist->zlcache_ptr; 1316 if (!zlc) 1317 return 1; 1318 1319 i = z - zonelist->_zonerefs; 1320 n = zlc->z_to_n[i]; 1321 1322 /* This zone is worth trying if it is allowed but not full */ 1323 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones); 1324} 1325 1326/* 1327 * Given 'z' scanning a zonelist, set the corresponding bit in 1328 * zlc->fullzones, so that subsequent attempts to allocate a page 1329 * from that zone don't waste time re-examining it. 
1330 */ 1331static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 1332{ 1333 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1334 int i; /* index of *z in zonelist zones */ 1335 1336 zlc = zonelist->zlcache_ptr; 1337 if (!zlc) 1338 return; 1339 1340 i = z - zonelist->_zonerefs; 1341 1342 set_bit(i, zlc->fullzones); 1343} 1344 1345#else /* CONFIG_NUMA */ 1346 1347static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1348{ 1349 return NULL; 1350} 1351 1352static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, 1353 nodemask_t *allowednodes) 1354{ 1355 return 1; 1356} 1357 1358static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 1359{ 1360} 1361#endif /* CONFIG_NUMA */ 1362 1363/* 1364 * get_page_from_freelist goes through the zonelist trying to allocate 1365 * a page. 1366 */ 1367static struct page * 1368get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, 1369 struct zonelist *zonelist, int high_zoneidx, int alloc_flags) 1370{ 1371 struct zoneref *z; 1372 struct page *page = NULL; 1373 int classzone_idx; 1374 struct zone *zone, *preferred_zone; 1375 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ 1376 int zlc_active = 0; /* set if using zonelist_cache */ 1377 int did_zlc_setup = 0; /* just call zlc_setup() one time */ 1378 1379 (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask, 1380 &preferred_zone); 1381 if (!preferred_zone) 1382 return NULL; 1383 1384 classzone_idx = zone_idx(preferred_zone); 1385 1386zonelist_scan: 1387 /* 1388 * Scan zonelist, looking for a zone with enough free. 1389 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1390 */ 1391 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1392 high_zoneidx, nodemask) { 1393 if (NUMA_BUILD && zlc_active && 1394 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 1395 continue; 1396 if ((alloc_flags & ALLOC_CPUSET) && 1397 !cpuset_zone_allowed_softwall(zone, gfp_mask)) 1398 goto try_next_zone; 1399 1400 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 1401 unsigned long mark; 1402 if (alloc_flags & ALLOC_WMARK_MIN) 1403 mark = zone->pages_min; 1404 else if (alloc_flags & ALLOC_WMARK_LOW) 1405 mark = zone->pages_low; 1406 else 1407 mark = zone->pages_high; 1408 if (!zone_watermark_ok(zone, order, mark, 1409 classzone_idx, alloc_flags)) { 1410 if (!zone_reclaim_mode || 1411 !zone_reclaim(zone, gfp_mask, order)) 1412 goto this_zone_full; 1413 } 1414 } 1415 1416 page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask); 1417 if (page) 1418 break; 1419this_zone_full: 1420 if (NUMA_BUILD) 1421 zlc_mark_zone_full(zonelist, z); 1422try_next_zone: 1423 if (NUMA_BUILD && !did_zlc_setup) { 1424 /* we do zlc_setup after the first zone is tried */ 1425 allowednodes = zlc_setup(zonelist, alloc_flags); 1426 zlc_active = 1; 1427 did_zlc_setup = 1; 1428 } 1429 } 1430 1431 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) { 1432 /* Disable zlc cache for second zonelist scan */ 1433 zlc_active = 0; 1434 goto zonelist_scan; 1435 } 1436 return page; 1437} 1438 1439/* 1440 * This is the 'heart' of the zoned buddy allocator. 
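 *
 * Broad flow: try the zonelist against the low watermark first; on
 * failure wake kswapd and retry against the min watermark with adjusted
 * alloc_flags, then fall back to direct reclaim and, for orders up to
 * PAGE_ALLOC_COSTLY_ORDER, the OOM killer, retrying as the gfp flags
 * permit.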
1441 */ 1442struct page * 1443__alloc_pages_internal(gfp_t gfp_mask, unsigned int order, 1444 struct zonelist *zonelist, nodemask_t *nodemask) 1445{ 1446 const gfp_t wait = gfp_mask & __GFP_WAIT; 1447 enum zone_type high_zoneidx = gfp_zone(gfp_mask); 1448 struct zoneref *z; 1449 struct zone *zone; 1450 struct page *page; 1451 struct reclaim_state reclaim_state; 1452 struct task_struct *p = current; 1453 int do_retry; 1454 int alloc_flags; 1455 unsigned long did_some_progress; 1456 unsigned long pages_reclaimed = 0; 1457 1458 might_sleep_if(wait); 1459 1460 if (should_fail_alloc_page(gfp_mask, order)) 1461 return NULL; 1462 1463restart: 1464 z = zonelist->_zonerefs; /* the list of zones suitable for gfp_mask */ 1465 1466 if (unlikely(!z->zone)) { 1467 /* 1468 * Happens if we have an empty zonelist as a result of 1469 * GFP_THISNODE being used on a memoryless node 1470 */ 1471 return NULL; 1472 } 1473 1474 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, 1475 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET); 1476 if (page) 1477 goto got_pg; 1478 1479 /* 1480 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and 1481 * __GFP_NOWARN set) should not cause reclaim since the subsystem 1482 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim 1483 * using a larger set of nodes after it has established that the 1484 * allowed per node queues are empty and that nodes are 1485 * over allocated. 1486 */ 1487 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 1488 goto nopage; 1489 1490 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) 1491 wakeup_kswapd(zone, order); 1492 1493 /* 1494 * OK, we're below the kswapd watermark and have kicked background 1495 * reclaim. Now things get more complex, so set up alloc_flags according 1496 * to how we want to proceed. 1497 * 1498 * The caller may dip into page reserves a bit more if the caller 1499 * cannot run direct reclaim, or if the caller has realtime scheduling 1500 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 1501 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 1502 */ 1503 alloc_flags = ALLOC_WMARK_MIN; 1504 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) 1505 alloc_flags |= ALLOC_HARDER; 1506 if (gfp_mask & __GFP_HIGH) 1507 alloc_flags |= ALLOC_HIGH; 1508 if (wait) 1509 alloc_flags |= ALLOC_CPUSET; 1510 1511 /* 1512 * Go through the zonelist again. Let __GFP_HIGH and allocations 1513 * coming from realtime tasks go deeper into reserves. 1514 * 1515 * This is the last chance, in general, before the goto nopage. 1516 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 1517 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1518 */ 1519 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, 1520 high_zoneidx, alloc_flags); 1521 if (page) 1522 goto got_pg; 1523 1524 /* This allocation should allow future memory freeing. 
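 * (PF_MEMALLOC and TIF_MEMDIE callers may dip below all watermarks here,
 * on the assumption that they are about to free memory.)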
*/ 1525 1526rebalance: 1527 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) 1528 && !in_interrupt()) { 1529 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 1530nofail_alloc: 1531 /* go through the zonelist yet again, ignoring mins */ 1532 page = get_page_from_freelist(gfp_mask, nodemask, order, 1533 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS); 1534 if (page) 1535 goto got_pg; 1536 if (gfp_mask & __GFP_NOFAIL) { 1537 congestion_wait(WRITE, HZ/50); 1538 goto nofail_alloc; 1539 } 1540 } 1541 goto nopage; 1542 } 1543 1544 /* Atomic allocations - we can't balance anything */ 1545 if (!wait) 1546 goto nopage; 1547 1548 cond_resched(); 1549 1550 /* We now go into synchronous reclaim */ 1551 cpuset_memory_pressure_bump(); 1552 p->flags |= PF_MEMALLOC; 1553 reclaim_state.reclaimed_slab = 0; 1554 p->reclaim_state = &reclaim_state; 1555 1556 did_some_progress = try_to_free_pages(zonelist, order, gfp_mask); 1557 1558 p->reclaim_state = NULL; 1559 p->flags &= ~PF_MEMALLOC; 1560 1561 cond_resched(); 1562 1563 if (order != 0) 1564 drain_all_pages(); 1565 1566 if (likely(did_some_progress)) { 1567 page = get_page_from_freelist(gfp_mask, nodemask, order, 1568 zonelist, high_zoneidx, alloc_flags); 1569 if (page) 1570 goto got_pg; 1571 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 1572 if (!try_set_zone_oom(zonelist, gfp_mask)) { 1573 schedule_timeout_uninterruptible(1); 1574 goto restart; 1575 } 1576 1577 /* 1578 * Go through the zonelist yet one more time, keep 1579 * very high watermark here, this is only to catch 1580 * a parallel oom killing, we must fail if we're still 1581 * under heavy pressure. 1582 */ 1583 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, 1584 order, zonelist, high_zoneidx, 1585 ALLOC_WMARK_HIGH|ALLOC_CPUSET); 1586 if (page) { 1587 clear_zonelist_oom(zonelist, gfp_mask); 1588 goto got_pg; 1589 } 1590 1591 /* The OOM killer will not help higher order allocs so fail */ 1592 if (order > PAGE_ALLOC_COSTLY_ORDER) { 1593 clear_zonelist_oom(zonelist, gfp_mask); 1594 goto nopage; 1595 } 1596 1597 out_of_memory(zonelist, gfp_mask, order); 1598 clear_zonelist_oom(zonelist, gfp_mask); 1599 goto restart; 1600 } 1601 1602 /* 1603 * Don't let big-order allocations loop unless the caller explicitly 1604 * requests that. Wait for some write requests to complete then retry. 1605 * 1606 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER 1607 * means __GFP_NOFAIL, but that may not be true in other 1608 * implementations. 1609 * 1610 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is 1611 * specified, then we retry until we no longer reclaim any pages 1612 * (above), or we've reclaimed an order of pages at least as 1613 * large as the allocation's order. In both cases, if the 1614 * allocation still fails, we stop retrying. 1615 */ 1616 pages_reclaimed += did_some_progress; 1617 do_retry = 0; 1618 if (!(gfp_mask & __GFP_NORETRY)) { 1619 if (order <= PAGE_ALLOC_COSTLY_ORDER) { 1620 do_retry = 1; 1621 } else { 1622 if (gfp_mask & __GFP_REPEAT && 1623 pages_reclaimed < (1 << order)) 1624 do_retry = 1; 1625 } 1626 if (gfp_mask & __GFP_NOFAIL) 1627 do_retry = 1; 1628 } 1629 if (do_retry) { 1630 congestion_wait(WRITE, HZ/50); 1631 goto rebalance; 1632 } 1633 1634nopage: 1635 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 1636 printk(KERN_WARNING "%s: page allocation failure." 
1637 " order:%d, mode:0x%x\n", 1638 p->comm, order, gfp_mask); 1639 dump_stack(); 1640 show_mem(); 1641 } 1642got_pg: 1643 return page; 1644} 1645EXPORT_SYMBOL(__alloc_pages_internal); 1646 1647/* 1648 * Common helper functions. 1649 */ 1650unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 1651{ 1652 struct page * page; 1653 page = alloc_pages(gfp_mask, order); 1654 if (!page) 1655 return 0; 1656 return (unsigned long) page_address(page); 1657} 1658 1659EXPORT_SYMBOL(__get_free_pages); 1660 1661unsigned long get_zeroed_page(gfp_t gfp_mask) 1662{ 1663 struct page * page; 1664 1665 /* 1666 * get_zeroed_page() returns a 32-bit address, which cannot represent 1667 * a highmem page 1668 */ 1669 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1670 1671 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1672 if (page) 1673 return (unsigned long) page_address(page); 1674 return 0; 1675} 1676 1677EXPORT_SYMBOL(get_zeroed_page); 1678 1679void __pagevec_free(struct pagevec *pvec) 1680{ 1681 int i = pagevec_count(pvec); 1682 1683 while (--i >= 0) 1684 free_hot_cold_page(pvec->pages[i], pvec->cold); 1685} 1686 1687void __free_pages(struct page *page, unsigned int order) 1688{ 1689 if (put_page_testzero(page)) { 1690 if (order == 0) 1691 free_hot_page(page); 1692 else 1693 __free_pages_ok(page, order); 1694 } 1695} 1696 1697EXPORT_SYMBOL(__free_pages); 1698 1699void free_pages(unsigned long addr, unsigned int order) 1700{ 1701 if (addr != 0) { 1702 VM_BUG_ON(!virt_addr_valid((void *)addr)); 1703 __free_pages(virt_to_page((void *)addr), order); 1704 } 1705} 1706 1707EXPORT_SYMBOL(free_pages); 1708 1709/** 1710 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 1711 * @size: the number of bytes to allocate 1712 * @gfp_mask: GFP flags for the allocation 1713 * 1714 * This function is similar to alloc_pages(), except that it allocates the 1715 * minimum number of pages to satisfy the request. alloc_pages() can only 1716 * allocate memory in power-of-two pages. 1717 * 1718 * This function is also limited by MAX_ORDER. 1719 * 1720 * Memory allocated by this function must be released by free_pages_exact(). 1721 */ 1722void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 1723{ 1724 unsigned int order = get_order(size); 1725 unsigned long addr; 1726 1727 addr = __get_free_pages(gfp_mask, order); 1728 if (addr) { 1729 unsigned long alloc_end = addr + (PAGE_SIZE << order); 1730 unsigned long used = addr + PAGE_ALIGN(size); 1731 1732 split_page(virt_to_page(addr), order); 1733 while (used < alloc_end) { 1734 free_page(used); 1735 used += PAGE_SIZE; 1736 } 1737 } 1738 1739 return (void *)addr; 1740} 1741EXPORT_SYMBOL(alloc_pages_exact); 1742 1743/** 1744 * free_pages_exact - release memory allocated via alloc_pages_exact() 1745 * @virt: the value returned by alloc_pages_exact. 1746 * @size: size of allocation, same value as passed to alloc_pages_exact(). 1747 * 1748 * Release the memory allocated by a previous call to alloc_pages_exact. 
1749 */ 1750void free_pages_exact(void *virt, size_t size) 1751{ 1752 unsigned long addr = (unsigned long)virt; 1753 unsigned long end = addr + PAGE_ALIGN(size); 1754 1755 while (addr < end) { 1756 free_page(addr); 1757 addr += PAGE_SIZE; 1758 } 1759} 1760EXPORT_SYMBOL(free_pages_exact); 1761 1762static unsigned int nr_free_zone_pages(int offset) 1763{ 1764 struct zoneref *z; 1765 struct zone *zone; 1766 1767 /* Just pick one node, since fallback list is circular */ 1768 unsigned int sum = 0; 1769 1770 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 1771 1772 for_each_zone_zonelist(zone, z, zonelist, offset) { 1773 unsigned long size = zone->present_pages; 1774 unsigned long high = zone->pages_high; 1775 if (size > high) 1776 sum += size - high; 1777 } 1778 1779 return sum; 1780} 1781 1782/* 1783 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 1784 */ 1785unsigned int nr_free_buffer_pages(void) 1786{ 1787 return nr_free_zone_pages(gfp_zone(GFP_USER)); 1788} 1789EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 1790 1791/* 1792 * Amount of free RAM allocatable within all zones 1793 */ 1794unsigned int nr_free_pagecache_pages(void) 1795{ 1796 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 1797} 1798 1799static inline void show_node(struct zone *zone) 1800{ 1801 if (NUMA_BUILD) 1802 printk("Node %d ", zone_to_nid(zone)); 1803} 1804 1805void si_meminfo(struct sysinfo *val) 1806{ 1807 val->totalram = totalram_pages; 1808 val->sharedram = 0; 1809 val->freeram = global_page_state(NR_FREE_PAGES); 1810 val->bufferram = nr_blockdev_pages(); 1811 val->totalhigh = totalhigh_pages; 1812 val->freehigh = nr_free_highpages(); 1813 val->mem_unit = PAGE_SIZE; 1814} 1815 1816EXPORT_SYMBOL(si_meminfo); 1817 1818#ifdef CONFIG_NUMA 1819void si_meminfo_node(struct sysinfo *val, int nid) 1820{ 1821 pg_data_t *pgdat = NODE_DATA(nid); 1822 1823 val->totalram = pgdat->node_present_pages; 1824 val->freeram = node_page_state(nid, NR_FREE_PAGES); 1825#ifdef CONFIG_HIGHMEM 1826 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 1827 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 1828 NR_FREE_PAGES); 1829#else 1830 val->totalhigh = 0; 1831 val->freehigh = 0; 1832#endif 1833 val->mem_unit = PAGE_SIZE; 1834} 1835#endif 1836 1837#define K(x) ((x) << (PAGE_SHIFT-10)) 1838 1839/* 1840 * Show free area list (used inside shift_scroll-lock stuff) 1841 * We also calculate the percentage fragmentation. We do this by counting the 1842 * memory on each free list with the exception of the first item on the list. 
1843 */ 1844void show_free_areas(void) 1845{ 1846 int cpu; 1847 struct zone *zone; 1848 1849 for_each_zone(zone) { 1850 if (!populated_zone(zone)) 1851 continue; 1852 1853 show_node(zone); 1854 printk("%s per-cpu:\n", zone->name); 1855 1856 for_each_online_cpu(cpu) { 1857 struct per_cpu_pageset *pageset; 1858 1859 pageset = zone_pcp(zone, cpu); 1860 1861 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n", 1862 cpu, pageset->pcp.high, 1863 pageset->pcp.batch, pageset->pcp.count); 1864 } 1865 } 1866 1867 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n" 1868 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n", 1869 global_page_state(NR_ACTIVE), 1870 global_page_state(NR_INACTIVE), 1871 global_page_state(NR_FILE_DIRTY), 1872 global_page_state(NR_WRITEBACK), 1873 global_page_state(NR_UNSTABLE_NFS), 1874 global_page_state(NR_FREE_PAGES), 1875 global_page_state(NR_SLAB_RECLAIMABLE) + 1876 global_page_state(NR_SLAB_UNRECLAIMABLE), 1877 global_page_state(NR_FILE_MAPPED), 1878 global_page_state(NR_PAGETABLE), 1879 global_page_state(NR_BOUNCE)); 1880 1881 for_each_zone(zone) { 1882 int i; 1883 1884 if (!populated_zone(zone)) 1885 continue; 1886 1887 show_node(zone); 1888 printk("%s" 1889 " free:%lukB" 1890 " min:%lukB" 1891 " low:%lukB" 1892 " high:%lukB" 1893 " active:%lukB" 1894 " inactive:%lukB" 1895 " present:%lukB" 1896 " pages_scanned:%lu" 1897 " all_unreclaimable? %s" 1898 "\n", 1899 zone->name, 1900 K(zone_page_state(zone, NR_FREE_PAGES)), 1901 K(zone->pages_min), 1902 K(zone->pages_low), 1903 K(zone->pages_high), 1904 K(zone_page_state(zone, NR_ACTIVE)), 1905 K(zone_page_state(zone, NR_INACTIVE)), 1906 K(zone->present_pages), 1907 zone->pages_scanned, 1908 (zone_is_all_unreclaimable(zone) ? "yes" : "no") 1909 ); 1910 printk("lowmem_reserve[]:"); 1911 for (i = 0; i < MAX_NR_ZONES; i++) 1912 printk(" %lu", zone->lowmem_reserve[i]); 1913 printk("\n"); 1914 } 1915 1916 for_each_zone(zone) { 1917 unsigned long nr[MAX_ORDER], flags, order, total = 0; 1918 1919 if (!populated_zone(zone)) 1920 continue; 1921 1922 show_node(zone); 1923 printk("%s: ", zone->name); 1924 1925 spin_lock_irqsave(&zone->lock, flags); 1926 for (order = 0; order < MAX_ORDER; order++) { 1927 nr[order] = zone->free_area[order].nr_free; 1928 total += nr[order] << order; 1929 } 1930 spin_unlock_irqrestore(&zone->lock, flags); 1931 for (order = 0; order < MAX_ORDER; order++) 1932 printk("%lu*%lukB ", nr[order], K(1UL) << order); 1933 printk("= %lukB\n", K(total)); 1934 } 1935 1936 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); 1937 1938 show_swap_cache_info(); 1939} 1940 1941static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 1942{ 1943 zoneref->zone = zone; 1944 zoneref->zone_idx = zone_idx(zone); 1945} 1946 1947/* 1948 * Builds allocation fallback zone lists. 1949 * 1950 * Add all populated zones of a node to the zonelist. 1951 */ 1952static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, 1953 int nr_zones, enum zone_type zone_type) 1954{ 1955 struct zone *zone; 1956 1957 BUG_ON(zone_type >= MAX_NR_ZONES); 1958 zone_type++; 1959 1960 do { 1961 zone_type--; 1962 zone = pgdat->node_zones + zone_type; 1963 if (populated_zone(zone)) { 1964 zoneref_set_zone(zone, 1965 &zonelist->_zonerefs[nr_zones++]); 1966 check_highest_zone(zone_type); 1967 } 1968 1969 } while (zone_type); 1970 return nr_zones; 1971} 1972 1973 1974/* 1975 * zonelist_order: 1976 * 0 = automatic detection of better ordering. 
1977 * 1 = order by ([node] distance, -zonetype) 1978 * 2 = order by (-zonetype, [node] distance) 1979 * 1980 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create 1981 * the same zonelist. So only NUMA can configure this param. 1982 */ 1983#define ZONELIST_ORDER_DEFAULT 0 1984#define ZONELIST_ORDER_NODE 1 1985#define ZONELIST_ORDER_ZONE 2 1986 1987/* zonelist order in the kernel. 1988 * set_zonelist_order() will set this to NODE or ZONE. 1989 */ 1990static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; 1991static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; 1992 1993 1994#ifdef CONFIG_NUMA 1995/* The value user specified ....changed by config */ 1996static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; 1997/* string for sysctl */ 1998#define NUMA_ZONELIST_ORDER_LEN 16 1999char numa_zonelist_order[16] = "default"; 2000 2001/* 2002 * interface for configure zonelist ordering. 2003 * command line option "numa_zonelist_order" 2004 * = "[dD]efault - default, automatic configuration. 2005 * = "[nN]ode - order by node locality, then by zone within node 2006 * = "[zZ]one - order by zone, then by locality within zone 2007 */ 2008 2009static int __parse_numa_zonelist_order(char *s) 2010{ 2011 if (*s == 'd' || *s == 'D') { 2012 user_zonelist_order = ZONELIST_ORDER_DEFAULT; 2013 } else if (*s == 'n' || *s == 'N') { 2014 user_zonelist_order = ZONELIST_ORDER_NODE; 2015 } else if (*s == 'z' || *s == 'Z') { 2016 user_zonelist_order = ZONELIST_ORDER_ZONE; 2017 } else { 2018 printk(KERN_WARNING 2019 "Ignoring invalid numa_zonelist_order value: " 2020 "%s\n", s); 2021 return -EINVAL; 2022 } 2023 return 0; 2024} 2025 2026static __init int setup_numa_zonelist_order(char *s) 2027{ 2028 if (s) 2029 return __parse_numa_zonelist_order(s); 2030 return 0; 2031} 2032early_param("numa_zonelist_order", setup_numa_zonelist_order); 2033 2034/* 2035 * sysctl handler for numa_zonelist_order 2036 */ 2037int numa_zonelist_order_handler(ctl_table *table, int write, 2038 struct file *file, void __user *buffer, size_t *length, 2039 loff_t *ppos) 2040{ 2041 char saved_string[NUMA_ZONELIST_ORDER_LEN]; 2042 int ret; 2043 2044 if (write) 2045 strncpy(saved_string, (char*)table->data, 2046 NUMA_ZONELIST_ORDER_LEN); 2047 ret = proc_dostring(table, write, file, buffer, length, ppos); 2048 if (ret) 2049 return ret; 2050 if (write) { 2051 int oldval = user_zonelist_order; 2052 if (__parse_numa_zonelist_order((char*)table->data)) { 2053 /* 2054 * bogus value. restore saved string 2055 */ 2056 strncpy((char*)table->data, saved_string, 2057 NUMA_ZONELIST_ORDER_LEN); 2058 user_zonelist_order = oldval; 2059 } else if (oldval != user_zonelist_order) 2060 build_all_zonelists(); 2061 } 2062 return 0; 2063} 2064 2065 2066#define MAX_NODE_LOAD (num_online_nodes()) 2067static int node_load[MAX_NUMNODES]; 2068 2069/** 2070 * find_next_best_node - find the next node that should appear in a given node's fallback list 2071 * @node: node whose fallback list we're appending 2072 * @used_node_mask: nodemask_t of already used nodes 2073 * 2074 * We use a number of factors to determine which is the next node that should 2075 * appear on a given node's fallback list. 
The node should not have appeared 2076 * already in @node's fallback list, and it should be the next closest node 2077 * according to the distance array (which contains arbitrary distance values 2078 * from each node to each node in the system), and should also prefer nodes 2079 * with no CPUs, since presumably they'll have very little allocation pressure 2080 * on them otherwise. 2081 * It returns -1 if no node is found. 2082 */ 2083static int find_next_best_node(int node, nodemask_t *used_node_mask) 2084{ 2085 int n, val; 2086 int min_val = INT_MAX; 2087 int best_node = -1; 2088 node_to_cpumask_ptr(tmp, 0); 2089 2090 /* Use the local node if we haven't already */ 2091 if (!node_isset(node, *used_node_mask)) { 2092 node_set(node, *used_node_mask); 2093 return node; 2094 } 2095 2096 for_each_node_state(n, N_HIGH_MEMORY) { 2097 2098 /* Don't want a node to appear more than once */ 2099 if (node_isset(n, *used_node_mask)) 2100 continue; 2101 2102 /* Use the distance array to find the distance */ 2103 val = node_distance(node, n); 2104 2105 /* Penalize nodes under us ("prefer the next node") */ 2106 val += (n < node); 2107 2108 /* Give preference to headless and unused nodes */ 2109 node_to_cpumask_ptr_next(tmp, n); 2110 if (!cpus_empty(*tmp)) 2111 val += PENALTY_FOR_NODE_WITH_CPUS; 2112 2113 /* Slight preference for less loaded node */ 2114 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 2115 val += node_load[n]; 2116 2117 if (val < min_val) { 2118 min_val = val; 2119 best_node = n; 2120 } 2121 } 2122 2123 if (best_node >= 0) 2124 node_set(best_node, *used_node_mask); 2125 2126 return best_node; 2127} 2128 2129 2130/* 2131 * Build zonelists ordered by node and zones within node. 2132 * This results in maximum locality--normal zone overflows into local 2133 * DMA zone, if any--but risks exhausting DMA zone. 2134 */ 2135static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 2136{ 2137 int j; 2138 struct zonelist *zonelist; 2139 2140 zonelist = &pgdat->node_zonelists[0]; 2141 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) 2142 ; 2143 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2144 MAX_NR_ZONES - 1); 2145 zonelist->_zonerefs[j].zone = NULL; 2146 zonelist->_zonerefs[j].zone_idx = 0; 2147} 2148 2149/* 2150 * Build gfp_thisnode zonelists 2151 */ 2152static void build_thisnode_zonelists(pg_data_t *pgdat) 2153{ 2154 int j; 2155 struct zonelist *zonelist; 2156 2157 zonelist = &pgdat->node_zonelists[1]; 2158 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); 2159 zonelist->_zonerefs[j].zone = NULL; 2160 zonelist->_zonerefs[j].zone_idx = 0; 2161} 2162 2163/* 2164 * Build zonelists ordered by zone and nodes within zones. 2165 * This results in conserving DMA zone[s] until all Normal memory is 2166 * exhausted, but results in overflowing to remote node while memory 2167 * may still exist in local DMA zone. 
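 * As an illustrative example with two nodes that each have Normal and DMA
 * memory, zone order visits Normal(0), Normal(1), DMA(0), DMA(1), whereas
 * node order would visit Normal(0), DMA(0), Normal(1), DMA(1).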
2168 */
2169static int node_order[MAX_NUMNODES];
2170
2171static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2172{
2173	int pos, j, node;
2174	int zone_type;		/* needs to be signed */
2175	struct zone *z;
2176	struct zonelist *zonelist;
2177
2178	zonelist = &pgdat->node_zonelists[0];
2179	pos = 0;
2180	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2181		for (j = 0; j < nr_nodes; j++) {
2182			node = node_order[j];
2183			z = &NODE_DATA(node)->node_zones[zone_type];
2184			if (populated_zone(z)) {
2185				zoneref_set_zone(z,
2186					&zonelist->_zonerefs[pos++]);
2187				check_highest_zone(zone_type);
2188			}
2189		}
2190	}
2191	zonelist->_zonerefs[pos].zone = NULL;
2192	zonelist->_zonerefs[pos].zone_idx = 0;
2193}
2194
2195static int default_zonelist_order(void)
2196{
2197	int nid, zone_type;
2198	unsigned long low_kmem_size, total_size;
2199	struct zone *z;
2200	int average_size;
2201	/*
2202	 * ZONE_DMA and ZONE_DMA32 can be a very small area in the system.
2203	 * If they are really small and used heavily, the system can fall
2204	 * into OOM very easily.
2205	 * This function detects the ZONE_DMA/DMA32 size and configures the zone order.
2206	 */
2207	/* Is there a ZONE_NORMAL? (e.g. ppc has only a DMA zone.) */
2208	low_kmem_size = 0;
2209	total_size = 0;
2210	for_each_online_node(nid) {
2211		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2212			z = &NODE_DATA(nid)->node_zones[zone_type];
2213			if (populated_zone(z)) {
2214				if (zone_type < ZONE_NORMAL)
2215					low_kmem_size += z->present_pages;
2216				total_size += z->present_pages;
2217			}
2218		}
2219	}
2220	if (!low_kmem_size ||  /* there is no DMA area. */
2221	    low_kmem_size > total_size/2)	/* DMA/DMA32 is big. */
2222		return ZONELIST_ORDER_NODE;
2223	/*
2224	 * look into each node's config.
2225	 * If there is a node whose DMA/DMA32 memory makes up a very large
2226	 * share of its local memory, NODE_ORDER may be suitable.
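	 * As an illustrative example (made-up sizes), a node with 600MB of
	 * DMA32 out of 768MB of local memory is above the 70% threshold used
	 * below, so node ordering would be chosen to protect that node's low
	 * memory (assuming the node is not smaller than the average node).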
2227 */ 2228 average_size = total_size / 2229 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1); 2230 for_each_online_node(nid) { 2231 low_kmem_size = 0; 2232 total_size = 0; 2233 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 2234 z = &NODE_DATA(nid)->node_zones[zone_type]; 2235 if (populated_zone(z)) { 2236 if (zone_type < ZONE_NORMAL) 2237 low_kmem_size += z->present_pages; 2238 total_size += z->present_pages; 2239 } 2240 } 2241 if (low_kmem_size && 2242 total_size > average_size && /* ignore small node */ 2243 low_kmem_size > total_size * 70/100) 2244 return ZONELIST_ORDER_NODE; 2245 } 2246 return ZONELIST_ORDER_ZONE; 2247} 2248 2249static void set_zonelist_order(void) 2250{ 2251 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) 2252 current_zonelist_order = default_zonelist_order(); 2253 else 2254 current_zonelist_order = user_zonelist_order; 2255} 2256 2257static void build_zonelists(pg_data_t *pgdat) 2258{ 2259 int j, node, load; 2260 enum zone_type i; 2261 nodemask_t used_mask; 2262 int local_node, prev_node; 2263 struct zonelist *zonelist; 2264 int order = current_zonelist_order; 2265 2266 /* initialize zonelists */ 2267 for (i = 0; i < MAX_ZONELISTS; i++) { 2268 zonelist = pgdat->node_zonelists + i; 2269 zonelist->_zonerefs[0].zone = NULL; 2270 zonelist->_zonerefs[0].zone_idx = 0; 2271 } 2272 2273 /* NUMA-aware ordering of nodes */ 2274 local_node = pgdat->node_id; 2275 load = num_online_nodes(); 2276 prev_node = local_node; 2277 nodes_clear(used_mask); 2278 2279 memset(node_load, 0, sizeof(node_load)); 2280 memset(node_order, 0, sizeof(node_order)); 2281 j = 0; 2282 2283 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 2284 int distance = node_distance(local_node, node); 2285 2286 /* 2287 * If another node is sufficiently far away then it is better 2288 * to reclaim pages in a zone before going off node. 2289 */ 2290 if (distance > RECLAIM_DISTANCE) 2291 zone_reclaim_mode = 1; 2292 2293 /* 2294 * We don't want to pressure a particular node. 2295 * So adding penalty to the first node in same 2296 * distance group to make it round-robin. 2297 */ 2298 if (distance != node_distance(local_node, prev_node)) 2299 node_load[node] = load; 2300 2301 prev_node = node; 2302 load--; 2303 if (order == ZONELIST_ORDER_NODE) 2304 build_zonelists_in_node_order(pgdat, node); 2305 else 2306 node_order[j++] = node; /* remember order */ 2307 } 2308 2309 if (order == ZONELIST_ORDER_ZONE) { 2310 /* calculate node order -- i.e., DMA last! 
*/ 2311 build_zonelists_in_zone_order(pgdat, j); 2312 } 2313 2314 build_thisnode_zonelists(pgdat); 2315} 2316 2317/* Construct the zonelist performance cache - see further mmzone.h */ 2318static void build_zonelist_cache(pg_data_t *pgdat) 2319{ 2320 struct zonelist *zonelist; 2321 struct zonelist_cache *zlc; 2322 struct zoneref *z; 2323 2324 zonelist = &pgdat->node_zonelists[0]; 2325 zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 2326 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 2327 for (z = zonelist->_zonerefs; z->zone; z++) 2328 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z); 2329} 2330 2331 2332#else /* CONFIG_NUMA */ 2333 2334static void set_zonelist_order(void) 2335{ 2336 current_zonelist_order = ZONELIST_ORDER_ZONE; 2337} 2338 2339static void build_zonelists(pg_data_t *pgdat) 2340{ 2341 int node, local_node; 2342 enum zone_type j; 2343 struct zonelist *zonelist; 2344 2345 local_node = pgdat->node_id; 2346 2347 zonelist = &pgdat->node_zonelists[0]; 2348 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); 2349 2350 /* 2351 * Now we build the zonelist so that it contains the zones 2352 * of all the other nodes. 2353 * We don't want to pressure a particular node, so when 2354 * building the zones for node N, we make sure that the 2355 * zones coming right after the local ones are those from 2356 * node N+1 (modulo N) 2357 */ 2358 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 2359 if (!node_online(node)) 2360 continue; 2361 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2362 MAX_NR_ZONES - 1); 2363 } 2364 for (node = 0; node < local_node; node++) { 2365 if (!node_online(node)) 2366 continue; 2367 j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2368 MAX_NR_ZONES - 1); 2369 } 2370 2371 zonelist->_zonerefs[j].zone = NULL; 2372 zonelist->_zonerefs[j].zone_idx = 0; 2373} 2374 2375/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ 2376static void build_zonelist_cache(pg_data_t *pgdat) 2377{ 2378 pgdat->node_zonelists[0].zlcache_ptr = NULL; 2379} 2380 2381#endif /* CONFIG_NUMA */ 2382 2383/* return values int ....just for stop_machine() */ 2384static int __build_all_zonelists(void *dummy) 2385{ 2386 int nid; 2387 2388 for_each_online_node(nid) { 2389 pg_data_t *pgdat = NODE_DATA(nid); 2390 2391 build_zonelists(pgdat); 2392 build_zonelist_cache(pgdat); 2393 } 2394 return 0; 2395} 2396 2397void build_all_zonelists(void) 2398{ 2399 set_zonelist_order(); 2400 2401 if (system_state == SYSTEM_BOOTING) { 2402 __build_all_zonelists(NULL); 2403 mminit_verify_zonelist(); 2404 cpuset_init_current_mems_allowed(); 2405 } else { 2406 /* we have to stop all cpus to guarantee there is no user 2407 of zonelist */ 2408 stop_machine(__build_all_zonelists, NULL, NULL); 2409 /* cpuset refresh routine should be here */ 2410 } 2411 vm_total_pages = nr_free_pagecache_pages(); 2412 /* 2413 * Disable grouping by mobility if the number of pages in the 2414 * system is too low to allow the mechanism to work. It would be 2415 * more accurate, but expensive to check per-zone. This check is 2416 * made on memory-hotadd so a system can start with mobility 2417 * disabled and enable it later 2418 */ 2419 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 2420 page_group_by_mobility_disabled = 1; 2421 else 2422 page_group_by_mobility_disabled = 0; 2423 2424 printk("Built %i zonelists in %s order, mobility grouping %s. 
" 2425 "Total pages: %ld\n", 2426 num_online_nodes(), 2427 zonelist_order_name[current_zonelist_order], 2428 page_group_by_mobility_disabled ? "off" : "on", 2429 vm_total_pages); 2430#ifdef CONFIG_NUMA 2431 printk("Policy zone: %s\n", zone_names[policy_zone]); 2432#endif 2433} 2434 2435/* 2436 * Helper functions to size the waitqueue hash table. 2437 * Essentially these want to choose hash table sizes sufficiently 2438 * large so that collisions trying to wait on pages are rare. 2439 * But in fact, the number of active page waitqueues on typical 2440 * systems is ridiculously low, less than 200. So this is even 2441 * conservative, even though it seems large. 2442 * 2443 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 2444 * waitqueues, i.e. the size of the waitq table given the number of pages. 2445 */ 2446#define PAGES_PER_WAITQUEUE 256 2447 2448#ifndef CONFIG_MEMORY_HOTPLUG 2449static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 2450{ 2451 unsigned long size = 1; 2452 2453 pages /= PAGES_PER_WAITQUEUE; 2454 2455 while (size < pages) 2456 size <<= 1; 2457 2458 /* 2459 * Once we have dozens or even hundreds of threads sleeping 2460 * on IO we've got bigger problems than wait queue collision. 2461 * Limit the size of the wait table to a reasonable size. 2462 */ 2463 size = min(size, 4096UL); 2464 2465 return max(size, 4UL); 2466} 2467#else 2468/* 2469 * A zone's size might be changed by hot-add, so it is not possible to determine 2470 * a suitable size for its wait_table. So we use the maximum size now. 2471 * 2472 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 2473 * 2474 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 2475 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 2476 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 2477 * 2478 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 2479 * or more by the traditional way. (See above). It equals: 2480 * 2481 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 2482 * ia64(16K page size) : = ( 8G + 4M)byte. 2483 * powerpc (64K page size) : = (32G +16M)byte. 2484 */ 2485static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 2486{ 2487 return 4096UL; 2488} 2489#endif 2490 2491/* 2492 * This is an integer logarithm so that shifts can be used later 2493 * to extract the more random high bits from the multiplicative 2494 * hash function before the remainder is taken. 2495 */ 2496static inline unsigned long wait_table_bits(unsigned long size) 2497{ 2498 return ffz(~size); 2499} 2500 2501#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 2502 2503/* 2504 * Mark a number of pageblocks as MIGRATE_RESERVE. The number 2505 * of blocks reserved is based on zone->pages_min. The memory within the 2506 * reserve will tend to store contiguous free pages. 
Setting min_free_kbytes 2507 * higher will lead to a bigger reserve which will get freed as contiguous 2508 * blocks as reclaim kicks in 2509 */ 2510static void setup_zone_migrate_reserve(struct zone *zone) 2511{ 2512 unsigned long start_pfn, pfn, end_pfn; 2513 struct page *page; 2514 unsigned long reserve, block_migratetype; 2515 2516 /* Get the start pfn, end pfn and the number of blocks to reserve */ 2517 start_pfn = zone->zone_start_pfn; 2518 end_pfn = start_pfn + zone->spanned_pages; 2519 reserve = roundup(zone->pages_min, pageblock_nr_pages) >> 2520 pageblock_order; 2521 2522 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 2523 if (!pfn_valid(pfn)) 2524 continue; 2525 page = pfn_to_page(pfn); 2526 2527 /* Watch out for overlapping nodes */ 2528 if (page_to_nid(page) != zone_to_nid(zone)) 2529 continue; 2530 2531 /* Blocks with reserved pages will never free, skip them. */ 2532 if (PageReserved(page)) 2533 continue; 2534 2535 block_migratetype = get_pageblock_migratetype(page); 2536 2537 /* If this block is reserved, account for it */ 2538 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) { 2539 reserve--; 2540 continue; 2541 } 2542 2543 /* Suitable for reserving if this block is movable */ 2544 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) { 2545 set_pageblock_migratetype(page, MIGRATE_RESERVE); 2546 move_freepages_block(zone, page, MIGRATE_RESERVE); 2547 reserve--; 2548 continue; 2549 } 2550 2551 /* 2552 * If the reserve is met and this is a previous reserved block, 2553 * take it back 2554 */ 2555 if (block_migratetype == MIGRATE_RESERVE) { 2556 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 2557 move_freepages_block(zone, page, MIGRATE_MOVABLE); 2558 } 2559 } 2560} 2561 2562/* 2563 * Initially all pages are reserved - free ones are freed 2564 * up by free_all_bootmem() once the early boot process is 2565 * done. Non-atomic initialization, single-pass. 2566 */ 2567void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 2568 unsigned long start_pfn, enum memmap_context context) 2569{ 2570 struct page *page; 2571 unsigned long end_pfn = start_pfn + size; 2572 unsigned long pfn; 2573 struct zone *z; 2574 2575 z = &NODE_DATA(nid)->node_zones[zone]; 2576 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 2577 /* 2578 * There can be holes in boot-time mem_map[]s 2579 * handed to this function. They do not 2580 * exist on hotplugged memory. 2581 */ 2582 if (context == MEMMAP_EARLY) { 2583 if (!early_pfn_valid(pfn)) 2584 continue; 2585 if (!early_pfn_in_nid(pfn, nid)) 2586 continue; 2587 } 2588 page = pfn_to_page(pfn); 2589 set_page_links(page, zone, nid, pfn); 2590 mminit_verify_page_links(page, zone, nid, pfn); 2591 init_page_count(page); 2592 reset_page_mapcount(page); 2593 SetPageReserved(page); 2594 /* 2595 * Mark the block movable so that blocks are reserved for 2596 * movable at startup. This will force kernel allocations 2597 * to reserve their blocks rather than leaking throughout 2598 * the address space during boot when many long-lived 2599 * kernel allocations are made. Later some blocks near 2600 * the start are marked MIGRATE_RESERVE by 2601 * setup_zone_migrate_reserve() 2602 * 2603 * bitmap is created for zone's valid pfn range. but memmap 2604 * can be created for invalid pages (for alignment) 2605 * check here not to call set_pageblock_migratetype() against 2606 * pfn out of zone. 
2607 */ 2608 if ((z->zone_start_pfn <= pfn) 2609 && (pfn < z->zone_start_pfn + z->spanned_pages) 2610 && !(pfn & (pageblock_nr_pages - 1))) 2611 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 2612 2613 INIT_LIST_HEAD(&page->lru); 2614#ifdef WANT_PAGE_VIRTUAL 2615 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 2616 if (!is_highmem_idx(zone)) 2617 set_page_address(page, __va(pfn << PAGE_SHIFT)); 2618#endif 2619 } 2620} 2621 2622static void __meminit zone_init_free_lists(struct zone *zone) 2623{ 2624 int order, t; 2625 for_each_migratetype_order(order, t) { 2626 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 2627 zone->free_area[order].nr_free = 0; 2628 } 2629} 2630 2631#ifndef __HAVE_ARCH_MEMMAP_INIT 2632#define memmap_init(size, nid, zone, start_pfn) \ 2633 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 2634#endif 2635 2636static int zone_batchsize(struct zone *zone) 2637{ 2638 int batch; 2639 2640 /* 2641 * The per-cpu-pages pools are set to around 1000th of the 2642 * size of the zone. But no more than 1/2 of a meg. 2643 * 2644 * OK, so we don't know how big the cache is. So guess. 2645 */ 2646 batch = zone->present_pages / 1024; 2647 if (batch * PAGE_SIZE > 512 * 1024) 2648 batch = (512 * 1024) / PAGE_SIZE; 2649 batch /= 4; /* We effectively *= 4 below */ 2650 if (batch < 1) 2651 batch = 1; 2652 2653 /* 2654 * Clamp the batch to a 2^n - 1 value. Having a power 2655 * of 2 value was found to be more likely to have 2656 * suboptimal cache aliasing properties in some cases. 2657 * 2658 * For example if 2 tasks are alternately allocating 2659 * batches of pages, one task can end up with a lot 2660 * of pages of one half of the possible page colors 2661 * and the other with pages of the other colors. 2662 */ 2663 batch = (1 << (fls(batch + batch/2)-1)) - 1; 2664 2665 return batch; 2666} 2667 2668static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 2669{ 2670 struct per_cpu_pages *pcp; 2671 2672 memset(p, 0, sizeof(*p)); 2673 2674 pcp = &p->pcp; 2675 pcp->count = 0; 2676 pcp->high = 6 * batch; 2677 pcp->batch = max(1UL, 1 * batch); 2678 INIT_LIST_HEAD(&pcp->list); 2679} 2680 2681/* 2682 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist 2683 * to the value high for the pageset p. 2684 */ 2685 2686static void setup_pagelist_highmark(struct per_cpu_pageset *p, 2687 unsigned long high) 2688{ 2689 struct per_cpu_pages *pcp; 2690 2691 pcp = &p->pcp; 2692 pcp->high = high; 2693 pcp->batch = max(1UL, high/4); 2694 if ((high/4) > (PAGE_SHIFT * 8)) 2695 pcp->batch = PAGE_SHIFT * 8; 2696} 2697 2698 2699#ifdef CONFIG_NUMA 2700/* 2701 * Boot pageset table. One per cpu which is going to be used for all 2702 * zones and all nodes. The parameters will be set in such a way 2703 * that an item put on a list will immediately be handed over to 2704 * the buddy list. This is safe since pageset manipulation is done 2705 * with interrupts disabled. 2706 * 2707 * Some NUMA counter updates may also be caught by the boot pagesets. 2708 * 2709 * The boot_pagesets must be kept even after bootup is complete for 2710 * unused processors and/or zones. They do play a role for bootstrapping 2711 * hotplugged processors. 2712 * 2713 * zoneinfo_show() and maybe other functions do 2714 * not check if the processor is online before following the pageset pointer. 2715 * Other parts of the kernel may not check if the zone is available. 
2716 */ 2717static struct per_cpu_pageset boot_pageset[NR_CPUS]; 2718 2719/* 2720 * Dynamically allocate memory for the 2721 * per cpu pageset array in struct zone. 2722 */ 2723static int __cpuinit process_zones(int cpu) 2724{ 2725 struct zone *zone, *dzone; 2726 int node = cpu_to_node(cpu); 2727 2728 node_set_state(node, N_CPU); /* this node has a cpu */ 2729 2730 for_each_zone(zone) { 2731 2732 if (!populated_zone(zone)) 2733 continue; 2734 2735 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 2736 GFP_KERNEL, node); 2737 if (!zone_pcp(zone, cpu)) 2738 goto bad; 2739 2740 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); 2741 2742 if (percpu_pagelist_fraction) 2743 setup_pagelist_highmark(zone_pcp(zone, cpu), 2744 (zone->present_pages / percpu_pagelist_fraction)); 2745 } 2746 2747 return 0; 2748bad: 2749 for_each_zone(dzone) { 2750 if (!populated_zone(dzone)) 2751 continue; 2752 if (dzone == zone) 2753 break; 2754 kfree(zone_pcp(dzone, cpu)); 2755 zone_pcp(dzone, cpu) = NULL; 2756 } 2757 return -ENOMEM; 2758} 2759 2760static inline void free_zone_pagesets(int cpu) 2761{ 2762 struct zone *zone; 2763 2764 for_each_zone(zone) { 2765 struct per_cpu_pageset *pset = zone_pcp(zone, cpu); 2766 2767 /* Free per_cpu_pageset if it is slab allocated */ 2768 if (pset != &boot_pageset[cpu]) 2769 kfree(pset); 2770 zone_pcp(zone, cpu) = NULL; 2771 } 2772} 2773 2774static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, 2775 unsigned long action, 2776 void *hcpu) 2777{ 2778 int cpu = (long)hcpu; 2779 int ret = NOTIFY_OK; 2780 2781 switch (action) { 2782 case CPU_UP_PREPARE: 2783 case CPU_UP_PREPARE_FROZEN: 2784 if (process_zones(cpu)) 2785 ret = NOTIFY_BAD; 2786 break; 2787 case CPU_UP_CANCELED: 2788 case CPU_UP_CANCELED_FROZEN: 2789 case CPU_DEAD: 2790 case CPU_DEAD_FROZEN: 2791 free_zone_pagesets(cpu); 2792 break; 2793 default: 2794 break; 2795 } 2796 return ret; 2797} 2798 2799static struct notifier_block __cpuinitdata pageset_notifier = 2800 { &pageset_cpuup_callback, NULL, 0 }; 2801 2802void __init setup_per_cpu_pageset(void) 2803{ 2804 int err; 2805 2806 /* Initialize per_cpu_pageset for cpu 0. 2807 * A cpuup callback will do this for every cpu 2808 * as it comes online 2809 */ 2810 err = process_zones(smp_processor_id()); 2811 BUG_ON(err); 2812 register_cpu_notifier(&pageset_notifier); 2813} 2814 2815#endif 2816 2817static noinline __init_refok 2818int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 2819{ 2820 int i; 2821 struct pglist_data *pgdat = zone->zone_pgdat; 2822 size_t alloc_size; 2823 2824 /* 2825 * The per-page waitqueue mechanism uses hashed waitqueues 2826 * per zone. 2827 */ 2828 zone->wait_table_hash_nr_entries = 2829 wait_table_hash_nr_entries(zone_size_pages); 2830 zone->wait_table_bits = 2831 wait_table_bits(zone->wait_table_hash_nr_entries); 2832 alloc_size = zone->wait_table_hash_nr_entries 2833 * sizeof(wait_queue_head_t); 2834 2835 if (!slab_is_available()) { 2836 zone->wait_table = (wait_queue_head_t *) 2837 alloc_bootmem_node(pgdat, alloc_size); 2838 } else { 2839 /* 2840 * This case means that a zone whose size was 0 gets new memory 2841 * via memory hot-add. 2842 * But it may be the case that a new node was hot-added. In 2843 * this case vmalloc() will not be able to use this new node's 2844 * memory - this wait_table must be initialized to use this new 2845 * node itself as well. 2846 * To use this new node's memory, further consideration will be 2847 * necessary. 
2848 */ 2849 zone->wait_table = vmalloc(alloc_size); 2850 } 2851 if (!zone->wait_table) 2852 return -ENOMEM; 2853 2854 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) 2855 init_waitqueue_head(zone->wait_table + i); 2856 2857 return 0; 2858} 2859 2860static __meminit void zone_pcp_init(struct zone *zone) 2861{ 2862 int cpu; 2863 unsigned long batch = zone_batchsize(zone); 2864 2865 for (cpu = 0; cpu < NR_CPUS; cpu++) { 2866#ifdef CONFIG_NUMA 2867 /* Early boot. Slab allocator not functional yet */ 2868 zone_pcp(zone, cpu) = &boot_pageset[cpu]; 2869 setup_pageset(&boot_pageset[cpu],0); 2870#else 2871 setup_pageset(zone_pcp(zone,cpu), batch); 2872#endif 2873 } 2874 if (zone->present_pages) 2875 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 2876 zone->name, zone->present_pages, batch); 2877} 2878 2879__meminit int init_currently_empty_zone(struct zone *zone, 2880 unsigned long zone_start_pfn, 2881 unsigned long size, 2882 enum memmap_context context) 2883{ 2884 struct pglist_data *pgdat = zone->zone_pgdat; 2885 int ret; 2886 ret = zone_wait_table_init(zone, size); 2887 if (ret) 2888 return ret; 2889 pgdat->nr_zones = zone_idx(zone) + 1; 2890 2891 zone->zone_start_pfn = zone_start_pfn; 2892 2893 mminit_dprintk(MMINIT_TRACE, "memmap_init", 2894 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 2895 pgdat->node_id, 2896 (unsigned long)zone_idx(zone), 2897 zone_start_pfn, (zone_start_pfn + size)); 2898 2899 zone_init_free_lists(zone); 2900 2901 return 0; 2902} 2903 2904#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2905/* 2906 * Basic iterator support. Return the first range of PFNs for a node 2907 * Note: nid == MAX_NUMNODES returns first region regardless of node 2908 */ 2909static int __meminit first_active_region_index_in_nid(int nid) 2910{ 2911 int i; 2912 2913 for (i = 0; i < nr_nodemap_entries; i++) 2914 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) 2915 return i; 2916 2917 return -1; 2918} 2919 2920/* 2921 * Basic iterator support. Return the next active range of PFNs for a node 2922 * Note: nid == MAX_NUMNODES returns next region regardless of node 2923 */ 2924static int __meminit next_active_region_index_in_nid(int index, int nid) 2925{ 2926 for (index = index + 1; index < nr_nodemap_entries; index++) 2927 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) 2928 return index; 2929 2930 return -1; 2931} 2932 2933#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 2934/* 2935 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 2936 * Architectures may implement their own version but if add_active_range() 2937 * was used and there are no special requirements, this is a convenient 2938 * alternative 2939 */ 2940int __meminit early_pfn_to_nid(unsigned long pfn) 2941{ 2942 int i; 2943 2944 for (i = 0; i < nr_nodemap_entries; i++) { 2945 unsigned long start_pfn = early_node_map[i].start_pfn; 2946 unsigned long end_pfn = early_node_map[i].end_pfn; 2947 2948 if (start_pfn <= pfn && pfn < end_pfn) 2949 return early_node_map[i].nid; 2950 } 2951 2952 return 0; 2953} 2954#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 2955 2956/* Basic iterator support to walk early_node_map[] */ 2957#define for_each_active_range_index_in_nid(i, nid) \ 2958 for (i = first_active_region_index_in_nid(nid); i != -1; \ 2959 i = next_active_region_index_in_nid(i, nid)) 2960 2961/** 2962 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range 2963 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 
2964 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node 2965 * 2966 * If an architecture guarantees that all ranges registered with 2967 * add_active_ranges() contain no holes and may be freed, this 2968 * this function may be used instead of calling free_bootmem() manually. 2969 */ 2970void __init free_bootmem_with_active_regions(int nid, 2971 unsigned long max_low_pfn) 2972{ 2973 int i; 2974 2975 for_each_active_range_index_in_nid(i, nid) { 2976 unsigned long size_pages = 0; 2977 unsigned long end_pfn = early_node_map[i].end_pfn; 2978 2979 if (early_node_map[i].start_pfn >= max_low_pfn) 2980 continue; 2981 2982 if (end_pfn > max_low_pfn) 2983 end_pfn = max_low_pfn; 2984 2985 size_pages = end_pfn - early_node_map[i].start_pfn; 2986 free_bootmem_node(NODE_DATA(early_node_map[i].nid), 2987 PFN_PHYS(early_node_map[i].start_pfn), 2988 size_pages << PAGE_SHIFT); 2989 } 2990} 2991 2992void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data) 2993{ 2994 int i; 2995 int ret; 2996 2997 for_each_active_range_index_in_nid(i, nid) { 2998 ret = work_fn(early_node_map[i].start_pfn, 2999 early_node_map[i].end_pfn, data); 3000 if (ret) 3001 break; 3002 } 3003} 3004/** 3005 * sparse_memory_present_with_active_regions - Call memory_present for each active range 3006 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 3007 * 3008 * If an architecture guarantees that all ranges registered with 3009 * add_active_ranges() contain no holes and may be freed, this 3010 * function may be used instead of calling memory_present() manually. 3011 */ 3012void __init sparse_memory_present_with_active_regions(int nid) 3013{ 3014 int i; 3015 3016 for_each_active_range_index_in_nid(i, nid) 3017 memory_present(early_node_map[i].nid, 3018 early_node_map[i].start_pfn, 3019 early_node_map[i].end_pfn); 3020} 3021 3022/** 3023 * push_node_boundaries - Push node boundaries to at least the requested boundary 3024 * @nid: The nid of the node to push the boundary for 3025 * @start_pfn: The start pfn of the node 3026 * @end_pfn: The end pfn of the node 3027 * 3028 * In reserve-based hot-add, mem_map is allocated that is unused until hotadd 3029 * time. Specifically, on x86_64, SRAT will report ranges that can potentially 3030 * be hotplugged even though no physical memory exists. This function allows 3031 * an arch to push out the node boundaries so mem_map is allocated that can 3032 * be used later. 
3033 */ 3034#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 3035void __init push_node_boundaries(unsigned int nid, 3036 unsigned long start_pfn, unsigned long end_pfn) 3037{ 3038 mminit_dprintk(MMINIT_TRACE, "zoneboundary", 3039 "Entering push_node_boundaries(%u, %lu, %lu)\n", 3040 nid, start_pfn, end_pfn); 3041 3042 /* Initialise the boundary for this node if necessary */ 3043 if (node_boundary_end_pfn[nid] == 0) 3044 node_boundary_start_pfn[nid] = -1UL; 3045 3046 /* Update the boundaries */ 3047 if (node_boundary_start_pfn[nid] > start_pfn) 3048 node_boundary_start_pfn[nid] = start_pfn; 3049 if (node_boundary_end_pfn[nid] < end_pfn) 3050 node_boundary_end_pfn[nid] = end_pfn; 3051} 3052 3053/* If necessary, push the node boundary out for reserve hotadd */ 3054static void __meminit account_node_boundary(unsigned int nid, 3055 unsigned long *start_pfn, unsigned long *end_pfn) 3056{ 3057 mminit_dprintk(MMINIT_TRACE, "zoneboundary", 3058 "Entering account_node_boundary(%u, %lu, %lu)\n", 3059 nid, *start_pfn, *end_pfn); 3060 3061 /* Return if boundary information has not been provided */ 3062 if (node_boundary_end_pfn[nid] == 0) 3063 return; 3064 3065 /* Check the boundaries and update if necessary */ 3066 if (node_boundary_start_pfn[nid] < *start_pfn) 3067 *start_pfn = node_boundary_start_pfn[nid]; 3068 if (node_boundary_end_pfn[nid] > *end_pfn) 3069 *end_pfn = node_boundary_end_pfn[nid]; 3070} 3071#else 3072void __init push_node_boundaries(unsigned int nid, 3073 unsigned long start_pfn, unsigned long end_pfn) {} 3074 3075static void __meminit account_node_boundary(unsigned int nid, 3076 unsigned long *start_pfn, unsigned long *end_pfn) {} 3077#endif 3078 3079 3080/** 3081 * get_pfn_range_for_nid - Return the start and end page frames for a node 3082 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 3083 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 3084 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 3085 * 3086 * It returns the start and end page frame of a node based on information 3087 * provided by an arch calling add_active_range(). If called for a node 3088 * with no available memory, a warning is printed and the start and end 3089 * PFNs will be 0. 3090 */ 3091void __meminit get_pfn_range_for_nid(unsigned int nid, 3092 unsigned long *start_pfn, unsigned long *end_pfn) 3093{ 3094 int i; 3095 *start_pfn = -1UL; 3096 *end_pfn = 0; 3097 3098 for_each_active_range_index_in_nid(i, nid) { 3099 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn); 3100 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn); 3101 } 3102 3103 if (*start_pfn == -1UL) 3104 *start_pfn = 0; 3105 3106 /* Push the node boundaries out if requested */ 3107 account_node_boundary(nid, start_pfn, end_pfn); 3108} 3109 3110/* 3111 * This finds a zone that can be used for ZONE_MOVABLE pages. 
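 * (Typically the choice lands on the highest populated zone, e.g. ZONE_HIGHMEM
 * on 32-bit highmem configurations or ZONE_NORMAL on most 64-bit ones.)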
The
3112 * assumption is made that zones within a node are ordered in monotonically
3113 * increasing memory addresses so that the "highest" populated zone is used.
3114 */
3115static void __init find_usable_zone_for_movable(void)
3116{
3117	int zone_index;
3118	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3119		if (zone_index == ZONE_MOVABLE)
3120			continue;
3121
3122		if (arch_zone_highest_possible_pfn[zone_index] >
3123				arch_zone_lowest_possible_pfn[zone_index])
3124			break;
3125	}
3126
3127	VM_BUG_ON(zone_index == -1);
3128	movable_zone = zone_index;
3129}
3130
3131/*
3132 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3133 * because it is sized independently of the architecture. Unlike the other
3134 * zones, the starting point for ZONE_MOVABLE is not fixed. It may be different
3135 * in each node depending on the size of each node and how evenly kernelcore
3136 * is distributed. This helper function adjusts the zone ranges
3137 * provided by the architecture for a given node by using the end of the
3138 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3139 * zones within a node are in order of monotonically increasing memory addresses.
3140 */
3141static void __meminit adjust_zone_range_for_zone_movable(int nid,
3142					unsigned long zone_type,
3143					unsigned long node_start_pfn,
3144					unsigned long node_end_pfn,
3145					unsigned long *zone_start_pfn,
3146					unsigned long *zone_end_pfn)
3147{
3148	/* Only adjust if ZONE_MOVABLE is on this node */
3149	if (zone_movable_pfn[nid]) {
3150		/* Size ZONE_MOVABLE */
3151		if (zone_type == ZONE_MOVABLE) {
3152			*zone_start_pfn = zone_movable_pfn[nid];
3153			*zone_end_pfn = min(node_end_pfn,
3154				arch_zone_highest_possible_pfn[movable_zone]);
3155
3156		/* Adjust for ZONE_MOVABLE starting within this range */
3157		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3158				*zone_end_pfn > zone_movable_pfn[nid]) {
3159			*zone_end_pfn = zone_movable_pfn[nid];
3160
3161		/* Check if this whole range is within ZONE_MOVABLE */
3162		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3163			*zone_start_pfn = *zone_end_pfn;
3164	}
3165}
3166
3167/*
3168 * Return the number of pages a zone spans in a node, including holes
3169 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3170 */
3171static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3172					unsigned long zone_type,
3173					unsigned long *ignored)
3174{
3175	unsigned long node_start_pfn, node_end_pfn;
3176	unsigned long zone_start_pfn, zone_end_pfn;
3177
3178	/* Get the start and end of the node and zone */
3179	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3180	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3181	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3182	adjust_zone_range_for_zone_movable(nid, zone_type,
3183				node_start_pfn, node_end_pfn,
3184				&zone_start_pfn, &zone_end_pfn);
3185
3186	/* Check that this node has pages within the zone's required range */
3187	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3188		return 0;
3189
3190	/* Move the zone boundaries inside the node if necessary */
3191	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3192	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3193
3194	/* Return the spanned pages */
3195	return zone_end_pfn - zone_start_pfn;
3196}
3197
3198/*
3199 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3200 * then all holes in the requested range will be accounted for.
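 * For example (hypothetical PFNs), if the only active ranges are [0, 100)
 * and [200, 300), a query over [0, 300) reports 100 absent pages.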
3201 */
3202static unsigned long __meminit __absent_pages_in_range(int nid,
3203				unsigned long range_start_pfn,
3204				unsigned long range_end_pfn)
3205{
3206	int i = 0;
3207	unsigned long prev_end_pfn = 0, hole_pages = 0;
3208	unsigned long start_pfn;
3209
3210	/* Find the end_pfn of the first active range of pfns in the node */
3211	i = first_active_region_index_in_nid(nid);
3212	if (i == -1)
3213		return 0;
3214
3215	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3216
3217	/* Account for ranges before physical memory on this node */
3218	if (early_node_map[i].start_pfn > range_start_pfn)
3219		hole_pages = prev_end_pfn - range_start_pfn;
3220
3221	/* Find all holes for the zone within the node */
3222	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3223
3224		/* No need to continue if prev_end_pfn is outside the zone */
3225		if (prev_end_pfn >= range_end_pfn)
3226			break;
3227
3228		/* Make sure the end of the zone is not within the hole */
3229		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3230		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3231
3232		/* Update the hole size count and move on */
3233		if (start_pfn > range_start_pfn) {
3234			BUG_ON(prev_end_pfn > start_pfn);
3235			hole_pages += start_pfn - prev_end_pfn;
3236		}
3237		prev_end_pfn = early_node_map[i].end_pfn;
3238	}
3239
3240	/* Account for ranges past physical memory on this node */
3241	if (range_end_pfn > prev_end_pfn)
3242		hole_pages += range_end_pfn -
3243				max(range_start_pfn, prev_end_pfn);
3244
3245	return hole_pages;
3246}
3247
3248/**
3249 * absent_pages_in_range - Return number of page frames in holes within a range
3250 * @start_pfn: The start PFN to start searching for holes
3251 * @end_pfn: The end PFN to stop searching for holes
3252 *
3253 * It returns the number of page frames in memory holes within a range.
3254 */ 3255unsigned long __init absent_pages_in_range(unsigned long start_pfn, 3256 unsigned long end_pfn) 3257{ 3258 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 3259} 3260 3261/* Return the number of page frames in holes in a zone on a node */ 3262static unsigned long __meminit zone_absent_pages_in_node(int nid, 3263 unsigned long zone_type, 3264 unsigned long *ignored) 3265{ 3266 unsigned long node_start_pfn, node_end_pfn; 3267 unsigned long zone_start_pfn, zone_end_pfn; 3268 3269 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 3270 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type], 3271 node_start_pfn); 3272 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type], 3273 node_end_pfn); 3274 3275 adjust_zone_range_for_zone_movable(nid, zone_type, 3276 node_start_pfn, node_end_pfn, 3277 &zone_start_pfn, &zone_end_pfn); 3278 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 3279} 3280 3281#else 3282static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, 3283 unsigned long zone_type, 3284 unsigned long *zones_size) 3285{ 3286 return zones_size[zone_type]; 3287} 3288 3289static inline unsigned long __meminit zone_absent_pages_in_node(int nid, 3290 unsigned long zone_type, 3291 unsigned long *zholes_size) 3292{ 3293 if (!zholes_size) 3294 return 0; 3295 3296 return zholes_size[zone_type]; 3297} 3298 3299#endif 3300 3301static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 3302 unsigned long *zones_size, unsigned long *zholes_size) 3303{ 3304 unsigned long realtotalpages, totalpages = 0; 3305 enum zone_type i; 3306 3307 for (i = 0; i < MAX_NR_ZONES; i++) 3308 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, 3309 zones_size); 3310 pgdat->node_spanned_pages = totalpages; 3311 3312 realtotalpages = totalpages; 3313 for (i = 0; i < MAX_NR_ZONES; i++) 3314 realtotalpages -= 3315 zone_absent_pages_in_node(pgdat->node_id, i, 3316 zholes_size); 3317 pgdat->node_present_pages = realtotalpages; 3318 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 3319 realtotalpages); 3320} 3321 3322#ifndef CONFIG_SPARSEMEM 3323/* 3324 * Calculate the size of the zone->blockflags rounded to an unsigned long 3325 * Start by making sure zonesize is a multiple of pageblock_order by rounding 3326 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 3327 * round what is now in bits to nearest long in bits, then return it in 3328 * bytes. 3329 */ 3330static unsigned long __init usemap_size(unsigned long zonesize) 3331{ 3332 unsigned long usemapsize; 3333 3334 usemapsize = roundup(zonesize, pageblock_nr_pages); 3335 usemapsize = usemapsize >> pageblock_order; 3336 usemapsize *= NR_PAGEBLOCK_BITS; 3337 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 3338 3339 return usemapsize / 8; 3340} 3341 3342static void __init setup_usemap(struct pglist_data *pgdat, 3343 struct zone *zone, unsigned long zonesize) 3344{ 3345 unsigned long usemapsize = usemap_size(zonesize); 3346 zone->pageblock_flags = NULL; 3347 if (usemapsize) { 3348 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); 3349 memset(zone->pageblock_flags, 0, usemapsize); 3350 } 3351} 3352#else 3353static void inline setup_usemap(struct pglist_data *pgdat, 3354 struct zone *zone, unsigned long zonesize) {} 3355#endif /* CONFIG_SPARSEMEM */ 3356 3357#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 3358 3359/* Return a sensible default order for the pageblock size. 
*/ 3360static inline int pageblock_default_order(void) 3361{ 3362 if (HPAGE_SHIFT > PAGE_SHIFT) 3363 return HUGETLB_PAGE_ORDER; 3364 3365 return MAX_ORDER-1; 3366} 3367 3368/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 3369static inline void __init set_pageblock_order(unsigned int order) 3370{ 3371 /* Check that pageblock_nr_pages has not already been setup */ 3372 if (pageblock_order) 3373 return; 3374 3375 /* 3376 * Assume the largest contiguous order of interest is a huge page. 3377 * This value may be variable depending on boot parameters on IA64 3378 */ 3379 pageblock_order = order; 3380} 3381#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 3382 3383/* 3384 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 3385 * and pageblock_default_order() are unused as pageblock_order is set 3386 * at compile-time. See include/linux/pageblock-flags.h for the values of 3387 * pageblock_order based on the kernel config 3388 */ 3389static inline int pageblock_default_order(unsigned int order) 3390{ 3391 return MAX_ORDER-1; 3392} 3393#define set_pageblock_order(x) do {} while (0) 3394 3395#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 3396 3397/* 3398 * Set up the zone data structures: 3399 * - mark all pages reserved 3400 * - mark all memory queues empty 3401 * - clear the memory bitmaps 3402 */ 3403static void __paginginit free_area_init_core(struct pglist_data *pgdat, 3404 unsigned long *zones_size, unsigned long *zholes_size) 3405{ 3406 enum zone_type j; 3407 int nid = pgdat->node_id; 3408 unsigned long zone_start_pfn = pgdat->node_start_pfn; 3409 int ret; 3410 3411 pgdat_resize_init(pgdat); 3412 pgdat->nr_zones = 0; 3413 init_waitqueue_head(&pgdat->kswapd_wait); 3414 pgdat->kswapd_max_order = 0; 3415 3416 for (j = 0; j < MAX_NR_ZONES; j++) { 3417 struct zone *zone = pgdat->node_zones + j; 3418 unsigned long size, realsize, memmap_pages; 3419 enum lru_list l; 3420 3421 size = zone_spanned_pages_in_node(nid, j, zones_size); 3422 realsize = size - zone_absent_pages_in_node(nid, j, 3423 zholes_size); 3424 3425 /* 3426 * Adjust realsize so that it accounts for how much memory 3427 * is used by this zone for memmap. 
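		 * (As a rough illustration, assuming 4KB pages and a 56-byte
		 * struct page, a 1GB zone of 262144 pages spends about 3584
		 * pages, i.e. roughly 1.4%, on its memmap; the exact struct
		 * page size depends on the kernel config.)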
This affects the watermark 3428 * and per-cpu initialisations 3429 */ 3430 memmap_pages = 3431 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT; 3432 if (realsize >= memmap_pages) { 3433 realsize -= memmap_pages; 3434 mminit_dprintk(MMINIT_TRACE, "memmap_init", 3435 "%s zone: %lu pages used for memmap\n", 3436 zone_names[j], memmap_pages); 3437 } else 3438 printk(KERN_WARNING 3439 " %s zone: %lu pages exceeds realsize %lu\n", 3440 zone_names[j], memmap_pages, realsize); 3441 3442 /* Account for reserved pages */ 3443 if (j == 0 && realsize > dma_reserve) { 3444 realsize -= dma_reserve; 3445 mminit_dprintk(MMINIT_TRACE, "memmap_init", 3446 "%s zone: %lu pages reserved\n", 3447 zone_names[0], dma_reserve); 3448 } 3449 3450 if (!is_highmem_idx(j)) 3451 nr_kernel_pages += realsize; 3452 nr_all_pages += realsize; 3453 3454 zone->spanned_pages = size; 3455 zone->present_pages = realsize; 3456#ifdef CONFIG_NUMA 3457 zone->node = nid; 3458 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio) 3459 / 100; 3460 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100; 3461#endif 3462 zone->name = zone_names[j]; 3463 spin_lock_init(&zone->lock); 3464 spin_lock_init(&zone->lru_lock); 3465 zone_seqlock_init(zone); 3466 zone->zone_pgdat = pgdat; 3467 3468 zone->prev_priority = DEF_PRIORITY; 3469 3470 zone_pcp_init(zone); 3471 for_each_lru(l) { 3472 INIT_LIST_HEAD(&zone->lru[l].list); 3473 zone->lru[l].nr_scan = 0; 3474 } 3475 zap_zone_vm_stats(zone); 3476 zone->flags = 0; 3477 if (!size) 3478 continue; 3479 3480 set_pageblock_order(pageblock_default_order()); 3481 setup_usemap(pgdat, zone, size); 3482 ret = init_currently_empty_zone(zone, zone_start_pfn, 3483 size, MEMMAP_EARLY); 3484 BUG_ON(ret); 3485 memmap_init(size, nid, j, zone_start_pfn); 3486 zone_start_pfn += size; 3487 } 3488} 3489 3490static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) 3491{ 3492 /* Skip empty nodes */ 3493 if (!pgdat->node_spanned_pages) 3494 return; 3495 3496#ifdef CONFIG_FLAT_NODE_MEM_MAP 3497 /* ia64 gets its own node_mem_map, before this, without bootmem */ 3498 if (!pgdat->node_mem_map) { 3499 unsigned long size, start, end; 3500 struct page *map; 3501 3502 /* 3503 * The zone's endpoints aren't required to be MAX_ORDER 3504 * aligned but the node_mem_map endpoints must be in order 3505 * for the buddy allocator to function correctly. 
3506 */ 3507 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 3508 end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 3509 end = ALIGN(end, MAX_ORDER_NR_PAGES); 3510 size = (end - start) * sizeof(struct page); 3511 map = alloc_remap(pgdat->node_id, size); 3512 if (!map) 3513 map = alloc_bootmem_node(pgdat, size); 3514 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 3515 } 3516#ifndef CONFIG_NEED_MULTIPLE_NODES 3517 /* 3518 * With no DISCONTIG, the global mem_map is just set as node 0's 3519 */ 3520 if (pgdat == NODE_DATA(0)) { 3521 mem_map = NODE_DATA(0)->node_mem_map; 3522#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 3523 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 3524 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET); 3525#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 3526 } 3527#endif 3528#endif /* CONFIG_FLAT_NODE_MEM_MAP */ 3529} 3530 3531void __paginginit free_area_init_node(int nid, unsigned long *zones_size, 3532 unsigned long node_start_pfn, unsigned long *zholes_size) 3533{ 3534 pg_data_t *pgdat = NODE_DATA(nid); 3535 3536 pgdat->node_id = nid; 3537 pgdat->node_start_pfn = node_start_pfn; 3538 calculate_node_totalpages(pgdat, zones_size, zholes_size); 3539 3540 alloc_node_mem_map(pgdat); 3541#ifdef CONFIG_FLAT_NODE_MEM_MAP 3542 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", 3543 nid, (unsigned long)pgdat, 3544 (unsigned long)pgdat->node_mem_map); 3545#endif 3546 3547 free_area_init_core(pgdat, zones_size, zholes_size); 3548} 3549 3550#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 3551 3552#if MAX_NUMNODES > 1 3553/* 3554 * Figure out the number of possible node ids. 3555 */ 3556static void __init setup_nr_node_ids(void) 3557{ 3558 unsigned int node; 3559 unsigned int highest = 0; 3560 3561 for_each_node_mask(node, node_possible_map) 3562 highest = node; 3563 nr_node_ids = highest + 1; 3564} 3565#else 3566static inline void setup_nr_node_ids(void) 3567{ 3568} 3569#endif 3570 3571/** 3572 * add_active_range - Register a range of PFNs backed by physical memory 3573 * @nid: The node ID the range resides on 3574 * @start_pfn: The start PFN of the available physical memory 3575 * @end_pfn: The end PFN of the available physical memory 3576 * 3577 * These ranges are stored in an early_node_map[] and later used by 3578 * free_area_init_nodes() to calculate zone sizes and holes. If the 3579 * range spans a memory hole, it is up to the architecture to ensure 3580 * the memory is not freed by the bootmem allocator. If possible 3581 * the range being registered will be merged with existing ranges. 
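 * For example (hypothetical PFNs), an arch that calls
 *	add_active_range(0, 0x000, 0x100);
 *	add_active_range(0, 0x080, 0x200);
 * ends up with a single registered range [0x000, 0x200) on node 0.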
3582 */ 3583void __init add_active_range(unsigned int nid, unsigned long start_pfn, 3584 unsigned long end_pfn) 3585{ 3586 int i; 3587 3588 mminit_dprintk(MMINIT_TRACE, "memory_register", 3589 "Entering add_active_range(%d, %#lx, %#lx) " 3590 "%d entries of %d used\n", 3591 nid, start_pfn, end_pfn, 3592 nr_nodemap_entries, MAX_ACTIVE_REGIONS); 3593 3594 mminit_validate_memmodel_limits(&start_pfn, &end_pfn); 3595 3596 /* Merge with existing active regions if possible */ 3597 for (i = 0; i < nr_nodemap_entries; i++) { 3598 if (early_node_map[i].nid != nid) 3599 continue; 3600 3601 /* Skip if an existing region covers this new one */ 3602 if (start_pfn >= early_node_map[i].start_pfn && 3603 end_pfn <= early_node_map[i].end_pfn) 3604 return; 3605 3606 /* Merge forward if suitable */ 3607 if (start_pfn <= early_node_map[i].end_pfn && 3608 end_pfn > early_node_map[i].end_pfn) { 3609 early_node_map[i].end_pfn = end_pfn; 3610 return; 3611 } 3612 3613 /* Merge backward if suitable */ 3614 if (start_pfn < early_node_map[i].end_pfn && 3615 end_pfn >= early_node_map[i].start_pfn) { 3616 early_node_map[i].start_pfn = start_pfn; 3617 return; 3618 } 3619 } 3620 3621 /* Check that early_node_map is large enough */ 3622 if (i >= MAX_ACTIVE_REGIONS) { 3623 printk(KERN_CRIT "More than %d memory regions, truncating\n", 3624 MAX_ACTIVE_REGIONS); 3625 return; 3626 } 3627 3628 early_node_map[i].nid = nid; 3629 early_node_map[i].start_pfn = start_pfn; 3630 early_node_map[i].end_pfn = end_pfn; 3631 nr_nodemap_entries = i + 1; 3632} 3633 3634/** 3635 * remove_active_range - Shrink an existing registered range of PFNs 3636 * @nid: The node id the range is on that should be shrunk 3637 * @start_pfn: The new PFN of the range 3638 * @end_pfn: The new PFN of the range 3639 * 3640 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node. 3641 * The map is kept near the end physical page range that has already been 3642 * registered. This function allows an arch to shrink an existing registered 3643 * range. 
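 * For example (hypothetical PFNs), removing [0x100, 0x200) from a node whose
 * only registered range is [0x000, 0x300) leaves two entries, [0x000, 0x100)
 * and [0x200, 0x300).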
3644 */ 3645void __init remove_active_range(unsigned int nid, unsigned long start_pfn, 3646 unsigned long end_pfn) 3647{ 3648 int i, j; 3649 int removed = 0; 3650 3651 printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n", 3652 nid, start_pfn, end_pfn); 3653 3654 /* Find the old active region end and shrink */ 3655 for_each_active_range_index_in_nid(i, nid) { 3656 if (early_node_map[i].start_pfn >= start_pfn && 3657 early_node_map[i].end_pfn <= end_pfn) { 3658 /* clear it */ 3659 early_node_map[i].start_pfn = 0; 3660 early_node_map[i].end_pfn = 0; 3661 removed = 1; 3662 continue; 3663 } 3664 if (early_node_map[i].start_pfn < start_pfn && 3665 early_node_map[i].end_pfn > start_pfn) { 3666 unsigned long temp_end_pfn = early_node_map[i].end_pfn; 3667 early_node_map[i].end_pfn = start_pfn; 3668 if (temp_end_pfn > end_pfn) 3669 add_active_range(nid, end_pfn, temp_end_pfn); 3670 continue; 3671 } 3672 if (early_node_map[i].start_pfn >= start_pfn && 3673 early_node_map[i].end_pfn > end_pfn && 3674 early_node_map[i].start_pfn < end_pfn) { 3675 early_node_map[i].start_pfn = end_pfn; 3676 continue; 3677 } 3678 } 3679 3680 if (!removed) 3681 return; 3682 3683 /* remove the blank ones */ 3684 for (i = nr_nodemap_entries - 1; i > 0; i--) { 3685 if (early_node_map[i].nid != nid) 3686 continue; 3687 if (early_node_map[i].end_pfn) 3688 continue; 3689 /* we found it, get rid of it */ 3690 for (j = i; j < nr_nodemap_entries - 1; j++) 3691 memcpy(&early_node_map[j], &early_node_map[j+1], 3692 sizeof(early_node_map[j])); 3693 j = nr_nodemap_entries - 1; 3694 memset(&early_node_map[j], 0, sizeof(early_node_map[j])); 3695 nr_nodemap_entries--; 3696 } 3697} 3698 3699/** 3700 * remove_all_active_ranges - Remove all currently registered regions 3701 * 3702 * During discovery, it may be found that a table like SRAT is invalid 3703 * and an alternative discovery method must be used. This function removes 3704 * all currently registered regions. 
3705 */ 3706void __init remove_all_active_ranges(void) 3707{ 3708 memset(early_node_map, 0, sizeof(early_node_map)); 3709 nr_nodemap_entries = 0; 3710#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 3711 memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn)); 3712 memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn)); 3713#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 3714} 3715 3716/* Compare two active node_active_regions */ 3717static int __init cmp_node_active_region(const void *a, const void *b) 3718{ 3719 struct node_active_region *arange = (struct node_active_region *)a; 3720 struct node_active_region *brange = (struct node_active_region *)b; 3721 3722 /* Done this way to avoid overflows */ 3723 if (arange->start_pfn > brange->start_pfn) 3724 return 1; 3725 if (arange->start_pfn < brange->start_pfn) 3726 return -1; 3727 3728 return 0; 3729} 3730 3731/* sort the node_map by start_pfn */ 3732static void __init sort_node_map(void) 3733{ 3734 sort(early_node_map, (size_t)nr_nodemap_entries, 3735 sizeof(struct node_active_region), 3736 cmp_node_active_region, NULL); 3737} 3738 3739/* Find the lowest pfn for a node */ 3740static unsigned long __init find_min_pfn_for_node(int nid) 3741{ 3742 int i; 3743 unsigned long min_pfn = ULONG_MAX; 3744 3745 /* Assuming a sorted map, the first range found has the starting pfn */ 3746 for_each_active_range_index_in_nid(i, nid) 3747 min_pfn = min(min_pfn, early_node_map[i].start_pfn); 3748 3749 if (min_pfn == ULONG_MAX) { 3750 printk(KERN_WARNING 3751 "Could not find start_pfn for node %d\n", nid); 3752 return 0; 3753 } 3754 3755 return min_pfn; 3756} 3757 3758/** 3759 * find_min_pfn_with_active_regions - Find the minimum PFN registered 3760 * 3761 * It returns the minimum PFN based on information provided via 3762 * add_active_range(). 3763 */ 3764unsigned long __init find_min_pfn_with_active_regions(void) 3765{ 3766 return find_min_pfn_for_node(MAX_NUMNODES); 3767} 3768 3769/* 3770 * early_calculate_totalpages() 3771 * Sum pages in active regions for movable zone. 3772 * Populate N_HIGH_MEMORY for calculating usable_nodes. 3773 */ 3774static unsigned long __init early_calculate_totalpages(void) 3775{ 3776 int i; 3777 unsigned long totalpages = 0; 3778 3779 for (i = 0; i < nr_nodemap_entries; i++) { 3780 unsigned long pages = early_node_map[i].end_pfn - 3781 early_node_map[i].start_pfn; 3782 totalpages += pages; 3783 if (pages) 3784 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY); 3785 } 3786 return totalpages; 3787} 3788 3789/* 3790 * Find the PFN the Movable zone begins in each node. Kernel memory 3791 * is spread evenly between nodes as long as the nodes have enough 3792 * memory. When they don't, some nodes will have more kernelcore than 3793 * others 3794 */ 3795static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) 3796{ 3797 int i, nid; 3798 unsigned long usable_startpfn; 3799 unsigned long kernelcore_node, kernelcore_remaining; 3800 unsigned long totalpages = early_calculate_totalpages(); 3801 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); 3802 3803 /* 3804 * If movablecore was specified, calculate what size of 3805 * kernelcore that corresponds so that memory usable for 3806 * any allocation type is evenly spread. If both kernelcore 3807 * and movablecore are specified, then the value of kernelcore 3808 * will be used for required_kernelcore if it's greater than 3809 * what movablecore would have allowed. 
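	 * As an illustrative example, booting an 8GB machine with
	 * movablecore=2G and no kernelcore= option implies a
	 * required_kernelcore of roughly 6GB, which is then spread across
	 * the nodes that actually have memory.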
3810 */ 3811 if (required_movablecore) { 3812 unsigned long corepages; 3813 3814 /* 3815 * Round-up so that ZONE_MOVABLE is at least as large as what 3816 * was requested by the user 3817 */ 3818 required_movablecore = 3819 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 3820 corepages = totalpages - required_movablecore; 3821 3822 required_kernelcore = max(required_kernelcore, corepages); 3823 } 3824 3825 /* If kernelcore was not specified, there is no ZONE_MOVABLE */ 3826 if (!required_kernelcore) 3827 return; 3828 3829 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 3830 find_usable_zone_for_movable(); 3831 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 3832 3833restart: 3834 /* Spread kernelcore memory as evenly as possible throughout nodes */ 3835 kernelcore_node = required_kernelcore / usable_nodes; 3836 for_each_node_state(nid, N_HIGH_MEMORY) { 3837 /* 3838 * Recalculate kernelcore_node if the division per node 3839 * now exceeds what is necessary to satisfy the requested 3840 * amount of memory for the kernel 3841 */ 3842 if (required_kernelcore < kernelcore_node) 3843 kernelcore_node = required_kernelcore / usable_nodes; 3844 3845 /* 3846 * As the map is walked, we track how much memory is usable 3847 * by the kernel using kernelcore_remaining. When it is 3848 * 0, the rest of the node is usable by ZONE_MOVABLE 3849 */ 3850 kernelcore_remaining = kernelcore_node; 3851 3852 /* Go through each range of PFNs within this node */ 3853 for_each_active_range_index_in_nid(i, nid) { 3854 unsigned long start_pfn, end_pfn; 3855 unsigned long size_pages; 3856 3857 start_pfn = max(early_node_map[i].start_pfn, 3858 zone_movable_pfn[nid]); 3859 end_pfn = early_node_map[i].end_pfn; 3860 if (start_pfn >= end_pfn) 3861 continue; 3862 3863 /* Account for what is only usable for kernelcore */ 3864 if (start_pfn < usable_startpfn) { 3865 unsigned long kernel_pages; 3866 kernel_pages = min(end_pfn, usable_startpfn) 3867 - start_pfn; 3868 3869 kernelcore_remaining -= min(kernel_pages, 3870 kernelcore_remaining); 3871 required_kernelcore -= min(kernel_pages, 3872 required_kernelcore); 3873 3874 /* Continue if range is now fully accounted */ 3875 if (end_pfn <= usable_startpfn) { 3876 3877 /* 3878 * Push zone_movable_pfn to the end so 3879 * that if we have to rebalance 3880 * kernelcore across nodes, we will 3881 * not double account here 3882 */ 3883 zone_movable_pfn[nid] = end_pfn; 3884 continue; 3885 } 3886 start_pfn = usable_startpfn; 3887 } 3888 3889 /* 3890 * The usable PFN range for ZONE_MOVABLE is from 3891 * start_pfn->end_pfn. Calculate size_pages as the 3892 * number of pages used as kernelcore 3893 */ 3894 size_pages = end_pfn - start_pfn; 3895 if (size_pages > kernelcore_remaining) 3896 size_pages = kernelcore_remaining; 3897 zone_movable_pfn[nid] = start_pfn + size_pages; 3898 3899 /* 3900 * Some kernelcore has been met, update counts and 3901 * break if the kernelcore for this node has been 3902 * satisified 3903 */ 3904 required_kernelcore -= min(required_kernelcore, 3905 size_pages); 3906 kernelcore_remaining -= size_pages; 3907 if (!kernelcore_remaining) 3908 break; 3909 } 3910 } 3911 3912 /* 3913 * If there is still required_kernelcore, we do another pass with one 3914 * less node in the count. 
This will push zone_movable_pfn[nid] further 3915 * along on the nodes that still have memory until kernelcore is 3916 * satisified 3917 */ 3918 usable_nodes--; 3919 if (usable_nodes && required_kernelcore > usable_nodes) 3920 goto restart; 3921 3922 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 3923 for (nid = 0; nid < MAX_NUMNODES; nid++) 3924 zone_movable_pfn[nid] = 3925 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 3926} 3927 3928/* Any regular memory on that node ? */ 3929static void check_for_regular_memory(pg_data_t *pgdat) 3930{ 3931#ifdef CONFIG_HIGHMEM 3932 enum zone_type zone_type; 3933 3934 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) { 3935 struct zone *zone = &pgdat->node_zones[zone_type]; 3936 if (zone->present_pages) 3937 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY); 3938 } 3939#endif 3940} 3941 3942/** 3943 * free_area_init_nodes - Initialise all pg_data_t and zone data 3944 * @max_zone_pfn: an array of max PFNs for each zone 3945 * 3946 * This will call free_area_init_node() for each active node in the system. 3947 * Using the page ranges provided by add_active_range(), the size of each 3948 * zone in each node and their holes is calculated. If the maximum PFN 3949 * between two adjacent zones match, it is assumed that the zone is empty. 3950 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 3951 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 3952 * starts where the previous one ended. For example, ZONE_DMA32 starts 3953 * at arch_max_dma_pfn. 3954 */ 3955void __init free_area_init_nodes(unsigned long *max_zone_pfn) 3956{ 3957 unsigned long nid; 3958 int i; 3959 3960 /* Sort early_node_map as initialisation assumes it is sorted */ 3961 sort_node_map(); 3962 3963 /* Record where the zone boundaries are */ 3964 memset(arch_zone_lowest_possible_pfn, 0, 3965 sizeof(arch_zone_lowest_possible_pfn)); 3966 memset(arch_zone_highest_possible_pfn, 0, 3967 sizeof(arch_zone_highest_possible_pfn)); 3968 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 3969 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 3970 for (i = 1; i < MAX_NR_ZONES; i++) { 3971 if (i == ZONE_MOVABLE) 3972 continue; 3973 arch_zone_lowest_possible_pfn[i] = 3974 arch_zone_highest_possible_pfn[i-1]; 3975 arch_zone_highest_possible_pfn[i] = 3976 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 3977 } 3978 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; 3979 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; 3980 3981 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 3982 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 3983 find_zone_movable_pfns_for_nodes(zone_movable_pfn); 3984 3985 /* Print out the zone ranges */ 3986 printk("Zone PFN ranges:\n"); 3987 for (i = 0; i < MAX_NR_ZONES; i++) { 3988 if (i == ZONE_MOVABLE) 3989 continue; 3990 printk(" %-8s %0#10lx -> %0#10lx\n", 3991 zone_names[i], 3992 arch_zone_lowest_possible_pfn[i], 3993 arch_zone_highest_possible_pfn[i]); 3994 } 3995 3996 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 3997 printk("Movable zone start PFN for each node\n"); 3998 for (i = 0; i < MAX_NUMNODES; i++) { 3999 if (zone_movable_pfn[i]) 4000 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]); 4001 } 4002 4003 /* Print out the early_node_map[] */ 4004 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); 4005 for (i = 0; i < nr_nodemap_entries; i++) 4006 printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid, 4007 
early_node_map[i].start_pfn, 4008 early_node_map[i].end_pfn); 4009 4010 /* Initialise every node */ 4011 mminit_verify_pageflags_layout(); 4012 setup_nr_node_ids(); 4013 for_each_online_node(nid) { 4014 pg_data_t *pgdat = NODE_DATA(nid); 4015 free_area_init_node(nid, NULL, 4016 find_min_pfn_for_node(nid), NULL); 4017 4018 /* Any memory on that node */ 4019 if (pgdat->node_present_pages) 4020 node_set_state(nid, N_HIGH_MEMORY); 4021 check_for_regular_memory(pgdat); 4022 } 4023} 4024 4025static int __init cmdline_parse_core(char *p, unsigned long *core) 4026{ 4027 unsigned long long coremem; 4028 if (!p) 4029 return -EINVAL; 4030 4031 coremem = memparse(p, &p); 4032 *core = coremem >> PAGE_SHIFT; 4033 4034 /* Paranoid check that UL is enough for the coremem value */ 4035 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 4036 4037 return 0; 4038} 4039 4040/* 4041 * kernelcore=size sets the amount of memory for use for allocations that 4042 * cannot be reclaimed or migrated. 4043 */ 4044static int __init cmdline_parse_kernelcore(char *p) 4045{ 4046 return cmdline_parse_core(p, &required_kernelcore); 4047} 4048 4049/* 4050 * movablecore=size sets the amount of memory for use for allocations that 4051 * can be reclaimed or migrated. 4052 */ 4053static int __init cmdline_parse_movablecore(char *p) 4054{ 4055 return cmdline_parse_core(p, &required_movablecore); 4056} 4057 4058early_param("kernelcore", cmdline_parse_kernelcore); 4059early_param("movablecore", cmdline_parse_movablecore); 4060 4061#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 4062 4063/** 4064 * set_dma_reserve - set the specified number of pages reserved in the first zone 4065 * @new_dma_reserve: The number of pages to mark reserved 4066 * 4067 * The per-cpu batchsize and zone watermarks are determined by present_pages. 4068 * In the DMA zone, a significant percentage may be consumed by kernel image 4069 * and other unfreeable allocations which can skew the watermarks badly. This 4070 * function may optionally be used to account for unfreeable pages in the 4071 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 4072 * smaller per-cpu batchsize. 4073 */ 4074void __init set_dma_reserve(unsigned long new_dma_reserve) 4075{ 4076 dma_reserve = new_dma_reserve; 4077} 4078 4079#ifndef CONFIG_NEED_MULTIPLE_NODES 4080struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] }; 4081EXPORT_SYMBOL(contig_page_data); 4082#endif 4083 4084void __init free_area_init(unsigned long *zones_size) 4085{ 4086 free_area_init_node(0, zones_size, 4087 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 4088} 4089 4090static int page_alloc_cpu_notify(struct notifier_block *self, 4091 unsigned long action, void *hcpu) 4092{ 4093 int cpu = (unsigned long)hcpu; 4094 4095 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 4096 drain_pages(cpu); 4097 4098 /* 4099 * Spill the event counters of the dead processor 4100 * into the current processors event counters. 4101 * This artificially elevates the count of the current 4102 * processor. 4103 */ 4104 vm_events_fold_cpu(cpu); 4105 4106 /* 4107 * Zero the differential counters of the dead processor 4108 * so that the vm statistics are consistent. 4109 * 4110 * This is only okay since the processor is dead and cannot 4111 * race with what we are doing. 
4112 */ 4113 refresh_cpu_vm_stats(cpu); 4114 } 4115 return NOTIFY_OK; 4116} 4117 4118void __init page_alloc_init(void) 4119{ 4120 hotcpu_notifier(page_alloc_cpu_notify, 0); 4121} 4122 4123/* 4124 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio 4125 * or min_free_kbytes changes. 4126 */ 4127static void calculate_totalreserve_pages(void) 4128{ 4129 struct pglist_data *pgdat; 4130 unsigned long reserve_pages = 0; 4131 enum zone_type i, j; 4132 4133 for_each_online_pgdat(pgdat) { 4134 for (i = 0; i < MAX_NR_ZONES; i++) { 4135 struct zone *zone = pgdat->node_zones + i; 4136 unsigned long max = 0; 4137 4138 /* Find valid and maximum lowmem_reserve in the zone */ 4139 for (j = i; j < MAX_NR_ZONES; j++) { 4140 if (zone->lowmem_reserve[j] > max) 4141 max = zone->lowmem_reserve[j]; 4142 } 4143 4144 /* we treat pages_high as reserved pages. */ 4145 max += zone->pages_high; 4146 4147 if (max > zone->present_pages) 4148 max = zone->present_pages; 4149 reserve_pages += max; 4150 } 4151 } 4152 totalreserve_pages = reserve_pages; 4153} 4154 4155/* 4156 * setup_per_zone_lowmem_reserve - called whenever 4157 * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone 4158 * has a correct pages reserved value, so an adequate number of 4159 * pages are left in the zone after a successful __alloc_pages(). 4160 */ 4161static void setup_per_zone_lowmem_reserve(void) 4162{ 4163 struct pglist_data *pgdat; 4164 enum zone_type j, idx; 4165 4166 for_each_online_pgdat(pgdat) { 4167 for (j = 0; j < MAX_NR_ZONES; j++) { 4168 struct zone *zone = pgdat->node_zones + j; 4169 unsigned long present_pages = zone->present_pages; 4170 4171 zone->lowmem_reserve[j] = 0; 4172 4173 idx = j; 4174 while (idx) { 4175 struct zone *lower_zone; 4176 4177 idx--; 4178 4179 if (sysctl_lowmem_reserve_ratio[idx] < 1) 4180 sysctl_lowmem_reserve_ratio[idx] = 1; 4181 4182 lower_zone = pgdat->node_zones + idx; 4183 lower_zone->lowmem_reserve[j] = present_pages / 4184 sysctl_lowmem_reserve_ratio[idx]; 4185 present_pages += lower_zone->present_pages; 4186 } 4187 } 4188 } 4189 4190 /* update totalreserve_pages */ 4191 calculate_totalreserve_pages(); 4192} 4193 4194/** 4195 * setup_per_zone_pages_min - called when min_free_kbytes changes. 4196 * 4197 * Ensures that the pages_{min,low,high} values for each zone are set correctly 4198 * with respect to min_free_kbytes. 4199 */ 4200void setup_per_zone_pages_min(void) 4201{ 4202 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 4203 unsigned long lowmem_pages = 0; 4204 struct zone *zone; 4205 unsigned long flags; 4206 4207 /* Calculate total number of !ZONE_HIGHMEM pages */ 4208 for_each_zone(zone) { 4209 if (!is_highmem(zone)) 4210 lowmem_pages += zone->present_pages; 4211 } 4212 4213 for_each_zone(zone) { 4214 u64 tmp; 4215 4216 spin_lock_irqsave(&zone->lru_lock, flags); 4217 tmp = (u64)pages_min * zone->present_pages; 4218 do_div(tmp, lowmem_pages); 4219 if (is_highmem(zone)) { 4220 /* 4221 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 4222 * need highmem pages, so cap pages_min to a small 4223 * value here. 4224 * 4225 * The (pages_high-pages_low) and (pages_low-pages_min) 4226 * deltas controls asynch page reclaim, and so should 4227 * not be capped for highmem. 
4228 */ 4229 int min_pages; 4230 4231 min_pages = zone->present_pages / 1024; 4232 if (min_pages < SWAP_CLUSTER_MAX) 4233 min_pages = SWAP_CLUSTER_MAX; 4234 if (min_pages > 128) 4235 min_pages = 128; 4236 zone->pages_min = min_pages; 4237 } else { 4238 /* 4239 * If it's a lowmem zone, reserve a number of pages 4240 * proportionate to the zone's size. 4241 */ 4242 zone->pages_min = tmp; 4243 } 4244 4245 zone->pages_low = zone->pages_min + (tmp >> 2); 4246 zone->pages_high = zone->pages_min + (tmp >> 1); 4247 setup_zone_migrate_reserve(zone); 4248 spin_unlock_irqrestore(&zone->lru_lock, flags); 4249 } 4250 4251 /* update totalreserve_pages */ 4252 calculate_totalreserve_pages(); 4253} 4254 4255/* 4256 * Initialise min_free_kbytes. 4257 * 4258 * For small machines we want it small (128k min). For large machines 4259 * we want it large (64MB max). But it is not linear, because network 4260 * bandwidth does not increase linearly with machine size. We use 4261 * 4262 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 4263 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 4264 * 4265 * which yields 4266 * 4267 * 16MB: 512k 4268 * 32MB: 724k 4269 * 64MB: 1024k 4270 * 128MB: 1448k 4271 * 256MB: 2048k 4272 * 512MB: 2896k 4273 * 1024MB: 4096k 4274 * 2048MB: 5792k 4275 * 4096MB: 8192k 4276 * 8192MB: 11584k 4277 * 16384MB: 16384k 4278 */ 4279static int __init init_per_zone_pages_min(void) 4280{ 4281 unsigned long lowmem_kbytes; 4282 4283 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 4284 4285 min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 4286 if (min_free_kbytes < 128) 4287 min_free_kbytes = 128; 4288 if (min_free_kbytes > 65536) 4289 min_free_kbytes = 65536; 4290 setup_per_zone_pages_min(); 4291 setup_per_zone_lowmem_reserve(); 4292 return 0; 4293} 4294module_init(init_per_zone_pages_min) 4295 4296/* 4297 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 4298 * that we can call two helper functions whenever min_free_kbytes 4299 * changes. 4300 */ 4301int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 4302 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4303{ 4304 proc_dointvec(table, write, file, buffer, length, ppos); 4305 if (write) 4306 setup_per_zone_pages_min(); 4307 return 0; 4308} 4309 4310#ifdef CONFIG_NUMA 4311int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, 4312 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4313{ 4314 struct zone *zone; 4315 int rc; 4316 4317 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4318 if (rc) 4319 return rc; 4320 4321 for_each_zone(zone) 4322 zone->min_unmapped_pages = (zone->present_pages * 4323 sysctl_min_unmapped_ratio) / 100; 4324 return 0; 4325} 4326 4327int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write, 4328 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4329{ 4330 struct zone *zone; 4331 int rc; 4332 4333 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4334 if (rc) 4335 return rc; 4336 4337 for_each_zone(zone) 4338 zone->min_slab_pages = (zone->present_pages * 4339 sysctl_min_slab_ratio) / 100; 4340 return 0; 4341} 4342#endif 4343 4344/* 4345 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 4346 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 4347 * whenever sysctl_lowmem_reserve_ratio changes. 4348 * 4349 * The reserve ratio obviously has absolutely no relation with the 4350 * pages_min watermarks. 
The lowmem reserve ratio can only make sense 4351 * if in function of the boot time zone sizes. 4352 */ 4353int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 4354 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4355{ 4356 proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4357 setup_per_zone_lowmem_reserve(); 4358 return 0; 4359} 4360 4361/* 4362 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 4363 * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist 4364 * can have before it gets flushed back to buddy allocator. 4365 */ 4366 4367int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 4368 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 4369{ 4370 struct zone *zone; 4371 unsigned int cpu; 4372 int ret; 4373 4374 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 4375 if (!write || (ret == -EINVAL)) 4376 return ret; 4377 for_each_zone(zone) { 4378 for_each_online_cpu(cpu) { 4379 unsigned long high; 4380 high = zone->present_pages / percpu_pagelist_fraction; 4381 setup_pagelist_highmark(zone_pcp(zone, cpu), high); 4382 } 4383 } 4384 return 0; 4385} 4386 4387int hashdist = HASHDIST_DEFAULT; 4388 4389#ifdef CONFIG_NUMA 4390static int __init set_hashdist(char *str) 4391{ 4392 if (!str) 4393 return 0; 4394 hashdist = simple_strtoul(str, &str, 0); 4395 return 1; 4396} 4397__setup("hashdist=", set_hashdist); 4398#endif 4399 4400/* 4401 * allocate a large system hash table from bootmem 4402 * - it is assumed that the hash table must contain an exact power-of-2 4403 * quantity of entries 4404 * - limit is the number of hash buckets, not the total allocation size 4405 */ 4406void *__init alloc_large_system_hash(const char *tablename, 4407 unsigned long bucketsize, 4408 unsigned long numentries, 4409 int scale, 4410 int flags, 4411 unsigned int *_hash_shift, 4412 unsigned int *_hash_mask, 4413 unsigned long limit) 4414{ 4415 unsigned long long max = limit; 4416 unsigned long log2qty, size; 4417 void *table = NULL; 4418 4419 /* allow the kernel cmdline to have a say */ 4420 if (!numentries) { 4421 /* round applicable memory size up to nearest megabyte */ 4422 numentries = nr_kernel_pages; 4423 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 4424 numentries >>= 20 - PAGE_SHIFT; 4425 numentries <<= 20 - PAGE_SHIFT; 4426 4427 /* limit to 1 bucket per 2^scale bytes of low memory */ 4428 if (scale > PAGE_SHIFT) 4429 numentries >>= (scale - PAGE_SHIFT); 4430 else 4431 numentries <<= (PAGE_SHIFT - scale); 4432 4433 /* Make sure we've got at least a 0-order allocation.. */ 4434 if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 4435 numentries = PAGE_SIZE / bucketsize; 4436 } 4437 numentries = roundup_pow_of_two(numentries); 4438 4439 /* limit allocation size to 1/16 total memory by default */ 4440 if (max == 0) { 4441 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 4442 do_div(max, bucketsize); 4443 } 4444 4445 if (numentries > max) 4446 numentries = max; 4447 4448 log2qty = ilog2(numentries); 4449 4450 do { 4451 size = bucketsize << log2qty; 4452 if (flags & HASH_EARLY) 4453 table = alloc_bootmem_nopanic(size); 4454 else if (hashdist) 4455 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 4456 else { 4457 unsigned long order = get_order(size); 4458 table = (void*) __get_free_pages(GFP_ATOMIC, order); 4459 /* 4460 * If bucketsize is not a power-of-two, we may free 4461 * some pages at the end of hash table. 
4462 */ 4463 if (table) { 4464 unsigned long alloc_end = (unsigned long)table + 4465 (PAGE_SIZE << order); 4466 unsigned long used = (unsigned long)table + 4467 PAGE_ALIGN(size); 4468 split_page(virt_to_page(table), order); 4469 while (used < alloc_end) { 4470 free_page(used); 4471 used += PAGE_SIZE; 4472 } 4473 } 4474 } 4475 } while (!table && size > PAGE_SIZE && --log2qty); 4476 4477 if (!table) 4478 panic("Failed to allocate %s hash table\n", tablename); 4479 4480 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n", 4481 tablename, 4482 (1U << log2qty), 4483 ilog2(size) - PAGE_SHIFT, 4484 size); 4485 4486 if (_hash_shift) 4487 *_hash_shift = log2qty; 4488 if (_hash_mask) 4489 *_hash_mask = (1 << log2qty) - 1; 4490 4491 return table; 4492} 4493 4494#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE 4495struct page *pfn_to_page(unsigned long pfn) 4496{ 4497 return __pfn_to_page(pfn); 4498} 4499unsigned long page_to_pfn(struct page *page) 4500{ 4501 return __page_to_pfn(page); 4502} 4503EXPORT_SYMBOL(pfn_to_page); 4504EXPORT_SYMBOL(page_to_pfn); 4505#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ 4506 4507/* Return a pointer to the bitmap storing bits affecting a block of pages */ 4508static inline unsigned long *get_pageblock_bitmap(struct zone *zone, 4509 unsigned long pfn) 4510{ 4511#ifdef CONFIG_SPARSEMEM 4512 return __pfn_to_section(pfn)->pageblock_flags; 4513#else 4514 return zone->pageblock_flags; 4515#endif /* CONFIG_SPARSEMEM */ 4516} 4517 4518static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) 4519{ 4520#ifdef CONFIG_SPARSEMEM 4521 pfn &= (PAGES_PER_SECTION-1); 4522 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 4523#else 4524 pfn = pfn - zone->zone_start_pfn; 4525 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 4526#endif /* CONFIG_SPARSEMEM */ 4527} 4528 4529/** 4530 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages 4531 * @page: The page within the block of interest 4532 * @start_bitidx: The first bit of interest to retrieve 4533 * @end_bitidx: The last bit of interest 4534 * returns pageblock_bits flags 4535 */ 4536unsigned long get_pageblock_flags_group(struct page *page, 4537 int start_bitidx, int end_bitidx) 4538{ 4539 struct zone *zone; 4540 unsigned long *bitmap; 4541 unsigned long pfn, bitidx; 4542 unsigned long flags = 0; 4543 unsigned long value = 1; 4544 4545 zone = page_zone(page); 4546 pfn = page_to_pfn(page); 4547 bitmap = get_pageblock_bitmap(zone, pfn); 4548 bitidx = pfn_to_bitidx(zone, pfn); 4549 4550 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 4551 if (test_bit(bitidx + start_bitidx, bitmap)) 4552 flags |= value; 4553 4554 return flags; 4555} 4556 4557/** 4558 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages 4559 * @page: The page within the block of interest 4560 * @start_bitidx: The first bit of interest 4561 * @end_bitidx: The last bit of interest 4562 * @flags: The flags to set 4563 */ 4564void set_pageblock_flags_group(struct page *page, unsigned long flags, 4565 int start_bitidx, int end_bitidx) 4566{ 4567 struct zone *zone; 4568 unsigned long *bitmap; 4569 unsigned long pfn, bitidx; 4570 unsigned long value = 1; 4571 4572 zone = page_zone(page); 4573 pfn = page_to_pfn(page); 4574 bitmap = get_pageblock_bitmap(zone, pfn); 4575 bitidx = pfn_to_bitidx(zone, pfn); 4576 VM_BUG_ON(pfn < zone->zone_start_pfn); 4577 VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages); 4578 4579 for (; 
start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 4580 if (flags & value) 4581 __set_bit(bitidx + start_bitidx, bitmap); 4582 else 4583 __clear_bit(bitidx + start_bitidx, bitmap); 4584} 4585 4586/* 4587 * This is designed as a helper function; please see page_isolation.c as well. 4588 * It sets/clears a pageblock's type to ISOLATE. 4589 * The page allocator never allocates memory from an ISOLATE pageblock. 4590 */ 4591 4592int set_migratetype_isolate(struct page *page) 4593{ 4594 struct zone *zone; 4595 unsigned long flags; 4596 int ret = -EBUSY; 4597 4598 zone = page_zone(page); 4599 spin_lock_irqsave(&zone->lock, flags); 4600 /* 4601 * In the future, more migrate types will be able to be isolation targets. 4602 */ 4603 if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE) 4604 goto out; 4605 set_pageblock_migratetype(page, MIGRATE_ISOLATE); 4606 move_freepages_block(zone, page, MIGRATE_ISOLATE); 4607 ret = 0; 4608out: 4609 spin_unlock_irqrestore(&zone->lock, flags); 4610 if (!ret) 4611 drain_all_pages(); 4612 return ret; 4613} 4614 4615void unset_migratetype_isolate(struct page *page) 4616{ 4617 struct zone *zone; 4618 unsigned long flags; 4619 zone = page_zone(page); 4620 spin_lock_irqsave(&zone->lock, flags); 4621 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) 4622 goto out; 4623 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4624 move_freepages_block(zone, page, MIGRATE_MOVABLE); 4625out: 4626 spin_unlock_irqrestore(&zone->lock, flags); 4627} 4628 4629#ifdef CONFIG_MEMORY_HOTREMOVE 4630/* 4631 * All pages in the range must be isolated before calling this. 4632 */ 4633void 4634__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 4635{ 4636 struct page *page; 4637 struct zone *zone; 4638 int order, i; 4639 unsigned long pfn; 4640 unsigned long flags; 4641 /* find the first valid pfn */ 4642 for (pfn = start_pfn; pfn < end_pfn; pfn++) 4643 if (pfn_valid(pfn)) 4644 break; 4645 if (pfn == end_pfn) 4646 return; 4647 zone = page_zone(pfn_to_page(pfn)); 4648 spin_lock_irqsave(&zone->lock, flags); 4649 pfn = start_pfn; 4650 while (pfn < end_pfn) { 4651 if (!pfn_valid(pfn)) { 4652 pfn++; 4653 continue; 4654 } 4655 page = pfn_to_page(pfn); 4656 BUG_ON(page_count(page)); 4657 BUG_ON(!PageBuddy(page)); 4658 order = page_order(page); 4659#ifdef CONFIG_DEBUG_VM 4660 printk(KERN_INFO "remove from free list %lx %d %lx\n", 4661 pfn, 1 << order, end_pfn); 4662#endif 4663 list_del(&page->lru); 4664 rmv_page_order(page); 4665 zone->free_area[order].nr_free--; 4666 __mod_zone_page_state(zone, NR_FREE_PAGES, 4667 - (1UL << order)); 4668 for (i = 0; i < (1 << order); i++) 4669 SetPageReserved((page+i)); 4670 pfn += (1 << order); 4671 } 4672 spin_unlock_irqrestore(&zone->lock, flags); 4673} 4674#endif 4675
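A minimal sketch of how the isolation helpers above are meant to be used together; the real caller lives in mm/page_isolation.c, and the function name example_isolate_range(), the assumption of a pageblock-aligned range, and the error handling here are illustrative only. Once every pageblock in the range has been isolated and its pages freed back to the allocator, __offline_isolated_pages() can strip them from the free lists as shown above.

/*
 * Illustrative sketch, not part of page_alloc.c: mark every pageblock in
 * [start_pfn, end_pfn) as MIGRATE_ISOLATE, rolling back on failure.
 * Assumes start_pfn and end_pfn are pageblock-aligned.
 */
static int example_isolate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		if (!pfn_valid(pfn))
			continue;
		if (set_migratetype_isolate(pfn_to_page(pfn)))
			goto undo;
	}
	return 0;

undo:
	/* Restore MIGRATE_MOVABLE on the blocks isolated so far */
	for (; start_pfn < pfn; start_pfn += pageblock_nr_pages) {
		if (pfn_valid(start_pfn))
			unset_migratetype_isolate(pfn_to_page(start_pfn));
	}
	return -EBUSY;
}

Similarly, the CONFIG_ARCH_POPULATES_NODE_MAP initialisation path around free_area_init_nodes() earlier in this file is normally driven from architecture code. The sketch below is likewise an assumption-laden illustration: the architecture is assumed to have already registered each usable region with add_active_range(nid, start_pfn, end_pfn), and the two PFN parameters stand in for whatever zone limits it discovered.

/*
 * Illustrative sketch, not part of page_alloc.c: hand the per-zone
 * maximum PFNs to free_area_init_nodes() once all active ranges have
 * been registered with add_active_range().
 */
static void __init example_zone_sizes_init(unsigned long dma_end_pfn,
					   unsigned long last_usable_pfn)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = dma_end_pfn;
#endif
	max_zone_pfns[ZONE_NORMAL] = last_usable_pfn;

	free_area_init_nodes(max_zone_pfns);
}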