page_alloc.c revision 08e0f6a9705376732fd3bc9bf8ba97a6b5211eb1
1/* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17#include <linux/stddef.h> 18#include <linux/mm.h> 19#include <linux/swap.h> 20#include <linux/interrupt.h> 21#include <linux/pagemap.h> 22#include <linux/bootmem.h> 23#include <linux/compiler.h> 24#include <linux/kernel.h> 25#include <linux/module.h> 26#include <linux/suspend.h> 27#include <linux/pagevec.h> 28#include <linux/blkdev.h> 29#include <linux/slab.h> 30#include <linux/notifier.h> 31#include <linux/topology.h> 32#include <linux/sysctl.h> 33#include <linux/cpu.h> 34#include <linux/cpuset.h> 35#include <linux/memory_hotplug.h> 36#include <linux/nodemask.h> 37#include <linux/vmalloc.h> 38#include <linux/mempolicy.h> 39#include <linux/stop_machine.h> 40#include <linux/sort.h> 41#include <linux/pfn.h> 42 43#include <asm/tlbflush.h> 44#include <asm/div64.h> 45#include "internal.h" 46 47/* 48 * MCD - HACK: Find somewhere to initialize this EARLY, or make this 49 * initializer cleaner 50 */ 51nodemask_t node_online_map __read_mostly = { { [0] = 1UL } }; 52EXPORT_SYMBOL(node_online_map); 53nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL; 54EXPORT_SYMBOL(node_possible_map); 55unsigned long totalram_pages __read_mostly; 56unsigned long totalreserve_pages __read_mostly; 57long nr_swap_pages; 58int percpu_pagelist_fraction; 59 60static void __free_pages_ok(struct page *page, unsigned int order); 61 62/* 63 * results with 256, 32 in the lowmem_reserve sysctl: 64 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 65 * 1G machine -> (16M dma, 784M normal, 224M high) 66 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 67 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 68 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA 69 * 70 * TBD: should special case ZONE_DMA32 machines here - in those we normally 71 * don't need any ZONE_NORMAL reservation 72 */ 73int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 74 256, 75#ifdef CONFIG_ZONE_DMA32 76 256, 77#endif 78#ifdef CONFIG_HIGHMEM 79 32 80#endif 81}; 82 83EXPORT_SYMBOL(totalram_pages); 84 85/* 86 * Used by page_zone() to look up the address of the struct zone whose 87 * id is encoded in the upper bits of page->flags 88 */ 89struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly; 90EXPORT_SYMBOL(zone_table); 91 92static char *zone_names[MAX_NR_ZONES] = { 93 "DMA", 94#ifdef CONFIG_ZONE_DMA32 95 "DMA32", 96#endif 97 "Normal", 98#ifdef CONFIG_HIGHMEM 99 "HighMem" 100#endif 101}; 102 103int min_free_kbytes = 1024; 104 105unsigned long __meminitdata nr_kernel_pages; 106unsigned long __meminitdata nr_all_pages; 107static unsigned long __initdata dma_reserve; 108 109#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 110 /* 111 * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct 112 * ranges of memory (RAM) that may be registered with add_active_range(). 113 * Ranges passed to add_active_range() will be merged if possible 114 * so the number of times add_active_range() can be called is 115 * related to the number of nodes and the number of holes 116 */ 117 #ifdef CONFIG_MAX_ACTIVE_REGIONS 118 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ 119 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS 120 #else 121 #if MAX_NUMNODES >= 32 122 /* If there can be many nodes, allow up to 50 holes per node */ 123 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) 124 #else 125 /* By default, allow up to 256 distinct regions */ 126 #define MAX_ACTIVE_REGIONS 256 127 #endif 128 #endif 129 130 struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS]; 131 int __initdata nr_nodemap_entries; 132 unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; 133 unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; 134#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 135 unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES]; 136 unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES]; 137#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 138#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 139 140#ifdef CONFIG_DEBUG_VM 141static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 142{ 143 int ret = 0; 144 unsigned seq; 145 unsigned long pfn = page_to_pfn(page); 146 147 do { 148 seq = zone_span_seqbegin(zone); 149 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 150 ret = 1; 151 else if (pfn < zone->zone_start_pfn) 152 ret = 1; 153 } while (zone_span_seqretry(zone, seq)); 154 155 return ret; 156} 157 158static int page_is_consistent(struct zone *zone, struct page *page) 159{ 160#ifdef CONFIG_HOLES_IN_ZONE 161 if (!pfn_valid(page_to_pfn(page))) 162 return 0; 163#endif 164 if (zone != page_zone(page)) 165 return 0; 166 167 return 1; 168} 169/* 170 * Temporary debugging check for pages not lying within a given zone. 171 */ 172static int bad_range(struct zone *zone, struct page *page) 173{ 174 if (page_outside_zone_boundaries(zone, page)) 175 return 1; 176 if (!page_is_consistent(zone, page)) 177 return 1; 178 179 return 0; 180} 181#else 182static inline int bad_range(struct zone *zone, struct page *page) 183{ 184 return 0; 185} 186#endif 187 188static void bad_page(struct page *page) 189{ 190 printk(KERN_EMERG "Bad page state in process '%s'\n" 191 KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" 192 KERN_EMERG "Trying to fix it up, but a reboot is needed\n" 193 KERN_EMERG "Backtrace:\n", 194 current->comm, page, (int)(2*sizeof(unsigned long)), 195 (unsigned long)page->flags, page->mapping, 196 page_mapcount(page), page_count(page)); 197 dump_stack(); 198 page->flags &= ~(1 << PG_lru | 199 1 << PG_private | 200 1 << PG_locked | 201 1 << PG_active | 202 1 << PG_dirty | 203 1 << PG_reclaim | 204 1 << PG_slab | 205 1 << PG_swapcache | 206 1 << PG_writeback | 207 1 << PG_buddy ); 208 set_page_count(page, 0); 209 reset_page_mapcount(page); 210 page->mapping = NULL; 211 add_taint(TAINT_BAD_PAGE); 212} 213 214/* 215 * Higher-order pages are called "compound pages". They are structured thusly: 216 * 217 * The first PAGE_SIZE page is called the "head page". 218 * 219 * The remaining PAGE_SIZE pages are called "tail pages". 220 * 221 * All pages have PG_compound set. All pages have their ->private pointing at 222 * the head page (even the head page has this). 223 * 224 * The first tail page's ->lru.next holds the address of the compound page's 225 * put_page() function. Its ->lru.prev holds the order of allocation. 226 * This usage means that zero-order pages may not be compound. 227 */ 228 229static void free_compound_page(struct page *page) 230{ 231 __free_pages_ok(page, (unsigned long)page[1].lru.prev); 232} 233 234static void prep_compound_page(struct page *page, unsigned long order) 235{ 236 int i; 237 int nr_pages = 1 << order; 238 239 page[1].lru.next = (void *)free_compound_page; /* set dtor */ 240 page[1].lru.prev = (void *)order; 241 for (i = 0; i < nr_pages; i++) { 242 struct page *p = page + i; 243 244 __SetPageCompound(p); 245 set_page_private(p, (unsigned long)page); 246 } 247} 248 249static void destroy_compound_page(struct page *page, unsigned long order) 250{ 251 int i; 252 int nr_pages = 1 << order; 253 254 if (unlikely((unsigned long)page[1].lru.prev != order)) 255 bad_page(page); 256 257 for (i = 0; i < nr_pages; i++) { 258 struct page *p = page + i; 259 260 if (unlikely(!PageCompound(p) | 261 (page_private(p) != (unsigned long)page))) 262 bad_page(page); 263 __ClearPageCompound(p); 264 } 265} 266 267static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 268{ 269 int i; 270 271 VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM); 272 /* 273 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 274 * and __GFP_HIGHMEM from hard or soft interrupt context. 275 */ 276 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt()); 277 for (i = 0; i < (1 << order); i++) 278 clear_highpage(page + i); 279} 280 281/* 282 * function for dealing with page's order in buddy system. 283 * zone->lock is already acquired when we use these. 284 * So, we don't need atomic page->flags operations here. 285 */ 286static inline unsigned long page_order(struct page *page) 287{ 288 return page_private(page); 289} 290 291static inline void set_page_order(struct page *page, int order) 292{ 293 set_page_private(page, order); 294 __SetPageBuddy(page); 295} 296 297static inline void rmv_page_order(struct page *page) 298{ 299 __ClearPageBuddy(page); 300 set_page_private(page, 0); 301} 302 303/* 304 * Locate the struct page for both the matching buddy in our 305 * pair (buddy1) and the combined O(n+1) page they form (page). 306 * 307 * 1) Any buddy B1 will have an order O twin B2 which satisfies 308 * the following equation: 309 * B2 = B1 ^ (1 << O) 310 * For example, if the starting buddy (buddy2) is #8 its order 311 * 1 buddy is #10: 312 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 313 * 314 * 2) Any buddy B will have an order O+1 parent P which 315 * satisfies the following equation: 316 * P = B & ~(1 << O) 317 * 318 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER 319 */ 320static inline struct page * 321__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order) 322{ 323 unsigned long buddy_idx = page_idx ^ (1 << order); 324 325 return page + (buddy_idx - page_idx); 326} 327 328static inline unsigned long 329__find_combined_index(unsigned long page_idx, unsigned int order) 330{ 331 return (page_idx & ~(1 << order)); 332} 333 334/* 335 * This function checks whether a page is free && is the buddy 336 * we can do coalesce a page and its buddy if 337 * (a) the buddy is not in a hole && 338 * (b) the buddy is in the buddy system && 339 * (c) a page and its buddy have the same order && 340 * (d) a page and its buddy are in the same zone. 341 * 342 * For recording whether a page is in the buddy system, we use PG_buddy. 343 * Setting, clearing, and testing PG_buddy is serialized by zone->lock. 344 * 345 * For recording page's order, we use page_private(page). 346 */ 347static inline int page_is_buddy(struct page *page, struct page *buddy, 348 int order) 349{ 350#ifdef CONFIG_HOLES_IN_ZONE 351 if (!pfn_valid(page_to_pfn(buddy))) 352 return 0; 353#endif 354 355 if (page_zone_id(page) != page_zone_id(buddy)) 356 return 0; 357 358 if (PageBuddy(buddy) && page_order(buddy) == order) { 359 BUG_ON(page_count(buddy) != 0); 360 return 1; 361 } 362 return 0; 363} 364 365/* 366 * Freeing function for a buddy system allocator. 367 * 368 * The concept of a buddy system is to maintain direct-mapped table 369 * (containing bit values) for memory blocks of various "orders". 370 * The bottom level table contains the map for the smallest allocatable 371 * units of memory (here, pages), and each level above it describes 372 * pairs of units from the levels below, hence, "buddies". 373 * At a high level, all that happens here is marking the table entry 374 * at the bottom level available, and propagating the changes upward 375 * as necessary, plus some accounting needed to play nicely with other 376 * parts of the VM system. 377 * At each level, we keep a list of pages, which are heads of continuous 378 * free pages of length of (1 << order) and marked with PG_buddy. Page's 379 * order is recorded in page_private(page) field. 380 * So when we are allocating or freeing one, we can derive the state of the 381 * other. That is, if we allocate a small block, and both were 382 * free, the remainder of the region must be split into blocks. 383 * If a block is freed, and its buddy is also free, then this 384 * triggers coalescing into a block of larger size. 385 * 386 * -- wli 387 */ 388 389static inline void __free_one_page(struct page *page, 390 struct zone *zone, unsigned int order) 391{ 392 unsigned long page_idx; 393 int order_size = 1 << order; 394 395 if (unlikely(PageCompound(page))) 396 destroy_compound_page(page, order); 397 398 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 399 400 VM_BUG_ON(page_idx & (order_size - 1)); 401 VM_BUG_ON(bad_range(zone, page)); 402 403 zone->free_pages += order_size; 404 while (order < MAX_ORDER-1) { 405 unsigned long combined_idx; 406 struct free_area *area; 407 struct page *buddy; 408 409 buddy = __page_find_buddy(page, page_idx, order); 410 if (!page_is_buddy(page, buddy, order)) 411 break; /* Move the buddy up one level. */ 412 413 list_del(&buddy->lru); 414 area = zone->free_area + order; 415 area->nr_free--; 416 rmv_page_order(buddy); 417 combined_idx = __find_combined_index(page_idx, order); 418 page = page + (combined_idx - page_idx); 419 page_idx = combined_idx; 420 order++; 421 } 422 set_page_order(page, order); 423 list_add(&page->lru, &zone->free_area[order].free_list); 424 zone->free_area[order].nr_free++; 425} 426 427static inline int free_pages_check(struct page *page) 428{ 429 if (unlikely(page_mapcount(page) | 430 (page->mapping != NULL) | 431 (page_count(page) != 0) | 432 (page->flags & ( 433 1 << PG_lru | 434 1 << PG_private | 435 1 << PG_locked | 436 1 << PG_active | 437 1 << PG_reclaim | 438 1 << PG_slab | 439 1 << PG_swapcache | 440 1 << PG_writeback | 441 1 << PG_reserved | 442 1 << PG_buddy )))) 443 bad_page(page); 444 if (PageDirty(page)) 445 __ClearPageDirty(page); 446 /* 447 * For now, we report if PG_reserved was found set, but do not 448 * clear it, and do not free the page. But we shall soon need 449 * to do more, for when the ZERO_PAGE count wraps negative. 450 */ 451 return PageReserved(page); 452} 453 454/* 455 * Frees a list of pages. 456 * Assumes all pages on list are in same zone, and of same order. 457 * count is the number of pages to free. 458 * 459 * If the zone was previously in an "all pages pinned" state then look to 460 * see if this freeing clears that state. 461 * 462 * And clear the zone's pages_scanned counter, to hold off the "all pages are 463 * pinned" detection logic. 464 */ 465static void free_pages_bulk(struct zone *zone, int count, 466 struct list_head *list, int order) 467{ 468 spin_lock(&zone->lock); 469 zone->all_unreclaimable = 0; 470 zone->pages_scanned = 0; 471 while (count--) { 472 struct page *page; 473 474 VM_BUG_ON(list_empty(list)); 475 page = list_entry(list->prev, struct page, lru); 476 /* have to delete it as __free_one_page list manipulates */ 477 list_del(&page->lru); 478 __free_one_page(page, zone, order); 479 } 480 spin_unlock(&zone->lock); 481} 482 483static void free_one_page(struct zone *zone, struct page *page, int order) 484{ 485 spin_lock(&zone->lock); 486 zone->all_unreclaimable = 0; 487 zone->pages_scanned = 0; 488 __free_one_page(page, zone ,order); 489 spin_unlock(&zone->lock); 490} 491 492static void __free_pages_ok(struct page *page, unsigned int order) 493{ 494 unsigned long flags; 495 int i; 496 int reserved = 0; 497 498 arch_free_page(page, order); 499 if (!PageHighMem(page)) 500 debug_check_no_locks_freed(page_address(page), 501 PAGE_SIZE<<order); 502 503 for (i = 0 ; i < (1 << order) ; ++i) 504 reserved += free_pages_check(page + i); 505 if (reserved) 506 return; 507 508 kernel_map_pages(page, 1 << order, 0); 509 local_irq_save(flags); 510 __count_vm_events(PGFREE, 1 << order); 511 free_one_page(page_zone(page), page, order); 512 local_irq_restore(flags); 513} 514 515/* 516 * permit the bootmem allocator to evade page validation on high-order frees 517 */ 518void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order) 519{ 520 if (order == 0) { 521 __ClearPageReserved(page); 522 set_page_count(page, 0); 523 set_page_refcounted(page); 524 __free_page(page); 525 } else { 526 int loop; 527 528 prefetchw(page); 529 for (loop = 0; loop < BITS_PER_LONG; loop++) { 530 struct page *p = &page[loop]; 531 532 if (loop + 1 < BITS_PER_LONG) 533 prefetchw(p + 1); 534 __ClearPageReserved(p); 535 set_page_count(p, 0); 536 } 537 538 set_page_refcounted(page); 539 __free_pages(page, order); 540 } 541} 542 543 544/* 545 * The order of subdivision here is critical for the IO subsystem. 546 * Please do not alter this order without good reasons and regression 547 * testing. Specifically, as large blocks of memory are subdivided, 548 * the order in which smaller blocks are delivered depends on the order 549 * they're subdivided in this function. This is the primary factor 550 * influencing the order in which pages are delivered to the IO 551 * subsystem according to empirical testing, and this is also justified 552 * by considering the behavior of a buddy system containing a single 553 * large block of memory acted on by a series of small allocations. 554 * This behavior is a critical factor in sglist merging's success. 555 * 556 * -- wli 557 */ 558static inline void expand(struct zone *zone, struct page *page, 559 int low, int high, struct free_area *area) 560{ 561 unsigned long size = 1 << high; 562 563 while (high > low) { 564 area--; 565 high--; 566 size >>= 1; 567 VM_BUG_ON(bad_range(zone, &page[size])); 568 list_add(&page[size].lru, &area->free_list); 569 area->nr_free++; 570 set_page_order(&page[size], high); 571 } 572} 573 574/* 575 * This page is about to be returned from the page allocator 576 */ 577static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 578{ 579 if (unlikely(page_mapcount(page) | 580 (page->mapping != NULL) | 581 (page_count(page) != 0) | 582 (page->flags & ( 583 1 << PG_lru | 584 1 << PG_private | 585 1 << PG_locked | 586 1 << PG_active | 587 1 << PG_dirty | 588 1 << PG_reclaim | 589 1 << PG_slab | 590 1 << PG_swapcache | 591 1 << PG_writeback | 592 1 << PG_reserved | 593 1 << PG_buddy )))) 594 bad_page(page); 595 596 /* 597 * For now, we report if PG_reserved was found set, but do not 598 * clear it, and do not allocate the page: as a safety net. 599 */ 600 if (PageReserved(page)) 601 return 1; 602 603 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 604 1 << PG_referenced | 1 << PG_arch_1 | 605 1 << PG_checked | 1 << PG_mappedtodisk); 606 set_page_private(page, 0); 607 set_page_refcounted(page); 608 kernel_map_pages(page, 1 << order, 1); 609 610 if (gfp_flags & __GFP_ZERO) 611 prep_zero_page(page, order, gfp_flags); 612 613 if (order && (gfp_flags & __GFP_COMP)) 614 prep_compound_page(page, order); 615 616 return 0; 617} 618 619/* 620 * Do the hard work of removing an element from the buddy allocator. 621 * Call me with the zone->lock already held. 622 */ 623static struct page *__rmqueue(struct zone *zone, unsigned int order) 624{ 625 struct free_area * area; 626 unsigned int current_order; 627 struct page *page; 628 629 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 630 area = zone->free_area + current_order; 631 if (list_empty(&area->free_list)) 632 continue; 633 634 page = list_entry(area->free_list.next, struct page, lru); 635 list_del(&page->lru); 636 rmv_page_order(page); 637 area->nr_free--; 638 zone->free_pages -= 1UL << order; 639 expand(zone, page, order, current_order, area); 640 return page; 641 } 642 643 return NULL; 644} 645 646/* 647 * Obtain a specified number of elements from the buddy allocator, all under 648 * a single hold of the lock, for efficiency. Add them to the supplied list. 649 * Returns the number of new pages which were placed at *list. 650 */ 651static int rmqueue_bulk(struct zone *zone, unsigned int order, 652 unsigned long count, struct list_head *list) 653{ 654 int i; 655 656 spin_lock(&zone->lock); 657 for (i = 0; i < count; ++i) { 658 struct page *page = __rmqueue(zone, order); 659 if (unlikely(page == NULL)) 660 break; 661 list_add_tail(&page->lru, list); 662 } 663 spin_unlock(&zone->lock); 664 return i; 665} 666 667#ifdef CONFIG_NUMA 668/* 669 * Called from the slab reaper to drain pagesets on a particular node that 670 * belongs to the currently executing processor. 671 * Note that this function must be called with the thread pinned to 672 * a single processor. 673 */ 674void drain_node_pages(int nodeid) 675{ 676 int i; 677 enum zone_type z; 678 unsigned long flags; 679 680 for (z = 0; z < MAX_NR_ZONES; z++) { 681 struct zone *zone = NODE_DATA(nodeid)->node_zones + z; 682 struct per_cpu_pageset *pset; 683 684 if (!populated_zone(zone)) 685 continue; 686 687 pset = zone_pcp(zone, smp_processor_id()); 688 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 689 struct per_cpu_pages *pcp; 690 691 pcp = &pset->pcp[i]; 692 if (pcp->count) { 693 local_irq_save(flags); 694 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 695 pcp->count = 0; 696 local_irq_restore(flags); 697 } 698 } 699 } 700} 701#endif 702 703#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU) 704static void __drain_pages(unsigned int cpu) 705{ 706 unsigned long flags; 707 struct zone *zone; 708 int i; 709 710 for_each_zone(zone) { 711 struct per_cpu_pageset *pset; 712 713 pset = zone_pcp(zone, cpu); 714 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 715 struct per_cpu_pages *pcp; 716 717 pcp = &pset->pcp[i]; 718 local_irq_save(flags); 719 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 720 pcp->count = 0; 721 local_irq_restore(flags); 722 } 723 } 724} 725#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */ 726 727#ifdef CONFIG_PM 728 729void mark_free_pages(struct zone *zone) 730{ 731 unsigned long pfn, max_zone_pfn; 732 unsigned long flags; 733 int order; 734 struct list_head *curr; 735 736 if (!zone->spanned_pages) 737 return; 738 739 spin_lock_irqsave(&zone->lock, flags); 740 741 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 742 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 743 if (pfn_valid(pfn)) { 744 struct page *page = pfn_to_page(pfn); 745 746 if (!PageNosave(page)) 747 ClearPageNosaveFree(page); 748 } 749 750 for (order = MAX_ORDER - 1; order >= 0; --order) 751 list_for_each(curr, &zone->free_area[order].free_list) { 752 unsigned long i; 753 754 pfn = page_to_pfn(list_entry(curr, struct page, lru)); 755 for (i = 0; i < (1UL << order); i++) 756 SetPageNosaveFree(pfn_to_page(pfn + i)); 757 } 758 759 spin_unlock_irqrestore(&zone->lock, flags); 760} 761 762/* 763 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 764 */ 765void drain_local_pages(void) 766{ 767 unsigned long flags; 768 769 local_irq_save(flags); 770 __drain_pages(smp_processor_id()); 771 local_irq_restore(flags); 772} 773#endif /* CONFIG_PM */ 774 775/* 776 * Free a 0-order page 777 */ 778static void fastcall free_hot_cold_page(struct page *page, int cold) 779{ 780 struct zone *zone = page_zone(page); 781 struct per_cpu_pages *pcp; 782 unsigned long flags; 783 784 arch_free_page(page, 0); 785 786 if (PageAnon(page)) 787 page->mapping = NULL; 788 if (free_pages_check(page)) 789 return; 790 791 kernel_map_pages(page, 1, 0); 792 793 pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; 794 local_irq_save(flags); 795 __count_vm_event(PGFREE); 796 list_add(&page->lru, &pcp->list); 797 pcp->count++; 798 if (pcp->count >= pcp->high) { 799 free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 800 pcp->count -= pcp->batch; 801 } 802 local_irq_restore(flags); 803 put_cpu(); 804} 805 806void fastcall free_hot_page(struct page *page) 807{ 808 free_hot_cold_page(page, 0); 809} 810 811void fastcall free_cold_page(struct page *page) 812{ 813 free_hot_cold_page(page, 1); 814} 815 816/* 817 * split_page takes a non-compound higher-order page, and splits it into 818 * n (1<<order) sub-pages: page[0..n] 819 * Each sub-page must be freed individually. 820 * 821 * Note: this is probably too low level an operation for use in drivers. 822 * Please consult with lkml before using this in your driver. 823 */ 824void split_page(struct page *page, unsigned int order) 825{ 826 int i; 827 828 VM_BUG_ON(PageCompound(page)); 829 VM_BUG_ON(!page_count(page)); 830 for (i = 1; i < (1 << order); i++) 831 set_page_refcounted(page + i); 832} 833 834/* 835 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 836 * we cheat by calling it from here, in the order > 0 path. Saves a branch 837 * or two. 838 */ 839static struct page *buffered_rmqueue(struct zonelist *zonelist, 840 struct zone *zone, int order, gfp_t gfp_flags) 841{ 842 unsigned long flags; 843 struct page *page; 844 int cold = !!(gfp_flags & __GFP_COLD); 845 int cpu; 846 847again: 848 cpu = get_cpu(); 849 if (likely(order == 0)) { 850 struct per_cpu_pages *pcp; 851 852 pcp = &zone_pcp(zone, cpu)->pcp[cold]; 853 local_irq_save(flags); 854 if (!pcp->count) { 855 pcp->count += rmqueue_bulk(zone, 0, 856 pcp->batch, &pcp->list); 857 if (unlikely(!pcp->count)) 858 goto failed; 859 } 860 page = list_entry(pcp->list.next, struct page, lru); 861 list_del(&page->lru); 862 pcp->count--; 863 } else { 864 spin_lock_irqsave(&zone->lock, flags); 865 page = __rmqueue(zone, order); 866 spin_unlock(&zone->lock); 867 if (!page) 868 goto failed; 869 } 870 871 __count_zone_vm_events(PGALLOC, zone, 1 << order); 872 zone_statistics(zonelist, zone); 873 local_irq_restore(flags); 874 put_cpu(); 875 876 VM_BUG_ON(bad_range(zone, page)); 877 if (prep_new_page(page, order, gfp_flags)) 878 goto again; 879 return page; 880 881failed: 882 local_irq_restore(flags); 883 put_cpu(); 884 return NULL; 885} 886 887#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 888#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ 889#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ 890#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ 891#define ALLOC_HARDER 0x10 /* try to alloc harder */ 892#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 893#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 894 895/* 896 * Return 1 if free pages are above 'mark'. This takes into account the order 897 * of the allocation. 898 */ 899int zone_watermark_ok(struct zone *z, int order, unsigned long mark, 900 int classzone_idx, int alloc_flags) 901{ 902 /* free_pages my go negative - that's OK */ 903 long min = mark, free_pages = z->free_pages - (1 << order) + 1; 904 int o; 905 906 if (alloc_flags & ALLOC_HIGH) 907 min -= min / 2; 908 if (alloc_flags & ALLOC_HARDER) 909 min -= min / 4; 910 911 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 912 return 0; 913 for (o = 0; o < order; o++) { 914 /* At the next order, this order's pages become unavailable */ 915 free_pages -= z->free_area[o].nr_free << o; 916 917 /* Require fewer higher order pages to be free */ 918 min >>= 1; 919 920 if (free_pages <= min) 921 return 0; 922 } 923 return 1; 924} 925 926/* 927 * get_page_from_freeliest goes through the zonelist trying to allocate 928 * a page. 929 */ 930static struct page * 931get_page_from_freelist(gfp_t gfp_mask, unsigned int order, 932 struct zonelist *zonelist, int alloc_flags) 933{ 934 struct zone **z = zonelist->zones; 935 struct page *page = NULL; 936 int classzone_idx = zone_idx(*z); 937 struct zone *zone; 938 939 /* 940 * Go through the zonelist once, looking for a zone with enough free. 941 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 942 */ 943 do { 944 zone = *z; 945 if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) && 946 zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) 947 break; 948 if ((alloc_flags & ALLOC_CPUSET) && 949 !cpuset_zone_allowed(zone, gfp_mask)) 950 continue; 951 952 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 953 unsigned long mark; 954 if (alloc_flags & ALLOC_WMARK_MIN) 955 mark = zone->pages_min; 956 else if (alloc_flags & ALLOC_WMARK_LOW) 957 mark = zone->pages_low; 958 else 959 mark = zone->pages_high; 960 if (!zone_watermark_ok(zone , order, mark, 961 classzone_idx, alloc_flags)) 962 if (!zone_reclaim_mode || 963 !zone_reclaim(zone, gfp_mask, order)) 964 continue; 965 } 966 967 page = buffered_rmqueue(zonelist, zone, order, gfp_mask); 968 if (page) { 969 break; 970 } 971 } while (*(++z) != NULL); 972 return page; 973} 974 975/* 976 * This is the 'heart' of the zoned buddy allocator. 977 */ 978struct page * fastcall 979__alloc_pages(gfp_t gfp_mask, unsigned int order, 980 struct zonelist *zonelist) 981{ 982 const gfp_t wait = gfp_mask & __GFP_WAIT; 983 struct zone **z; 984 struct page *page; 985 struct reclaim_state reclaim_state; 986 struct task_struct *p = current; 987 int do_retry; 988 int alloc_flags; 989 int did_some_progress; 990 991 might_sleep_if(wait); 992 993restart: 994 z = zonelist->zones; /* the list of zones suitable for gfp_mask */ 995 996 if (unlikely(*z == NULL)) { 997 /* Should this ever happen?? */ 998 return NULL; 999 } 1000 1001 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1002 zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); 1003 if (page) 1004 goto got_pg; 1005 1006 do { 1007 wakeup_kswapd(*z, order); 1008 } while (*(++z)); 1009 1010 /* 1011 * OK, we're below the kswapd watermark and have kicked background 1012 * reclaim. Now things get more complex, so set up alloc_flags according 1013 * to how we want to proceed. 1014 * 1015 * The caller may dip into page reserves a bit more if the caller 1016 * cannot run direct reclaim, or if the caller has realtime scheduling 1017 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 1018 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 1019 */ 1020 alloc_flags = ALLOC_WMARK_MIN; 1021 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) 1022 alloc_flags |= ALLOC_HARDER; 1023 if (gfp_mask & __GFP_HIGH) 1024 alloc_flags |= ALLOC_HIGH; 1025 if (wait) 1026 alloc_flags |= ALLOC_CPUSET; 1027 1028 /* 1029 * Go through the zonelist again. Let __GFP_HIGH and allocations 1030 * coming from realtime tasks go deeper into reserves. 1031 * 1032 * This is the last chance, in general, before the goto nopage. 1033 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 1034 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1035 */ 1036 page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); 1037 if (page) 1038 goto got_pg; 1039 1040 /* This allocation should allow future memory freeing. */ 1041 1042 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) 1043 && !in_interrupt()) { 1044 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 1045nofail_alloc: 1046 /* go through the zonelist yet again, ignoring mins */ 1047 page = get_page_from_freelist(gfp_mask, order, 1048 zonelist, ALLOC_NO_WATERMARKS); 1049 if (page) 1050 goto got_pg; 1051 if (gfp_mask & __GFP_NOFAIL) { 1052 blk_congestion_wait(WRITE, HZ/50); 1053 goto nofail_alloc; 1054 } 1055 } 1056 goto nopage; 1057 } 1058 1059 /* Atomic allocations - we can't balance anything */ 1060 if (!wait) 1061 goto nopage; 1062 1063rebalance: 1064 cond_resched(); 1065 1066 /* We now go into synchronous reclaim */ 1067 cpuset_memory_pressure_bump(); 1068 p->flags |= PF_MEMALLOC; 1069 reclaim_state.reclaimed_slab = 0; 1070 p->reclaim_state = &reclaim_state; 1071 1072 did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); 1073 1074 p->reclaim_state = NULL; 1075 p->flags &= ~PF_MEMALLOC; 1076 1077 cond_resched(); 1078 1079 if (likely(did_some_progress)) { 1080 page = get_page_from_freelist(gfp_mask, order, 1081 zonelist, alloc_flags); 1082 if (page) 1083 goto got_pg; 1084 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 1085 /* 1086 * Go through the zonelist yet one more time, keep 1087 * very high watermark here, this is only to catch 1088 * a parallel oom killing, we must fail if we're still 1089 * under heavy pressure. 1090 */ 1091 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1092 zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); 1093 if (page) 1094 goto got_pg; 1095 1096 out_of_memory(zonelist, gfp_mask, order); 1097 goto restart; 1098 } 1099 1100 /* 1101 * Don't let big-order allocations loop unless the caller explicitly 1102 * requests that. Wait for some write requests to complete then retry. 1103 * 1104 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order 1105 * <= 3, but that may not be true in other implementations. 1106 */ 1107 do_retry = 0; 1108 if (!(gfp_mask & __GFP_NORETRY)) { 1109 if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) 1110 do_retry = 1; 1111 if (gfp_mask & __GFP_NOFAIL) 1112 do_retry = 1; 1113 } 1114 if (do_retry) { 1115 blk_congestion_wait(WRITE, HZ/50); 1116 goto rebalance; 1117 } 1118 1119nopage: 1120 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 1121 printk(KERN_WARNING "%s: page allocation failure." 1122 " order:%d, mode:0x%x\n", 1123 p->comm, order, gfp_mask); 1124 dump_stack(); 1125 show_mem(); 1126 } 1127got_pg: 1128 return page; 1129} 1130 1131EXPORT_SYMBOL(__alloc_pages); 1132 1133/* 1134 * Common helper functions. 1135 */ 1136fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 1137{ 1138 struct page * page; 1139 page = alloc_pages(gfp_mask, order); 1140 if (!page) 1141 return 0; 1142 return (unsigned long) page_address(page); 1143} 1144 1145EXPORT_SYMBOL(__get_free_pages); 1146 1147fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) 1148{ 1149 struct page * page; 1150 1151 /* 1152 * get_zeroed_page() returns a 32-bit address, which cannot represent 1153 * a highmem page 1154 */ 1155 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1156 1157 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1158 if (page) 1159 return (unsigned long) page_address(page); 1160 return 0; 1161} 1162 1163EXPORT_SYMBOL(get_zeroed_page); 1164 1165void __pagevec_free(struct pagevec *pvec) 1166{ 1167 int i = pagevec_count(pvec); 1168 1169 while (--i >= 0) 1170 free_hot_cold_page(pvec->pages[i], pvec->cold); 1171} 1172 1173fastcall void __free_pages(struct page *page, unsigned int order) 1174{ 1175 if (put_page_testzero(page)) { 1176 if (order == 0) 1177 free_hot_page(page); 1178 else 1179 __free_pages_ok(page, order); 1180 } 1181} 1182 1183EXPORT_SYMBOL(__free_pages); 1184 1185fastcall void free_pages(unsigned long addr, unsigned int order) 1186{ 1187 if (addr != 0) { 1188 VM_BUG_ON(!virt_addr_valid((void *)addr)); 1189 __free_pages(virt_to_page((void *)addr), order); 1190 } 1191} 1192 1193EXPORT_SYMBOL(free_pages); 1194 1195/* 1196 * Total amount of free (allocatable) RAM: 1197 */ 1198unsigned int nr_free_pages(void) 1199{ 1200 unsigned int sum = 0; 1201 struct zone *zone; 1202 1203 for_each_zone(zone) 1204 sum += zone->free_pages; 1205 1206 return sum; 1207} 1208 1209EXPORT_SYMBOL(nr_free_pages); 1210 1211#ifdef CONFIG_NUMA 1212unsigned int nr_free_pages_pgdat(pg_data_t *pgdat) 1213{ 1214 unsigned int sum = 0; 1215 enum zone_type i; 1216 1217 for (i = 0; i < MAX_NR_ZONES; i++) 1218 sum += pgdat->node_zones[i].free_pages; 1219 1220 return sum; 1221} 1222#endif 1223 1224static unsigned int nr_free_zone_pages(int offset) 1225{ 1226 /* Just pick one node, since fallback list is circular */ 1227 pg_data_t *pgdat = NODE_DATA(numa_node_id()); 1228 unsigned int sum = 0; 1229 1230 struct zonelist *zonelist = pgdat->node_zonelists + offset; 1231 struct zone **zonep = zonelist->zones; 1232 struct zone *zone; 1233 1234 for (zone = *zonep++; zone; zone = *zonep++) { 1235 unsigned long size = zone->present_pages; 1236 unsigned long high = zone->pages_high; 1237 if (size > high) 1238 sum += size - high; 1239 } 1240 1241 return sum; 1242} 1243 1244/* 1245 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 1246 */ 1247unsigned int nr_free_buffer_pages(void) 1248{ 1249 return nr_free_zone_pages(gfp_zone(GFP_USER)); 1250} 1251 1252/* 1253 * Amount of free RAM allocatable within all zones 1254 */ 1255unsigned int nr_free_pagecache_pages(void) 1256{ 1257 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER)); 1258} 1259 1260static inline void show_node(struct zone *zone) 1261{ 1262 if (NUMA_BUILD) 1263 printk("Node %ld ", zone_to_nid(zone)); 1264} 1265 1266void si_meminfo(struct sysinfo *val) 1267{ 1268 val->totalram = totalram_pages; 1269 val->sharedram = 0; 1270 val->freeram = nr_free_pages(); 1271 val->bufferram = nr_blockdev_pages(); 1272 val->totalhigh = totalhigh_pages; 1273 val->freehigh = nr_free_highpages(); 1274 val->mem_unit = PAGE_SIZE; 1275} 1276 1277EXPORT_SYMBOL(si_meminfo); 1278 1279#ifdef CONFIG_NUMA 1280void si_meminfo_node(struct sysinfo *val, int nid) 1281{ 1282 pg_data_t *pgdat = NODE_DATA(nid); 1283 1284 val->totalram = pgdat->node_present_pages; 1285 val->freeram = nr_free_pages_pgdat(pgdat); 1286#ifdef CONFIG_HIGHMEM 1287 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 1288 val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages; 1289#else 1290 val->totalhigh = 0; 1291 val->freehigh = 0; 1292#endif 1293 val->mem_unit = PAGE_SIZE; 1294} 1295#endif 1296 1297#define K(x) ((x) << (PAGE_SHIFT-10)) 1298 1299/* 1300 * Show free area list (used inside shift_scroll-lock stuff) 1301 * We also calculate the percentage fragmentation. We do this by counting the 1302 * memory on each free list with the exception of the first item on the list. 1303 */ 1304void show_free_areas(void) 1305{ 1306 int cpu; 1307 unsigned long active; 1308 unsigned long inactive; 1309 unsigned long free; 1310 struct zone *zone; 1311 1312 for_each_zone(zone) { 1313 if (!populated_zone(zone)) 1314 continue; 1315 1316 show_node(zone); 1317 printk("%s per-cpu:\n", zone->name); 1318 1319 for_each_online_cpu(cpu) { 1320 struct per_cpu_pageset *pageset; 1321 1322 pageset = zone_pcp(zone, cpu); 1323 1324 printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d " 1325 "Cold: hi:%5d, btch:%4d usd:%4d\n", 1326 cpu, pageset->pcp[0].high, 1327 pageset->pcp[0].batch, pageset->pcp[0].count, 1328 pageset->pcp[1].high, pageset->pcp[1].batch, 1329 pageset->pcp[1].count); 1330 } 1331 } 1332 1333 get_zone_counts(&active, &inactive, &free); 1334 1335 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu " 1336 "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n", 1337 active, 1338 inactive, 1339 global_page_state(NR_FILE_DIRTY), 1340 global_page_state(NR_WRITEBACK), 1341 global_page_state(NR_UNSTABLE_NFS), 1342 nr_free_pages(), 1343 global_page_state(NR_SLAB_RECLAIMABLE) + 1344 global_page_state(NR_SLAB_UNRECLAIMABLE), 1345 global_page_state(NR_FILE_MAPPED), 1346 global_page_state(NR_PAGETABLE)); 1347 1348 for_each_zone(zone) { 1349 int i; 1350 1351 if (!populated_zone(zone)) 1352 continue; 1353 1354 show_node(zone); 1355 printk("%s" 1356 " free:%lukB" 1357 " min:%lukB" 1358 " low:%lukB" 1359 " high:%lukB" 1360 " active:%lukB" 1361 " inactive:%lukB" 1362 " present:%lukB" 1363 " pages_scanned:%lu" 1364 " all_unreclaimable? %s" 1365 "\n", 1366 zone->name, 1367 K(zone->free_pages), 1368 K(zone->pages_min), 1369 K(zone->pages_low), 1370 K(zone->pages_high), 1371 K(zone->nr_active), 1372 K(zone->nr_inactive), 1373 K(zone->present_pages), 1374 zone->pages_scanned, 1375 (zone->all_unreclaimable ? "yes" : "no") 1376 ); 1377 printk("lowmem_reserve[]:"); 1378 for (i = 0; i < MAX_NR_ZONES; i++) 1379 printk(" %lu", zone->lowmem_reserve[i]); 1380 printk("\n"); 1381 } 1382 1383 for_each_zone(zone) { 1384 unsigned long nr[MAX_ORDER], flags, order, total = 0; 1385 1386 if (!populated_zone(zone)) 1387 continue; 1388 1389 show_node(zone); 1390 printk("%s: ", zone->name); 1391 1392 spin_lock_irqsave(&zone->lock, flags); 1393 for (order = 0; order < MAX_ORDER; order++) { 1394 nr[order] = zone->free_area[order].nr_free; 1395 total += nr[order] << order; 1396 } 1397 spin_unlock_irqrestore(&zone->lock, flags); 1398 for (order = 0; order < MAX_ORDER; order++) 1399 printk("%lu*%lukB ", nr[order], K(1UL) << order); 1400 printk("= %lukB\n", K(total)); 1401 } 1402 1403 show_swap_cache_info(); 1404} 1405 1406/* 1407 * Builds allocation fallback zone lists. 1408 * 1409 * Add all populated zones of a node to the zonelist. 1410 */ 1411static int __meminit build_zonelists_node(pg_data_t *pgdat, 1412 struct zonelist *zonelist, int nr_zones, enum zone_type zone_type) 1413{ 1414 struct zone *zone; 1415 1416 BUG_ON(zone_type >= MAX_NR_ZONES); 1417 zone_type++; 1418 1419 do { 1420 zone_type--; 1421 zone = pgdat->node_zones + zone_type; 1422 if (populated_zone(zone)) { 1423 zonelist->zones[nr_zones++] = zone; 1424 check_highest_zone(zone_type); 1425 } 1426 1427 } while (zone_type); 1428 return nr_zones; 1429} 1430 1431#ifdef CONFIG_NUMA 1432#define MAX_NODE_LOAD (num_online_nodes()) 1433static int __meminitdata node_load[MAX_NUMNODES]; 1434/** 1435 * find_next_best_node - find the next node that should appear in a given node's fallback list 1436 * @node: node whose fallback list we're appending 1437 * @used_node_mask: nodemask_t of already used nodes 1438 * 1439 * We use a number of factors to determine which is the next node that should 1440 * appear on a given node's fallback list. The node should not have appeared 1441 * already in @node's fallback list, and it should be the next closest node 1442 * according to the distance array (which contains arbitrary distance values 1443 * from each node to each node in the system), and should also prefer nodes 1444 * with no CPUs, since presumably they'll have very little allocation pressure 1445 * on them otherwise. 1446 * It returns -1 if no node is found. 1447 */ 1448static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask) 1449{ 1450 int n, val; 1451 int min_val = INT_MAX; 1452 int best_node = -1; 1453 1454 /* Use the local node if we haven't already */ 1455 if (!node_isset(node, *used_node_mask)) { 1456 node_set(node, *used_node_mask); 1457 return node; 1458 } 1459 1460 for_each_online_node(n) { 1461 cpumask_t tmp; 1462 1463 /* Don't want a node to appear more than once */ 1464 if (node_isset(n, *used_node_mask)) 1465 continue; 1466 1467 /* Use the distance array to find the distance */ 1468 val = node_distance(node, n); 1469 1470 /* Penalize nodes under us ("prefer the next node") */ 1471 val += (n < node); 1472 1473 /* Give preference to headless and unused nodes */ 1474 tmp = node_to_cpumask(n); 1475 if (!cpus_empty(tmp)) 1476 val += PENALTY_FOR_NODE_WITH_CPUS; 1477 1478 /* Slight preference for less loaded node */ 1479 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 1480 val += node_load[n]; 1481 1482 if (val < min_val) { 1483 min_val = val; 1484 best_node = n; 1485 } 1486 } 1487 1488 if (best_node >= 0) 1489 node_set(best_node, *used_node_mask); 1490 1491 return best_node; 1492} 1493 1494static void __meminit build_zonelists(pg_data_t *pgdat) 1495{ 1496 int j, node, local_node; 1497 enum zone_type i; 1498 int prev_node, load; 1499 struct zonelist *zonelist; 1500 nodemask_t used_mask; 1501 1502 /* initialize zonelists */ 1503 for (i = 0; i < MAX_NR_ZONES; i++) { 1504 zonelist = pgdat->node_zonelists + i; 1505 zonelist->zones[0] = NULL; 1506 } 1507 1508 /* NUMA-aware ordering of nodes */ 1509 local_node = pgdat->node_id; 1510 load = num_online_nodes(); 1511 prev_node = local_node; 1512 nodes_clear(used_mask); 1513 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 1514 int distance = node_distance(local_node, node); 1515 1516 /* 1517 * If another node is sufficiently far away then it is better 1518 * to reclaim pages in a zone before going off node. 1519 */ 1520 if (distance > RECLAIM_DISTANCE) 1521 zone_reclaim_mode = 1; 1522 1523 /* 1524 * We don't want to pressure a particular node. 1525 * So adding penalty to the first node in same 1526 * distance group to make it round-robin. 1527 */ 1528 1529 if (distance != node_distance(local_node, prev_node)) 1530 node_load[node] += load; 1531 prev_node = node; 1532 load--; 1533 for (i = 0; i < MAX_NR_ZONES; i++) { 1534 zonelist = pgdat->node_zonelists + i; 1535 for (j = 0; zonelist->zones[j] != NULL; j++); 1536 1537 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1538 zonelist->zones[j] = NULL; 1539 } 1540 } 1541} 1542 1543#else /* CONFIG_NUMA */ 1544 1545static void __meminit build_zonelists(pg_data_t *pgdat) 1546{ 1547 int node, local_node; 1548 enum zone_type i,j; 1549 1550 local_node = pgdat->node_id; 1551 for (i = 0; i < MAX_NR_ZONES; i++) { 1552 struct zonelist *zonelist; 1553 1554 zonelist = pgdat->node_zonelists + i; 1555 1556 j = build_zonelists_node(pgdat, zonelist, 0, i); 1557 /* 1558 * Now we build the zonelist so that it contains the zones 1559 * of all the other nodes. 1560 * We don't want to pressure a particular node, so when 1561 * building the zones for node N, we make sure that the 1562 * zones coming right after the local ones are those from 1563 * node N+1 (modulo N) 1564 */ 1565 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 1566 if (!node_online(node)) 1567 continue; 1568 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1569 } 1570 for (node = 0; node < local_node; node++) { 1571 if (!node_online(node)) 1572 continue; 1573 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1574 } 1575 1576 zonelist->zones[j] = NULL; 1577 } 1578} 1579 1580#endif /* CONFIG_NUMA */ 1581 1582/* return values int ....just for stop_machine_run() */ 1583static int __meminit __build_all_zonelists(void *dummy) 1584{ 1585 int nid; 1586 for_each_online_node(nid) 1587 build_zonelists(NODE_DATA(nid)); 1588 return 0; 1589} 1590 1591void __meminit build_all_zonelists(void) 1592{ 1593 if (system_state == SYSTEM_BOOTING) { 1594 __build_all_zonelists(0); 1595 cpuset_init_current_mems_allowed(); 1596 } else { 1597 /* we have to stop all cpus to guaranntee there is no user 1598 of zonelist */ 1599 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); 1600 /* cpuset refresh routine should be here */ 1601 } 1602 vm_total_pages = nr_free_pagecache_pages(); 1603 printk("Built %i zonelists. Total pages: %ld\n", 1604 num_online_nodes(), vm_total_pages); 1605} 1606 1607/* 1608 * Helper functions to size the waitqueue hash table. 1609 * Essentially these want to choose hash table sizes sufficiently 1610 * large so that collisions trying to wait on pages are rare. 1611 * But in fact, the number of active page waitqueues on typical 1612 * systems is ridiculously low, less than 200. So this is even 1613 * conservative, even though it seems large. 1614 * 1615 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 1616 * waitqueues, i.e. the size of the waitq table given the number of pages. 1617 */ 1618#define PAGES_PER_WAITQUEUE 256 1619 1620#ifndef CONFIG_MEMORY_HOTPLUG 1621static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 1622{ 1623 unsigned long size = 1; 1624 1625 pages /= PAGES_PER_WAITQUEUE; 1626 1627 while (size < pages) 1628 size <<= 1; 1629 1630 /* 1631 * Once we have dozens or even hundreds of threads sleeping 1632 * on IO we've got bigger problems than wait queue collision. 1633 * Limit the size of the wait table to a reasonable size. 1634 */ 1635 size = min(size, 4096UL); 1636 1637 return max(size, 4UL); 1638} 1639#else 1640/* 1641 * A zone's size might be changed by hot-add, so it is not possible to determine 1642 * a suitable size for its wait_table. So we use the maximum size now. 1643 * 1644 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 1645 * 1646 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 1647 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 1648 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 1649 * 1650 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 1651 * or more by the traditional way. (See above). It equals: 1652 * 1653 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 1654 * ia64(16K page size) : = ( 8G + 4M)byte. 1655 * powerpc (64K page size) : = (32G +16M)byte. 1656 */ 1657static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 1658{ 1659 return 4096UL; 1660} 1661#endif 1662 1663/* 1664 * This is an integer logarithm so that shifts can be used later 1665 * to extract the more random high bits from the multiplicative 1666 * hash function before the remainder is taken. 1667 */ 1668static inline unsigned long wait_table_bits(unsigned long size) 1669{ 1670 return ffz(~size); 1671} 1672 1673#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 1674 1675/* 1676 * Initially all pages are reserved - free ones are freed 1677 * up by free_all_bootmem() once the early boot process is 1678 * done. Non-atomic initialization, single-pass. 1679 */ 1680void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 1681 unsigned long start_pfn) 1682{ 1683 struct page *page; 1684 unsigned long end_pfn = start_pfn + size; 1685 unsigned long pfn; 1686 1687 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 1688 if (!early_pfn_valid(pfn)) 1689 continue; 1690 page = pfn_to_page(pfn); 1691 set_page_links(page, zone, nid, pfn); 1692 init_page_count(page); 1693 reset_page_mapcount(page); 1694 SetPageReserved(page); 1695 INIT_LIST_HEAD(&page->lru); 1696#ifdef WANT_PAGE_VIRTUAL 1697 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 1698 if (!is_highmem_idx(zone)) 1699 set_page_address(page, __va(pfn << PAGE_SHIFT)); 1700#endif 1701 } 1702} 1703 1704void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, 1705 unsigned long size) 1706{ 1707 int order; 1708 for (order = 0; order < MAX_ORDER ; order++) { 1709 INIT_LIST_HEAD(&zone->free_area[order].free_list); 1710 zone->free_area[order].nr_free = 0; 1711 } 1712} 1713 1714#define ZONETABLE_INDEX(x, zone_nr) ((x << ZONES_SHIFT) | zone_nr) 1715void zonetable_add(struct zone *zone, int nid, enum zone_type zid, 1716 unsigned long pfn, unsigned long size) 1717{ 1718 unsigned long snum = pfn_to_section_nr(pfn); 1719 unsigned long end = pfn_to_section_nr(pfn + size); 1720 1721 if (FLAGS_HAS_NODE) 1722 zone_table[ZONETABLE_INDEX(nid, zid)] = zone; 1723 else 1724 for (; snum <= end; snum++) 1725 zone_table[ZONETABLE_INDEX(snum, zid)] = zone; 1726} 1727 1728#ifndef __HAVE_ARCH_MEMMAP_INIT 1729#define memmap_init(size, nid, zone, start_pfn) \ 1730 memmap_init_zone((size), (nid), (zone), (start_pfn)) 1731#endif 1732 1733static int __cpuinit zone_batchsize(struct zone *zone) 1734{ 1735 int batch; 1736 1737 /* 1738 * The per-cpu-pages pools are set to around 1000th of the 1739 * size of the zone. But no more than 1/2 of a meg. 1740 * 1741 * OK, so we don't know how big the cache is. So guess. 1742 */ 1743 batch = zone->present_pages / 1024; 1744 if (batch * PAGE_SIZE > 512 * 1024) 1745 batch = (512 * 1024) / PAGE_SIZE; 1746 batch /= 4; /* We effectively *= 4 below */ 1747 if (batch < 1) 1748 batch = 1; 1749 1750 /* 1751 * Clamp the batch to a 2^n - 1 value. Having a power 1752 * of 2 value was found to be more likely to have 1753 * suboptimal cache aliasing properties in some cases. 1754 * 1755 * For example if 2 tasks are alternately allocating 1756 * batches of pages, one task can end up with a lot 1757 * of pages of one half of the possible page colors 1758 * and the other with pages of the other colors. 1759 */ 1760 batch = (1 << (fls(batch + batch/2)-1)) - 1; 1761 1762 return batch; 1763} 1764 1765inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 1766{ 1767 struct per_cpu_pages *pcp; 1768 1769 memset(p, 0, sizeof(*p)); 1770 1771 pcp = &p->pcp[0]; /* hot */ 1772 pcp->count = 0; 1773 pcp->high = 6 * batch; 1774 pcp->batch = max(1UL, 1 * batch); 1775 INIT_LIST_HEAD(&pcp->list); 1776 1777 pcp = &p->pcp[1]; /* cold*/ 1778 pcp->count = 0; 1779 pcp->high = 2 * batch; 1780 pcp->batch = max(1UL, batch/2); 1781 INIT_LIST_HEAD(&pcp->list); 1782} 1783 1784/* 1785 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist 1786 * to the value high for the pageset p. 1787 */ 1788 1789static void setup_pagelist_highmark(struct per_cpu_pageset *p, 1790 unsigned long high) 1791{ 1792 struct per_cpu_pages *pcp; 1793 1794 pcp = &p->pcp[0]; /* hot list */ 1795 pcp->high = high; 1796 pcp->batch = max(1UL, high/4); 1797 if ((high/4) > (PAGE_SHIFT * 8)) 1798 pcp->batch = PAGE_SHIFT * 8; 1799} 1800 1801 1802#ifdef CONFIG_NUMA 1803/* 1804 * Boot pageset table. One per cpu which is going to be used for all 1805 * zones and all nodes. The parameters will be set in such a way 1806 * that an item put on a list will immediately be handed over to 1807 * the buddy list. This is safe since pageset manipulation is done 1808 * with interrupts disabled. 1809 * 1810 * Some NUMA counter updates may also be caught by the boot pagesets. 1811 * 1812 * The boot_pagesets must be kept even after bootup is complete for 1813 * unused processors and/or zones. They do play a role for bootstrapping 1814 * hotplugged processors. 1815 * 1816 * zoneinfo_show() and maybe other functions do 1817 * not check if the processor is online before following the pageset pointer. 1818 * Other parts of the kernel may not check if the zone is available. 1819 */ 1820static struct per_cpu_pageset boot_pageset[NR_CPUS]; 1821 1822/* 1823 * Dynamically allocate memory for the 1824 * per cpu pageset array in struct zone. 1825 */ 1826static int __cpuinit process_zones(int cpu) 1827{ 1828 struct zone *zone, *dzone; 1829 1830 for_each_zone(zone) { 1831 1832 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 1833 GFP_KERNEL, cpu_to_node(cpu)); 1834 if (!zone_pcp(zone, cpu)) 1835 goto bad; 1836 1837 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); 1838 1839 if (percpu_pagelist_fraction) 1840 setup_pagelist_highmark(zone_pcp(zone, cpu), 1841 (zone->present_pages / percpu_pagelist_fraction)); 1842 } 1843 1844 return 0; 1845bad: 1846 for_each_zone(dzone) { 1847 if (dzone == zone) 1848 break; 1849 kfree(zone_pcp(dzone, cpu)); 1850 zone_pcp(dzone, cpu) = NULL; 1851 } 1852 return -ENOMEM; 1853} 1854 1855static inline void free_zone_pagesets(int cpu) 1856{ 1857 struct zone *zone; 1858 1859 for_each_zone(zone) { 1860 struct per_cpu_pageset *pset = zone_pcp(zone, cpu); 1861 1862 /* Free per_cpu_pageset if it is slab allocated */ 1863 if (pset != &boot_pageset[cpu]) 1864 kfree(pset); 1865 zone_pcp(zone, cpu) = NULL; 1866 } 1867} 1868 1869static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, 1870 unsigned long action, 1871 void *hcpu) 1872{ 1873 int cpu = (long)hcpu; 1874 int ret = NOTIFY_OK; 1875 1876 switch (action) { 1877 case CPU_UP_PREPARE: 1878 if (process_zones(cpu)) 1879 ret = NOTIFY_BAD; 1880 break; 1881 case CPU_UP_CANCELED: 1882 case CPU_DEAD: 1883 free_zone_pagesets(cpu); 1884 break; 1885 default: 1886 break; 1887 } 1888 return ret; 1889} 1890 1891static struct notifier_block __cpuinitdata pageset_notifier = 1892 { &pageset_cpuup_callback, NULL, 0 }; 1893 1894void __init setup_per_cpu_pageset(void) 1895{ 1896 int err; 1897 1898 /* Initialize per_cpu_pageset for cpu 0. 1899 * A cpuup callback will do this for every cpu 1900 * as it comes online 1901 */ 1902 err = process_zones(smp_processor_id()); 1903 BUG_ON(err); 1904 register_cpu_notifier(&pageset_notifier); 1905} 1906 1907#endif 1908 1909static __meminit 1910int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 1911{ 1912 int i; 1913 struct pglist_data *pgdat = zone->zone_pgdat; 1914 size_t alloc_size; 1915 1916 /* 1917 * The per-page waitqueue mechanism uses hashed waitqueues 1918 * per zone. 1919 */ 1920 zone->wait_table_hash_nr_entries = 1921 wait_table_hash_nr_entries(zone_size_pages); 1922 zone->wait_table_bits = 1923 wait_table_bits(zone->wait_table_hash_nr_entries); 1924 alloc_size = zone->wait_table_hash_nr_entries 1925 * sizeof(wait_queue_head_t); 1926 1927 if (system_state == SYSTEM_BOOTING) { 1928 zone->wait_table = (wait_queue_head_t *) 1929 alloc_bootmem_node(pgdat, alloc_size); 1930 } else { 1931 /* 1932 * This case means that a zone whose size was 0 gets new memory 1933 * via memory hot-add. 1934 * But it may be the case that a new node was hot-added. In 1935 * this case vmalloc() will not be able to use this new node's 1936 * memory - this wait_table must be initialized to use this new 1937 * node itself as well. 1938 * To use this new node's memory, further consideration will be 1939 * necessary. 1940 */ 1941 zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size); 1942 } 1943 if (!zone->wait_table) 1944 return -ENOMEM; 1945 1946 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) 1947 init_waitqueue_head(zone->wait_table + i); 1948 1949 return 0; 1950} 1951 1952static __meminit void zone_pcp_init(struct zone *zone) 1953{ 1954 int cpu; 1955 unsigned long batch = zone_batchsize(zone); 1956 1957 for (cpu = 0; cpu < NR_CPUS; cpu++) { 1958#ifdef CONFIG_NUMA 1959 /* Early boot. Slab allocator not functional yet */ 1960 zone_pcp(zone, cpu) = &boot_pageset[cpu]; 1961 setup_pageset(&boot_pageset[cpu],0); 1962#else 1963 setup_pageset(zone_pcp(zone,cpu), batch); 1964#endif 1965 } 1966 if (zone->present_pages) 1967 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 1968 zone->name, zone->present_pages, batch); 1969} 1970 1971__meminit int init_currently_empty_zone(struct zone *zone, 1972 unsigned long zone_start_pfn, 1973 unsigned long size) 1974{ 1975 struct pglist_data *pgdat = zone->zone_pgdat; 1976 int ret; 1977 ret = zone_wait_table_init(zone, size); 1978 if (ret) 1979 return ret; 1980 pgdat->nr_zones = zone_idx(zone) + 1; 1981 1982 zone->zone_start_pfn = zone_start_pfn; 1983 1984 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); 1985 1986 zone_init_free_lists(pgdat, zone, zone->spanned_pages); 1987 1988 return 0; 1989} 1990 1991#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 1992/* 1993 * Basic iterator support. Return the first range of PFNs for a node 1994 * Note: nid == MAX_NUMNODES returns first region regardless of node 1995 */ 1996static int __init first_active_region_index_in_nid(int nid) 1997{ 1998 int i; 1999 2000 for (i = 0; i < nr_nodemap_entries; i++) 2001 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) 2002 return i; 2003 2004 return -1; 2005} 2006 2007/* 2008 * Basic iterator support. Return the next active range of PFNs for a node 2009 * Note: nid == MAX_NUMNODES returns next region regardles of node 2010 */ 2011static int __init next_active_region_index_in_nid(int index, int nid) 2012{ 2013 for (index = index + 1; index < nr_nodemap_entries; index++) 2014 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) 2015 return index; 2016 2017 return -1; 2018} 2019 2020#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 2021/* 2022 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 2023 * Architectures may implement their own version but if add_active_range() 2024 * was used and there are no special requirements, this is a convenient 2025 * alternative 2026 */ 2027int __init early_pfn_to_nid(unsigned long pfn) 2028{ 2029 int i; 2030 2031 for (i = 0; i < nr_nodemap_entries; i++) { 2032 unsigned long start_pfn = early_node_map[i].start_pfn; 2033 unsigned long end_pfn = early_node_map[i].end_pfn; 2034 2035 if (start_pfn <= pfn && pfn < end_pfn) 2036 return early_node_map[i].nid; 2037 } 2038 2039 return 0; 2040} 2041#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 2042 2043/* Basic iterator support to walk early_node_map[] */ 2044#define for_each_active_range_index_in_nid(i, nid) \ 2045 for (i = first_active_region_index_in_nid(nid); i != -1; \ 2046 i = next_active_region_index_in_nid(i, nid)) 2047 2048/** 2049 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range 2050 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed 2051 * @max_low_pfn: The highest PFN that till be passed to free_bootmem_node 2052 * 2053 * If an architecture guarantees that all ranges registered with 2054 * add_active_ranges() contain no holes and may be freed, this 2055 * this function may be used instead of calling free_bootmem() manually. 2056 */ 2057void __init free_bootmem_with_active_regions(int nid, 2058 unsigned long max_low_pfn) 2059{ 2060 int i; 2061 2062 for_each_active_range_index_in_nid(i, nid) { 2063 unsigned long size_pages = 0; 2064 unsigned long end_pfn = early_node_map[i].end_pfn; 2065 2066 if (early_node_map[i].start_pfn >= max_low_pfn) 2067 continue; 2068 2069 if (end_pfn > max_low_pfn) 2070 end_pfn = max_low_pfn; 2071 2072 size_pages = end_pfn - early_node_map[i].start_pfn; 2073 free_bootmem_node(NODE_DATA(early_node_map[i].nid), 2074 PFN_PHYS(early_node_map[i].start_pfn), 2075 size_pages << PAGE_SHIFT); 2076 } 2077} 2078 2079/** 2080 * sparse_memory_present_with_active_regions - Call memory_present for each active range 2081 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used 2082 * 2083 * If an architecture guarantees that all ranges registered with 2084 * add_active_ranges() contain no holes and may be freed, this 2085 * this function may be used instead of calling memory_present() manually. 2086 */ 2087void __init sparse_memory_present_with_active_regions(int nid) 2088{ 2089 int i; 2090 2091 for_each_active_range_index_in_nid(i, nid) 2092 memory_present(early_node_map[i].nid, 2093 early_node_map[i].start_pfn, 2094 early_node_map[i].end_pfn); 2095} 2096 2097/** 2098 * push_node_boundaries - Push node boundaries to at least the requested boundary 2099 * @nid: The nid of the node to push the boundary for 2100 * @start_pfn: The start pfn of the node 2101 * @end_pfn: The end pfn of the node 2102 * 2103 * In reserve-based hot-add, mem_map is allocated that is unused until hotadd 2104 * time. Specifically, on x86_64, SRAT will report ranges that can potentially 2105 * be hotplugged even though no physical memory exists. This function allows 2106 * an arch to push out the node boundaries so mem_map is allocated that can 2107 * be used later. 2108 */ 2109#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 2110void __init push_node_boundaries(unsigned int nid, 2111 unsigned long start_pfn, unsigned long end_pfn) 2112{ 2113 printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n", 2114 nid, start_pfn, end_pfn); 2115 2116 /* Initialise the boundary for this node if necessary */ 2117 if (node_boundary_end_pfn[nid] == 0) 2118 node_boundary_start_pfn[nid] = -1UL; 2119 2120 /* Update the boundaries */ 2121 if (node_boundary_start_pfn[nid] > start_pfn) 2122 node_boundary_start_pfn[nid] = start_pfn; 2123 if (node_boundary_end_pfn[nid] < end_pfn) 2124 node_boundary_end_pfn[nid] = end_pfn; 2125} 2126 2127/* If necessary, push the node boundary out for reserve hotadd */ 2128static void __init account_node_boundary(unsigned int nid, 2129 unsigned long *start_pfn, unsigned long *end_pfn) 2130{ 2131 printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n", 2132 nid, *start_pfn, *end_pfn); 2133 2134 /* Return if boundary information has not been provided */ 2135 if (node_boundary_end_pfn[nid] == 0) 2136 return; 2137 2138 /* Check the boundaries and update if necessary */ 2139 if (node_boundary_start_pfn[nid] < *start_pfn) 2140 *start_pfn = node_boundary_start_pfn[nid]; 2141 if (node_boundary_end_pfn[nid] > *end_pfn) 2142 *end_pfn = node_boundary_end_pfn[nid]; 2143} 2144#else 2145void __init push_node_boundaries(unsigned int nid, 2146 unsigned long start_pfn, unsigned long end_pfn) {} 2147 2148static void __init account_node_boundary(unsigned int nid, 2149 unsigned long *start_pfn, unsigned long *end_pfn) {} 2150#endif 2151 2152 2153/** 2154 * get_pfn_range_for_nid - Return the start and end page frames for a node 2155 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned 2156 * @start_pfn: Passed by reference. On return, it will have the node start_pfn 2157 * @end_pfn: Passed by reference. On return, it will have the node end_pfn 2158 * 2159 * It returns the start and end page frame of a node based on information 2160 * provided by an arch calling add_active_range(). If called for a node 2161 * with no available memory, a warning is printed and the start and end 2162 * PFNs will be 0 2163 */ 2164void __init get_pfn_range_for_nid(unsigned int nid, 2165 unsigned long *start_pfn, unsigned long *end_pfn) 2166{ 2167 int i; 2168 *start_pfn = -1UL; 2169 *end_pfn = 0; 2170 2171 for_each_active_range_index_in_nid(i, nid) { 2172 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn); 2173 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn); 2174 } 2175 2176 if (*start_pfn == -1UL) { 2177 printk(KERN_WARNING "Node %u active with no memory\n", nid); 2178 *start_pfn = 0; 2179 } 2180 2181 /* Push the node boundaries out if requested */ 2182 account_node_boundary(nid, start_pfn, end_pfn); 2183} 2184 2185/* 2186 * Return the number of pages a zone spans in a node, including holes 2187 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 2188 */ 2189unsigned long __init zone_spanned_pages_in_node(int nid, 2190 unsigned long zone_type, 2191 unsigned long *ignored) 2192{ 2193 unsigned long node_start_pfn, node_end_pfn; 2194 unsigned long zone_start_pfn, zone_end_pfn; 2195 2196 /* Get the start and end of the node and zone */ 2197 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 2198 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 2199 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 2200 2201 /* Check that this node has pages within the zone's required range */ 2202 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) 2203 return 0; 2204 2205 /* Move the zone boundaries inside the node if necessary */ 2206 zone_end_pfn = min(zone_end_pfn, node_end_pfn); 2207 zone_start_pfn = max(zone_start_pfn, node_start_pfn); 2208 2209 /* Return the spanned pages */ 2210 return zone_end_pfn - zone_start_pfn; 2211} 2212 2213/* 2214 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 2215 * then all holes in the requested range will be accounted for 2216 */ 2217unsigned long __init __absent_pages_in_range(int nid, 2218 unsigned long range_start_pfn, 2219 unsigned long range_end_pfn) 2220{ 2221 int i = 0; 2222 unsigned long prev_end_pfn = 0, hole_pages = 0; 2223 unsigned long start_pfn; 2224 2225 /* Find the end_pfn of the first active range of pfns in the node */ 2226 i = first_active_region_index_in_nid(nid); 2227 if (i == -1) 2228 return 0; 2229 2230 /* Account for ranges before physical memory on this node */ 2231 if (early_node_map[i].start_pfn > range_start_pfn) 2232 hole_pages = early_node_map[i].start_pfn - range_start_pfn; 2233 2234 prev_end_pfn = early_node_map[i].start_pfn; 2235 2236 /* Find all holes for the zone within the node */ 2237 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) { 2238 2239 /* No need to continue if prev_end_pfn is outside the zone */ 2240 if (prev_end_pfn >= range_end_pfn) 2241 break; 2242 2243 /* Make sure the end of the zone is not within the hole */ 2244 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn); 2245 prev_end_pfn = max(prev_end_pfn, range_start_pfn); 2246 2247 /* Update the hole size cound and move on */ 2248 if (start_pfn > range_start_pfn) { 2249 BUG_ON(prev_end_pfn > start_pfn); 2250 hole_pages += start_pfn - prev_end_pfn; 2251 } 2252 prev_end_pfn = early_node_map[i].end_pfn; 2253 } 2254 2255 /* Account for ranges past physical memory on this node */ 2256 if (range_end_pfn > prev_end_pfn) 2257 hole_pages = range_end_pfn - 2258 max(range_start_pfn, prev_end_pfn); 2259 2260 return hole_pages; 2261} 2262 2263/** 2264 * absent_pages_in_range - Return number of page frames in holes within a range 2265 * @start_pfn: The start PFN to start searching for holes 2266 * @end_pfn: The end PFN to stop searching for holes 2267 * 2268 * It returns the number of pages frames in memory holes within a range 2269 */ 2270unsigned long __init absent_pages_in_range(unsigned long start_pfn, 2271 unsigned long end_pfn) 2272{ 2273 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 2274} 2275 2276/* Return the number of page frames in holes in a zone on a node */ 2277unsigned long __init zone_absent_pages_in_node(int nid, 2278 unsigned long zone_type, 2279 unsigned long *ignored) 2280{ 2281 unsigned long node_start_pfn, node_end_pfn; 2282 unsigned long zone_start_pfn, zone_end_pfn; 2283 2284 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 2285 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type], 2286 node_start_pfn); 2287 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type], 2288 node_end_pfn); 2289 2290 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 2291} 2292 2293/* Return the zone index a PFN is in */ 2294int memmap_zone_idx(struct page *lmem_map) 2295{ 2296 int i; 2297 unsigned long phys_addr = virt_to_phys(lmem_map); 2298 unsigned long pfn = phys_addr >> PAGE_SHIFT; 2299 2300 for (i = 0; i < MAX_NR_ZONES; i++) 2301 if (pfn < arch_zone_highest_possible_pfn[i]) 2302 break; 2303 2304 return i; 2305} 2306#else 2307static inline unsigned long zone_spanned_pages_in_node(int nid, 2308 unsigned long zone_type, 2309 unsigned long *zones_size) 2310{ 2311 return zones_size[zone_type]; 2312} 2313 2314static inline unsigned long zone_absent_pages_in_node(int nid, 2315 unsigned long zone_type, 2316 unsigned long *zholes_size) 2317{ 2318 if (!zholes_size) 2319 return 0; 2320 2321 return zholes_size[zone_type]; 2322} 2323 2324static inline int memmap_zone_idx(struct page *lmem_map) 2325{ 2326 return MAX_NR_ZONES; 2327} 2328#endif 2329 2330static void __init calculate_node_totalpages(struct pglist_data *pgdat, 2331 unsigned long *zones_size, unsigned long *zholes_size) 2332{ 2333 unsigned long realtotalpages, totalpages = 0; 2334 enum zone_type i; 2335 2336 for (i = 0; i < MAX_NR_ZONES; i++) 2337 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, 2338 zones_size); 2339 pgdat->node_spanned_pages = totalpages; 2340 2341 realtotalpages = totalpages; 2342 for (i = 0; i < MAX_NR_ZONES; i++) 2343 realtotalpages -= 2344 zone_absent_pages_in_node(pgdat->node_id, i, 2345 zholes_size); 2346 pgdat->node_present_pages = realtotalpages; 2347 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 2348 realtotalpages); 2349} 2350 2351/* 2352 * Set up the zone data structures: 2353 * - mark all pages reserved 2354 * - mark all memory queues empty 2355 * - clear the memory bitmaps 2356 */ 2357static void __meminit free_area_init_core(struct pglist_data *pgdat, 2358 unsigned long *zones_size, unsigned long *zholes_size) 2359{ 2360 enum zone_type j; 2361 int nid = pgdat->node_id; 2362 unsigned long zone_start_pfn = pgdat->node_start_pfn; 2363 int ret; 2364 2365 pgdat_resize_init(pgdat); 2366 pgdat->nr_zones = 0; 2367 init_waitqueue_head(&pgdat->kswapd_wait); 2368 pgdat->kswapd_max_order = 0; 2369 2370 for (j = 0; j < MAX_NR_ZONES; j++) { 2371 struct zone *zone = pgdat->node_zones + j; 2372 unsigned long size, realsize, memmap_pages; 2373 2374 size = zone_spanned_pages_in_node(nid, j, zones_size); 2375 realsize = size - zone_absent_pages_in_node(nid, j, 2376 zholes_size); 2377 2378 /* 2379 * Adjust realsize so that it accounts for how much memory 2380 * is used by this zone for memmap. This affects the watermark 2381 * and per-cpu initialisations 2382 */ 2383 memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT; 2384 if (realsize >= memmap_pages) { 2385 realsize -= memmap_pages; 2386 printk(KERN_DEBUG 2387 " %s zone: %lu pages used for memmap\n", 2388 zone_names[j], memmap_pages); 2389 } else 2390 printk(KERN_WARNING 2391 " %s zone: %lu pages exceeds realsize %lu\n", 2392 zone_names[j], memmap_pages, realsize); 2393 2394 /* Account for reserved DMA pages */ 2395 if (j == ZONE_DMA && realsize > dma_reserve) { 2396 realsize -= dma_reserve; 2397 printk(KERN_DEBUG " DMA zone: %lu pages reserved\n", 2398 dma_reserve); 2399 } 2400 2401 if (!is_highmem_idx(j)) 2402 nr_kernel_pages += realsize; 2403 nr_all_pages += realsize; 2404 2405 zone->spanned_pages = size; 2406 zone->present_pages = realsize; 2407#ifdef CONFIG_NUMA 2408 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio) 2409 / 100; 2410 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100; 2411#endif 2412 zone->name = zone_names[j]; 2413 spin_lock_init(&zone->lock); 2414 spin_lock_init(&zone->lru_lock); 2415 zone_seqlock_init(zone); 2416 zone->zone_pgdat = pgdat; 2417 zone->free_pages = 0; 2418 2419 zone->temp_priority = zone->prev_priority = DEF_PRIORITY; 2420 2421 zone_pcp_init(zone); 2422 INIT_LIST_HEAD(&zone->active_list); 2423 INIT_LIST_HEAD(&zone->inactive_list); 2424 zone->nr_scan_active = 0; 2425 zone->nr_scan_inactive = 0; 2426 zone->nr_active = 0; 2427 zone->nr_inactive = 0; 2428 zap_zone_vm_stats(zone); 2429 atomic_set(&zone->reclaim_in_progress, 0); 2430 if (!size) 2431 continue; 2432 2433 zonetable_add(zone, nid, j, zone_start_pfn, size); 2434 ret = init_currently_empty_zone(zone, zone_start_pfn, size); 2435 BUG_ON(ret); 2436 zone_start_pfn += size; 2437 } 2438} 2439 2440static void __init alloc_node_mem_map(struct pglist_data *pgdat) 2441{ 2442 /* Skip empty nodes */ 2443 if (!pgdat->node_spanned_pages) 2444 return; 2445 2446#ifdef CONFIG_FLAT_NODE_MEM_MAP 2447 /* ia64 gets its own node_mem_map, before this, without bootmem */ 2448 if (!pgdat->node_mem_map) { 2449 unsigned long size, start, end; 2450 struct page *map; 2451 2452 /* 2453 * The zone's endpoints aren't required to be MAX_ORDER 2454 * aligned but the node_mem_map endpoints must be in order 2455 * for the buddy allocator to function correctly. 2456 */ 2457 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 2458 end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 2459 end = ALIGN(end, MAX_ORDER_NR_PAGES); 2460 size = (end - start) * sizeof(struct page); 2461 map = alloc_remap(pgdat->node_id, size); 2462 if (!map) 2463 map = alloc_bootmem_node(pgdat, size); 2464 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 2465 } 2466#ifdef CONFIG_FLATMEM 2467 /* 2468 * With no DISCONTIG, the global mem_map is just set as node 0's 2469 */ 2470 if (pgdat == NODE_DATA(0)) { 2471 mem_map = NODE_DATA(0)->node_mem_map; 2472#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2473 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 2474 mem_map -= pgdat->node_start_pfn; 2475#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 2476 } 2477#endif 2478#endif /* CONFIG_FLAT_NODE_MEM_MAP */ 2479} 2480 2481void __meminit free_area_init_node(int nid, struct pglist_data *pgdat, 2482 unsigned long *zones_size, unsigned long node_start_pfn, 2483 unsigned long *zholes_size) 2484{ 2485 pgdat->node_id = nid; 2486 pgdat->node_start_pfn = node_start_pfn; 2487 calculate_node_totalpages(pgdat, zones_size, zholes_size); 2488 2489 alloc_node_mem_map(pgdat); 2490 2491 free_area_init_core(pgdat, zones_size, zholes_size); 2492} 2493 2494#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2495/** 2496 * add_active_range - Register a range of PFNs backed by physical memory 2497 * @nid: The node ID the range resides on 2498 * @start_pfn: The start PFN of the available physical memory 2499 * @end_pfn: The end PFN of the available physical memory 2500 * 2501 * These ranges are stored in an early_node_map[] and later used by 2502 * free_area_init_nodes() to calculate zone sizes and holes. If the 2503 * range spans a memory hole, it is up to the architecture to ensure 2504 * the memory is not freed by the bootmem allocator. If possible 2505 * the range being registered will be merged with existing ranges. 2506 */ 2507void __init add_active_range(unsigned int nid, unsigned long start_pfn, 2508 unsigned long end_pfn) 2509{ 2510 int i; 2511 2512 printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) " 2513 "%d entries of %d used\n", 2514 nid, start_pfn, end_pfn, 2515 nr_nodemap_entries, MAX_ACTIVE_REGIONS); 2516 2517 /* Merge with existing active regions if possible */ 2518 for (i = 0; i < nr_nodemap_entries; i++) { 2519 if (early_node_map[i].nid != nid) 2520 continue; 2521 2522 /* Skip if an existing region covers this new one */ 2523 if (start_pfn >= early_node_map[i].start_pfn && 2524 end_pfn <= early_node_map[i].end_pfn) 2525 return; 2526 2527 /* Merge forward if suitable */ 2528 if (start_pfn <= early_node_map[i].end_pfn && 2529 end_pfn > early_node_map[i].end_pfn) { 2530 early_node_map[i].end_pfn = end_pfn; 2531 return; 2532 } 2533 2534 /* Merge backward if suitable */ 2535 if (start_pfn < early_node_map[i].end_pfn && 2536 end_pfn >= early_node_map[i].start_pfn) { 2537 early_node_map[i].start_pfn = start_pfn; 2538 return; 2539 } 2540 } 2541 2542 /* Check that early_node_map is large enough */ 2543 if (i >= MAX_ACTIVE_REGIONS) { 2544 printk(KERN_CRIT "More than %d memory regions, truncating\n", 2545 MAX_ACTIVE_REGIONS); 2546 return; 2547 } 2548 2549 early_node_map[i].nid = nid; 2550 early_node_map[i].start_pfn = start_pfn; 2551 early_node_map[i].end_pfn = end_pfn; 2552 nr_nodemap_entries = i + 1; 2553} 2554 2555/** 2556 * shrink_active_range - Shrink an existing registered range of PFNs 2557 * @nid: The node id the range is on that should be shrunk 2558 * @old_end_pfn: The old end PFN of the range 2559 * @new_end_pfn: The new PFN of the range 2560 * 2561 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node. 2562 * The map is kept at the end physical page range that has already been 2563 * registered with add_active_range(). This function allows an arch to shrink 2564 * an existing registered range. 2565 */ 2566void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn, 2567 unsigned long new_end_pfn) 2568{ 2569 int i; 2570 2571 /* Find the old active region end and shrink */ 2572 for_each_active_range_index_in_nid(i, nid) 2573 if (early_node_map[i].end_pfn == old_end_pfn) { 2574 early_node_map[i].end_pfn = new_end_pfn; 2575 break; 2576 } 2577} 2578 2579/** 2580 * remove_all_active_ranges - Remove all currently registered regions 2581 * During discovery, it may be found that a table like SRAT is invalid 2582 * and an alternative discovery method must be used. This function removes 2583 * all currently registered regions. 2584 */ 2585void __init remove_all_active_ranges() 2586{ 2587 memset(early_node_map, 0, sizeof(early_node_map)); 2588 nr_nodemap_entries = 0; 2589#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 2590 memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn)); 2591 memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn)); 2592#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 2593} 2594 2595/* Compare two active node_active_regions */ 2596static int __init cmp_node_active_region(const void *a, const void *b) 2597{ 2598 struct node_active_region *arange = (struct node_active_region *)a; 2599 struct node_active_region *brange = (struct node_active_region *)b; 2600 2601 /* Done this way to avoid overflows */ 2602 if (arange->start_pfn > brange->start_pfn) 2603 return 1; 2604 if (arange->start_pfn < brange->start_pfn) 2605 return -1; 2606 2607 return 0; 2608} 2609 2610/* sort the node_map by start_pfn */ 2611static void __init sort_node_map(void) 2612{ 2613 sort(early_node_map, (size_t)nr_nodemap_entries, 2614 sizeof(struct node_active_region), 2615 cmp_node_active_region, NULL); 2616} 2617 2618/* Find the lowest pfn for a node. This depends on a sorted early_node_map */ 2619unsigned long __init find_min_pfn_for_node(unsigned long nid) 2620{ 2621 int i; 2622 2623 /* Assuming a sorted map, the first range found has the starting pfn */ 2624 for_each_active_range_index_in_nid(i, nid) 2625 return early_node_map[i].start_pfn; 2626 2627 printk(KERN_WARNING "Could not find start_pfn for node %lu\n", nid); 2628 return 0; 2629} 2630 2631/** 2632 * find_min_pfn_with_active_regions - Find the minimum PFN registered 2633 * 2634 * It returns the minimum PFN based on information provided via 2635 * add_active_range() 2636 */ 2637unsigned long __init find_min_pfn_with_active_regions(void) 2638{ 2639 return find_min_pfn_for_node(MAX_NUMNODES); 2640} 2641 2642/** 2643 * find_max_pfn_with_active_regions - Find the maximum PFN registered 2644 * 2645 * It returns the maximum PFN based on information provided via 2646 * add_active_range() 2647 */ 2648unsigned long __init find_max_pfn_with_active_regions(void) 2649{ 2650 int i; 2651 unsigned long max_pfn = 0; 2652 2653 for (i = 0; i < nr_nodemap_entries; i++) 2654 max_pfn = max(max_pfn, early_node_map[i].end_pfn); 2655 2656 return max_pfn; 2657} 2658 2659/** 2660 * free_area_init_nodes - Initialise all pg_data_t and zone data 2661 * @arch_max_dma_pfn: The maximum PFN usable for ZONE_DMA 2662 * @arch_max_dma32_pfn: The maximum PFN usable for ZONE_DMA32 2663 * @arch_max_low_pfn: The maximum PFN usable for ZONE_NORMAL 2664 * @arch_max_high_pfn: The maximum PFN usable for ZONE_HIGHMEM 2665 * 2666 * This will call free_area_init_node() for each active node in the system. 2667 * Using the page ranges provided by add_active_range(), the size of each 2668 * zone in each node and their holes is calculated. If the maximum PFN 2669 * between two adjacent zones match, it is assumed that the zone is empty. 2670 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 2671 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 2672 * starts where the previous one ended. For example, ZONE_DMA32 starts 2673 * at arch_max_dma_pfn. 2674 */ 2675void __init free_area_init_nodes(unsigned long *max_zone_pfn) 2676{ 2677 unsigned long nid; 2678 enum zone_type i; 2679 2680 /* Record where the zone boundaries are */ 2681 memset(arch_zone_lowest_possible_pfn, 0, 2682 sizeof(arch_zone_lowest_possible_pfn)); 2683 memset(arch_zone_highest_possible_pfn, 0, 2684 sizeof(arch_zone_highest_possible_pfn)); 2685 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 2686 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 2687 for (i = 1; i < MAX_NR_ZONES; i++) { 2688 arch_zone_lowest_possible_pfn[i] = 2689 arch_zone_highest_possible_pfn[i-1]; 2690 arch_zone_highest_possible_pfn[i] = 2691 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 2692 } 2693 2694 /* Regions in the early_node_map can be in any order */ 2695 sort_node_map(); 2696 2697 /* Print out the zone ranges */ 2698 printk("Zone PFN ranges:\n"); 2699 for (i = 0; i < MAX_NR_ZONES; i++) 2700 printk(" %-8s %8lu -> %8lu\n", 2701 zone_names[i], 2702 arch_zone_lowest_possible_pfn[i], 2703 arch_zone_highest_possible_pfn[i]); 2704 2705 /* Print out the early_node_map[] */ 2706 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); 2707 for (i = 0; i < nr_nodemap_entries; i++) 2708 printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid, 2709 early_node_map[i].start_pfn, 2710 early_node_map[i].end_pfn); 2711 2712 /* Initialise every node */ 2713 for_each_online_node(nid) { 2714 pg_data_t *pgdat = NODE_DATA(nid); 2715 free_area_init_node(nid, pgdat, NULL, 2716 find_min_pfn_for_node(nid), NULL); 2717 } 2718} 2719#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 2720 2721/** 2722 * set_dma_reserve - Account the specified number of pages reserved in ZONE_DMA 2723 * @new_dma_reserve - The number of pages to mark reserved 2724 * 2725 * The per-cpu batchsize and zone watermarks are determined by present_pages. 2726 * In the DMA zone, a significant percentage may be consumed by kernel image 2727 * and other unfreeable allocations which can skew the watermarks badly. This 2728 * function may optionally be used to account for unfreeable pages in 2729 * ZONE_DMA. The effect will be lower watermarks and smaller per-cpu batchsize 2730 */ 2731void __init set_dma_reserve(unsigned long new_dma_reserve) 2732{ 2733 dma_reserve = new_dma_reserve; 2734} 2735 2736#ifndef CONFIG_NEED_MULTIPLE_NODES 2737static bootmem_data_t contig_bootmem_data; 2738struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data }; 2739 2740EXPORT_SYMBOL(contig_page_data); 2741#endif 2742 2743void __init free_area_init(unsigned long *zones_size) 2744{ 2745 free_area_init_node(0, NODE_DATA(0), zones_size, 2746 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 2747} 2748 2749#ifdef CONFIG_HOTPLUG_CPU 2750static int page_alloc_cpu_notify(struct notifier_block *self, 2751 unsigned long action, void *hcpu) 2752{ 2753 int cpu = (unsigned long)hcpu; 2754 2755 if (action == CPU_DEAD) { 2756 local_irq_disable(); 2757 __drain_pages(cpu); 2758 vm_events_fold_cpu(cpu); 2759 local_irq_enable(); 2760 refresh_cpu_vm_stats(cpu); 2761 } 2762 return NOTIFY_OK; 2763} 2764#endif /* CONFIG_HOTPLUG_CPU */ 2765 2766void __init page_alloc_init(void) 2767{ 2768 hotcpu_notifier(page_alloc_cpu_notify, 0); 2769} 2770 2771/* 2772 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio 2773 * or min_free_kbytes changes. 2774 */ 2775static void calculate_totalreserve_pages(void) 2776{ 2777 struct pglist_data *pgdat; 2778 unsigned long reserve_pages = 0; 2779 enum zone_type i, j; 2780 2781 for_each_online_pgdat(pgdat) { 2782 for (i = 0; i < MAX_NR_ZONES; i++) { 2783 struct zone *zone = pgdat->node_zones + i; 2784 unsigned long max = 0; 2785 2786 /* Find valid and maximum lowmem_reserve in the zone */ 2787 for (j = i; j < MAX_NR_ZONES; j++) { 2788 if (zone->lowmem_reserve[j] > max) 2789 max = zone->lowmem_reserve[j]; 2790 } 2791 2792 /* we treat pages_high as reserved pages. */ 2793 max += zone->pages_high; 2794 2795 if (max > zone->present_pages) 2796 max = zone->present_pages; 2797 reserve_pages += max; 2798 } 2799 } 2800 totalreserve_pages = reserve_pages; 2801} 2802 2803/* 2804 * setup_per_zone_lowmem_reserve - called whenever 2805 * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone 2806 * has a correct pages reserved value, so an adequate number of 2807 * pages are left in the zone after a successful __alloc_pages(). 2808 */ 2809static void setup_per_zone_lowmem_reserve(void) 2810{ 2811 struct pglist_data *pgdat; 2812 enum zone_type j, idx; 2813 2814 for_each_online_pgdat(pgdat) { 2815 for (j = 0; j < MAX_NR_ZONES; j++) { 2816 struct zone *zone = pgdat->node_zones + j; 2817 unsigned long present_pages = zone->present_pages; 2818 2819 zone->lowmem_reserve[j] = 0; 2820 2821 idx = j; 2822 while (idx) { 2823 struct zone *lower_zone; 2824 2825 idx--; 2826 2827 if (sysctl_lowmem_reserve_ratio[idx] < 1) 2828 sysctl_lowmem_reserve_ratio[idx] = 1; 2829 2830 lower_zone = pgdat->node_zones + idx; 2831 lower_zone->lowmem_reserve[j] = present_pages / 2832 sysctl_lowmem_reserve_ratio[idx]; 2833 present_pages += lower_zone->present_pages; 2834 } 2835 } 2836 } 2837 2838 /* update totalreserve_pages */ 2839 calculate_totalreserve_pages(); 2840} 2841 2842/* 2843 * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures 2844 * that the pages_{min,low,high} values for each zone are set correctly 2845 * with respect to min_free_kbytes. 2846 */ 2847void setup_per_zone_pages_min(void) 2848{ 2849 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 2850 unsigned long lowmem_pages = 0; 2851 struct zone *zone; 2852 unsigned long flags; 2853 2854 /* Calculate total number of !ZONE_HIGHMEM pages */ 2855 for_each_zone(zone) { 2856 if (!is_highmem(zone)) 2857 lowmem_pages += zone->present_pages; 2858 } 2859 2860 for_each_zone(zone) { 2861 u64 tmp; 2862 2863 spin_lock_irqsave(&zone->lru_lock, flags); 2864 tmp = (u64)pages_min * zone->present_pages; 2865 do_div(tmp, lowmem_pages); 2866 if (is_highmem(zone)) { 2867 /* 2868 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 2869 * need highmem pages, so cap pages_min to a small 2870 * value here. 2871 * 2872 * The (pages_high-pages_low) and (pages_low-pages_min) 2873 * deltas controls asynch page reclaim, and so should 2874 * not be capped for highmem. 2875 */ 2876 int min_pages; 2877 2878 min_pages = zone->present_pages / 1024; 2879 if (min_pages < SWAP_CLUSTER_MAX) 2880 min_pages = SWAP_CLUSTER_MAX; 2881 if (min_pages > 128) 2882 min_pages = 128; 2883 zone->pages_min = min_pages; 2884 } else { 2885 /* 2886 * If it's a lowmem zone, reserve a number of pages 2887 * proportionate to the zone's size. 2888 */ 2889 zone->pages_min = tmp; 2890 } 2891 2892 zone->pages_low = zone->pages_min + (tmp >> 2); 2893 zone->pages_high = zone->pages_min + (tmp >> 1); 2894 spin_unlock_irqrestore(&zone->lru_lock, flags); 2895 } 2896 2897 /* update totalreserve_pages */ 2898 calculate_totalreserve_pages(); 2899} 2900 2901/* 2902 * Initialise min_free_kbytes. 2903 * 2904 * For small machines we want it small (128k min). For large machines 2905 * we want it large (64MB max). But it is not linear, because network 2906 * bandwidth does not increase linearly with machine size. We use 2907 * 2908 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 2909 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 2910 * 2911 * which yields 2912 * 2913 * 16MB: 512k 2914 * 32MB: 724k 2915 * 64MB: 1024k 2916 * 128MB: 1448k 2917 * 256MB: 2048k 2918 * 512MB: 2896k 2919 * 1024MB: 4096k 2920 * 2048MB: 5792k 2921 * 4096MB: 8192k 2922 * 8192MB: 11584k 2923 * 16384MB: 16384k 2924 */ 2925static int __init init_per_zone_pages_min(void) 2926{ 2927 unsigned long lowmem_kbytes; 2928 2929 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 2930 2931 min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 2932 if (min_free_kbytes < 128) 2933 min_free_kbytes = 128; 2934 if (min_free_kbytes > 65536) 2935 min_free_kbytes = 65536; 2936 setup_per_zone_pages_min(); 2937 setup_per_zone_lowmem_reserve(); 2938 return 0; 2939} 2940module_init(init_per_zone_pages_min) 2941 2942/* 2943 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 2944 * that we can call two helper functions whenever min_free_kbytes 2945 * changes. 2946 */ 2947int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 2948 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2949{ 2950 proc_dointvec(table, write, file, buffer, length, ppos); 2951 setup_per_zone_pages_min(); 2952 return 0; 2953} 2954 2955#ifdef CONFIG_NUMA 2956int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, 2957 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2958{ 2959 struct zone *zone; 2960 int rc; 2961 2962 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 2963 if (rc) 2964 return rc; 2965 2966 for_each_zone(zone) 2967 zone->min_unmapped_pages = (zone->present_pages * 2968 sysctl_min_unmapped_ratio) / 100; 2969 return 0; 2970} 2971 2972int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write, 2973 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2974{ 2975 struct zone *zone; 2976 int rc; 2977 2978 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 2979 if (rc) 2980 return rc; 2981 2982 for_each_zone(zone) 2983 zone->min_slab_pages = (zone->present_pages * 2984 sysctl_min_slab_ratio) / 100; 2985 return 0; 2986} 2987#endif 2988 2989/* 2990 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 2991 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 2992 * whenever sysctl_lowmem_reserve_ratio changes. 2993 * 2994 * The reserve ratio obviously has absolutely no relation with the 2995 * pages_min watermarks. The lowmem reserve ratio can only make sense 2996 * if in function of the boot time zone sizes. 2997 */ 2998int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 2999 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3000{ 3001 proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3002 setup_per_zone_lowmem_reserve(); 3003 return 0; 3004} 3005 3006/* 3007 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 3008 * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist 3009 * can have before it gets flushed back to buddy allocator. 3010 */ 3011 3012int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 3013 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3014{ 3015 struct zone *zone; 3016 unsigned int cpu; 3017 int ret; 3018 3019 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3020 if (!write || (ret == -EINVAL)) 3021 return ret; 3022 for_each_zone(zone) { 3023 for_each_online_cpu(cpu) { 3024 unsigned long high; 3025 high = zone->present_pages / percpu_pagelist_fraction; 3026 setup_pagelist_highmark(zone_pcp(zone, cpu), high); 3027 } 3028 } 3029 return 0; 3030} 3031 3032int hashdist = HASHDIST_DEFAULT; 3033 3034#ifdef CONFIG_NUMA 3035static int __init set_hashdist(char *str) 3036{ 3037 if (!str) 3038 return 0; 3039 hashdist = simple_strtoul(str, &str, 0); 3040 return 1; 3041} 3042__setup("hashdist=", set_hashdist); 3043#endif 3044 3045/* 3046 * allocate a large system hash table from bootmem 3047 * - it is assumed that the hash table must contain an exact power-of-2 3048 * quantity of entries 3049 * - limit is the number of hash buckets, not the total allocation size 3050 */ 3051void *__init alloc_large_system_hash(const char *tablename, 3052 unsigned long bucketsize, 3053 unsigned long numentries, 3054 int scale, 3055 int flags, 3056 unsigned int *_hash_shift, 3057 unsigned int *_hash_mask, 3058 unsigned long limit) 3059{ 3060 unsigned long long max = limit; 3061 unsigned long log2qty, size; 3062 void *table = NULL; 3063 3064 /* allow the kernel cmdline to have a say */ 3065 if (!numentries) { 3066 /* round applicable memory size up to nearest megabyte */ 3067 numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages; 3068 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 3069 numentries >>= 20 - PAGE_SHIFT; 3070 numentries <<= 20 - PAGE_SHIFT; 3071 3072 /* limit to 1 bucket per 2^scale bytes of low memory */ 3073 if (scale > PAGE_SHIFT) 3074 numentries >>= (scale - PAGE_SHIFT); 3075 else 3076 numentries <<= (PAGE_SHIFT - scale); 3077 } 3078 numentries = roundup_pow_of_two(numentries); 3079 3080 /* limit allocation size to 1/16 total memory by default */ 3081 if (max == 0) { 3082 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 3083 do_div(max, bucketsize); 3084 } 3085 3086 if (numentries > max) 3087 numentries = max; 3088 3089 log2qty = long_log2(numentries); 3090 3091 do { 3092 size = bucketsize << log2qty; 3093 if (flags & HASH_EARLY) 3094 table = alloc_bootmem(size); 3095 else if (hashdist) 3096 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 3097 else { 3098 unsigned long order; 3099 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++) 3100 ; 3101 table = (void*) __get_free_pages(GFP_ATOMIC, order); 3102 } 3103 } while (!table && size > PAGE_SIZE && --log2qty); 3104 3105 if (!table) 3106 panic("Failed to allocate %s hash table\n", tablename); 3107 3108 printk("%s hash table entries: %d (order: %d, %lu bytes)\n", 3109 tablename, 3110 (1U << log2qty), 3111 long_log2(size) - PAGE_SHIFT, 3112 size); 3113 3114 if (_hash_shift) 3115 *_hash_shift = log2qty; 3116 if (_hash_mask) 3117 *_hash_mask = (1 << log2qty) - 1; 3118 3119 return table; 3120} 3121 3122#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE 3123struct page *pfn_to_page(unsigned long pfn) 3124{ 3125 return __pfn_to_page(pfn); 3126} 3127unsigned long page_to_pfn(struct page *page) 3128{ 3129 return __page_to_pfn(page); 3130} 3131EXPORT_SYMBOL(pfn_to_page); 3132EXPORT_SYMBOL(page_to_pfn); 3133#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ 3134