page_alloc.c revision 933e312e73f8fc39652bd4d216a5393cc3a014b9
1/* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17#include <linux/stddef.h> 18#include <linux/mm.h> 19#include <linux/swap.h> 20#include <linux/interrupt.h> 21#include <linux/pagemap.h> 22#include <linux/bootmem.h> 23#include <linux/compiler.h> 24#include <linux/kernel.h> 25#include <linux/module.h> 26#include <linux/suspend.h> 27#include <linux/pagevec.h> 28#include <linux/blkdev.h> 29#include <linux/slab.h> 30#include <linux/notifier.h> 31#include <linux/topology.h> 32#include <linux/sysctl.h> 33#include <linux/cpu.h> 34#include <linux/cpuset.h> 35#include <linux/memory_hotplug.h> 36#include <linux/nodemask.h> 37#include <linux/vmalloc.h> 38#include <linux/mempolicy.h> 39#include <linux/stop_machine.h> 40#include <linux/sort.h> 41#include <linux/pfn.h> 42#include <linux/backing-dev.h> 43#include <linux/fault-inject.h> 44 45#include <asm/tlbflush.h> 46#include <asm/div64.h> 47#include "internal.h" 48 49/* 50 * MCD - HACK: Find somewhere to initialize this EARLY, or make this 51 * initializer cleaner 52 */ 53nodemask_t node_online_map __read_mostly = { { [0] = 1UL } }; 54EXPORT_SYMBOL(node_online_map); 55nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL; 56EXPORT_SYMBOL(node_possible_map); 57unsigned long totalram_pages __read_mostly; 58unsigned long totalreserve_pages __read_mostly; 59long nr_swap_pages; 60int percpu_pagelist_fraction; 61 62static void __free_pages_ok(struct page *page, unsigned int order); 63 64/* 65 * results with 256, 32 in the lowmem_reserve sysctl: 66 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 67 * 1G machine -> (16M dma, 784M normal, 224M high) 68 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 69 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 70 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA 71 * 72 * TBD: should special case ZONE_DMA32 machines here - in those we normally 73 * don't need any ZONE_NORMAL reservation 74 */ 75int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 76 256, 77#ifdef CONFIG_ZONE_DMA32 78 256, 79#endif 80#ifdef CONFIG_HIGHMEM 81 32 82#endif 83}; 84 85EXPORT_SYMBOL(totalram_pages); 86 87static char * const zone_names[MAX_NR_ZONES] = { 88 "DMA", 89#ifdef CONFIG_ZONE_DMA32 90 "DMA32", 91#endif 92 "Normal", 93#ifdef CONFIG_HIGHMEM 94 "HighMem" 95#endif 96}; 97 98int min_free_kbytes = 1024; 99 100unsigned long __meminitdata nr_kernel_pages; 101unsigned long __meminitdata nr_all_pages; 102static unsigned long __initdata dma_reserve; 103 104#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 105 /* 106 * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct 107 * ranges of memory (RAM) that may be registered with add_active_range(). 
108 * Ranges passed to add_active_range() will be merged if possible 109 * so the number of times add_active_range() can be called is 110 * related to the number of nodes and the number of holes 111 */ 112 #ifdef CONFIG_MAX_ACTIVE_REGIONS 113 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ 114 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS 115 #else 116 #if MAX_NUMNODES >= 32 117 /* If there can be many nodes, allow up to 50 holes per node */ 118 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) 119 #else 120 /* By default, allow up to 256 distinct regions */ 121 #define MAX_ACTIVE_REGIONS 256 122 #endif 123 #endif 124 125 struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS]; 126 int __initdata nr_nodemap_entries; 127 unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; 128 unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; 129#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 130 unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES]; 131 unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES]; 132#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 133#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 134 135#ifdef CONFIG_DEBUG_VM 136static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 137{ 138 int ret = 0; 139 unsigned seq; 140 unsigned long pfn = page_to_pfn(page); 141 142 do { 143 seq = zone_span_seqbegin(zone); 144 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 145 ret = 1; 146 else if (pfn < zone->zone_start_pfn) 147 ret = 1; 148 } while (zone_span_seqretry(zone, seq)); 149 150 return ret; 151} 152 153static int page_is_consistent(struct zone *zone, struct page *page) 154{ 155#ifdef CONFIG_HOLES_IN_ZONE 156 if (!pfn_valid(page_to_pfn(page))) 157 return 0; 158#endif 159 if (zone != page_zone(page)) 160 return 0; 161 162 return 1; 163} 164/* 165 * Temporary debugging check for pages not lying within a given zone. 166 */ 167static int bad_range(struct zone *zone, struct page *page) 168{ 169 if (page_outside_zone_boundaries(zone, page)) 170 return 1; 171 if (!page_is_consistent(zone, page)) 172 return 1; 173 174 return 0; 175} 176#else 177static inline int bad_range(struct zone *zone, struct page *page) 178{ 179 return 0; 180} 181#endif 182 183static void bad_page(struct page *page) 184{ 185 printk(KERN_EMERG "Bad page state in process '%s'\n" 186 KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" 187 KERN_EMERG "Trying to fix it up, but a reboot is needed\n" 188 KERN_EMERG "Backtrace:\n", 189 current->comm, page, (int)(2*sizeof(unsigned long)), 190 (unsigned long)page->flags, page->mapping, 191 page_mapcount(page), page_count(page)); 192 dump_stack(); 193 page->flags &= ~(1 << PG_lru | 194 1 << PG_private | 195 1 << PG_locked | 196 1 << PG_active | 197 1 << PG_dirty | 198 1 << PG_reclaim | 199 1 << PG_slab | 200 1 << PG_swapcache | 201 1 << PG_writeback | 202 1 << PG_buddy ); 203 set_page_count(page, 0); 204 reset_page_mapcount(page); 205 page->mapping = NULL; 206 add_taint(TAINT_BAD_PAGE); 207} 208 209/* 210 * Higher-order pages are called "compound pages". They are structured thusly: 211 * 212 * The first PAGE_SIZE page is called the "head page". 213 * 214 * The remaining PAGE_SIZE pages are called "tail pages". 215 * 216 * All pages have PG_compound set. All pages have their ->private pointing at 217 * the head page (even the head page has this). 218 * 219 * The first tail page's ->lru.next holds the address of the compound page's 220 * put_page() function. 
Its ->lru.prev holds the order of allocation. 221 * This usage means that zero-order pages may not be compound. 222 */ 223 224static void free_compound_page(struct page *page) 225{ 226 __free_pages_ok(page, (unsigned long)page[1].lru.prev); 227} 228 229static void prep_compound_page(struct page *page, unsigned long order) 230{ 231 int i; 232 int nr_pages = 1 << order; 233 234 set_compound_page_dtor(page, free_compound_page); 235 page[1].lru.prev = (void *)order; 236 for (i = 0; i < nr_pages; i++) { 237 struct page *p = page + i; 238 239 __SetPageCompound(p); 240 set_page_private(p, (unsigned long)page); 241 } 242} 243 244static void destroy_compound_page(struct page *page, unsigned long order) 245{ 246 int i; 247 int nr_pages = 1 << order; 248 249 if (unlikely((unsigned long)page[1].lru.prev != order)) 250 bad_page(page); 251 252 for (i = 0; i < nr_pages; i++) { 253 struct page *p = page + i; 254 255 if (unlikely(!PageCompound(p) | 256 (page_private(p) != (unsigned long)page))) 257 bad_page(page); 258 __ClearPageCompound(p); 259 } 260} 261 262static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 263{ 264 int i; 265 266 VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM); 267 /* 268 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 269 * and __GFP_HIGHMEM from hard or soft interrupt context. 270 */ 271 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt()); 272 for (i = 0; i < (1 << order); i++) 273 clear_highpage(page + i); 274} 275 276/* 277 * function for dealing with page's order in buddy system. 278 * zone->lock is already acquired when we use these. 279 * So, we don't need atomic page->flags operations here. 280 */ 281static inline unsigned long page_order(struct page *page) 282{ 283 return page_private(page); 284} 285 286static inline void set_page_order(struct page *page, int order) 287{ 288 set_page_private(page, order); 289 __SetPageBuddy(page); 290} 291 292static inline void rmv_page_order(struct page *page) 293{ 294 __ClearPageBuddy(page); 295 set_page_private(page, 0); 296} 297 298/* 299 * Locate the struct page for both the matching buddy in our 300 * pair (buddy1) and the combined O(n+1) page they form (page). 301 * 302 * 1) Any buddy B1 will have an order O twin B2 which satisfies 303 * the following equation: 304 * B2 = B1 ^ (1 << O) 305 * For example, if the starting buddy (buddy2) is #8 its order 306 * 1 buddy is #10: 307 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 308 * 309 * 2) Any buddy B will have an order O+1 parent P which 310 * satisfies the following equation: 311 * P = B & ~(1 << O) 312 * 313 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER 314 */ 315static inline struct page * 316__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order) 317{ 318 unsigned long buddy_idx = page_idx ^ (1 << order); 319 320 return page + (buddy_idx - page_idx); 321} 322 323static inline unsigned long 324__find_combined_index(unsigned long page_idx, unsigned int order) 325{ 326 return (page_idx & ~(1 << order)); 327} 328 329/* 330 * This function checks whether a page is free && is the buddy 331 * we can do coalesce a page and its buddy if 332 * (a) the buddy is not in a hole && 333 * (b) the buddy is in the buddy system && 334 * (c) a page and its buddy have the same order && 335 * (d) a page and its buddy are in the same zone. 336 * 337 * For recording whether a page is in the buddy system, we use PG_buddy. 338 * Setting, clearing, and testing PG_buddy is serialized by zone->lock. 
339 * 340 * For recording page's order, we use page_private(page). 341 */ 342static inline int page_is_buddy(struct page *page, struct page *buddy, 343 int order) 344{ 345#ifdef CONFIG_HOLES_IN_ZONE 346 if (!pfn_valid(page_to_pfn(buddy))) 347 return 0; 348#endif 349 350 if (page_zone_id(page) != page_zone_id(buddy)) 351 return 0; 352 353 if (PageBuddy(buddy) && page_order(buddy) == order) { 354 BUG_ON(page_count(buddy) != 0); 355 return 1; 356 } 357 return 0; 358} 359 360/* 361 * Freeing function for a buddy system allocator. 362 * 363 * The concept of a buddy system is to maintain direct-mapped table 364 * (containing bit values) for memory blocks of various "orders". 365 * The bottom level table contains the map for the smallest allocatable 366 * units of memory (here, pages), and each level above it describes 367 * pairs of units from the levels below, hence, "buddies". 368 * At a high level, all that happens here is marking the table entry 369 * at the bottom level available, and propagating the changes upward 370 * as necessary, plus some accounting needed to play nicely with other 371 * parts of the VM system. 372 * At each level, we keep a list of pages, which are heads of continuous 373 * free pages of length of (1 << order) and marked with PG_buddy. Page's 374 * order is recorded in page_private(page) field. 375 * So when we are allocating or freeing one, we can derive the state of the 376 * other. That is, if we allocate a small block, and both were 377 * free, the remainder of the region must be split into blocks. 378 * If a block is freed, and its buddy is also free, then this 379 * triggers coalescing into a block of larger size. 380 * 381 * -- wli 382 */ 383 384static inline void __free_one_page(struct page *page, 385 struct zone *zone, unsigned int order) 386{ 387 unsigned long page_idx; 388 int order_size = 1 << order; 389 390 if (unlikely(PageCompound(page))) 391 destroy_compound_page(page, order); 392 393 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 394 395 VM_BUG_ON(page_idx & (order_size - 1)); 396 VM_BUG_ON(bad_range(zone, page)); 397 398 zone->free_pages += order_size; 399 while (order < MAX_ORDER-1) { 400 unsigned long combined_idx; 401 struct free_area *area; 402 struct page *buddy; 403 404 buddy = __page_find_buddy(page, page_idx, order); 405 if (!page_is_buddy(page, buddy, order)) 406 break; /* Move the buddy up one level. */ 407 408 list_del(&buddy->lru); 409 area = zone->free_area + order; 410 area->nr_free--; 411 rmv_page_order(buddy); 412 combined_idx = __find_combined_index(page_idx, order); 413 page = page + (combined_idx - page_idx); 414 page_idx = combined_idx; 415 order++; 416 } 417 set_page_order(page, order); 418 list_add(&page->lru, &zone->free_area[order].free_list); 419 zone->free_area[order].nr_free++; 420} 421 422static inline int free_pages_check(struct page *page) 423{ 424 if (unlikely(page_mapcount(page) | 425 (page->mapping != NULL) | 426 (page_count(page) != 0) | 427 (page->flags & ( 428 1 << PG_lru | 429 1 << PG_private | 430 1 << PG_locked | 431 1 << PG_active | 432 1 << PG_reclaim | 433 1 << PG_slab | 434 1 << PG_swapcache | 435 1 << PG_writeback | 436 1 << PG_reserved | 437 1 << PG_buddy )))) 438 bad_page(page); 439 if (PageDirty(page)) 440 __ClearPageDirty(page); 441 /* 442 * For now, we report if PG_reserved was found set, but do not 443 * clear it, and do not free the page. But we shall soon need 444 * to do more, for when the ZERO_PAGE count wraps negative. 
445 */ 446 return PageReserved(page); 447} 448 449/* 450 * Frees a list of pages. 451 * Assumes all pages on list are in same zone, and of same order. 452 * count is the number of pages to free. 453 * 454 * If the zone was previously in an "all pages pinned" state then look to 455 * see if this freeing clears that state. 456 * 457 * And clear the zone's pages_scanned counter, to hold off the "all pages are 458 * pinned" detection logic. 459 */ 460static void free_pages_bulk(struct zone *zone, int count, 461 struct list_head *list, int order) 462{ 463 spin_lock(&zone->lock); 464 zone->all_unreclaimable = 0; 465 zone->pages_scanned = 0; 466 while (count--) { 467 struct page *page; 468 469 VM_BUG_ON(list_empty(list)); 470 page = list_entry(list->prev, struct page, lru); 471 /* have to delete it as __free_one_page list manipulates */ 472 list_del(&page->lru); 473 __free_one_page(page, zone, order); 474 } 475 spin_unlock(&zone->lock); 476} 477 478static void free_one_page(struct zone *zone, struct page *page, int order) 479{ 480 spin_lock(&zone->lock); 481 zone->all_unreclaimable = 0; 482 zone->pages_scanned = 0; 483 __free_one_page(page, zone, order); 484 spin_unlock(&zone->lock); 485} 486 487static void __free_pages_ok(struct page *page, unsigned int order) 488{ 489 unsigned long flags; 490 int i; 491 int reserved = 0; 492 493 for (i = 0 ; i < (1 << order) ; ++i) 494 reserved += free_pages_check(page + i); 495 if (reserved) 496 return; 497 498 if (!PageHighMem(page)) 499 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); 500 arch_free_page(page, order); 501 kernel_map_pages(page, 1 << order, 0); 502 503 local_irq_save(flags); 504 __count_vm_events(PGFREE, 1 << order); 505 free_one_page(page_zone(page), page, order); 506 local_irq_restore(flags); 507} 508 509/* 510 * permit the bootmem allocator to evade page validation on high-order frees 511 */ 512void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order) 513{ 514 if (order == 0) { 515 __ClearPageReserved(page); 516 set_page_count(page, 0); 517 set_page_refcounted(page); 518 __free_page(page); 519 } else { 520 int loop; 521 522 prefetchw(page); 523 for (loop = 0; loop < BITS_PER_LONG; loop++) { 524 struct page *p = &page[loop]; 525 526 if (loop + 1 < BITS_PER_LONG) 527 prefetchw(p + 1); 528 __ClearPageReserved(p); 529 set_page_count(p, 0); 530 } 531 532 set_page_refcounted(page); 533 __free_pages(page, order); 534 } 535} 536 537 538/* 539 * The order of subdivision here is critical for the IO subsystem. 540 * Please do not alter this order without good reasons and regression 541 * testing. Specifically, as large blocks of memory are subdivided, 542 * the order in which smaller blocks are delivered depends on the order 543 * they're subdivided in this function. This is the primary factor 544 * influencing the order in which pages are delivered to the IO 545 * subsystem according to empirical testing, and this is also justified 546 * by considering the behavior of a buddy system containing a single 547 * large block of memory acted on by a series of small allocations. 548 * This behavior is a critical factor in sglist merging's success. 
549 * 550 * -- wli 551 */ 552static inline void expand(struct zone *zone, struct page *page, 553 int low, int high, struct free_area *area) 554{ 555 unsigned long size = 1 << high; 556 557 while (high > low) { 558 area--; 559 high--; 560 size >>= 1; 561 VM_BUG_ON(bad_range(zone, &page[size])); 562 list_add(&page[size].lru, &area->free_list); 563 area->nr_free++; 564 set_page_order(&page[size], high); 565 } 566} 567 568/* 569 * This page is about to be returned from the page allocator 570 */ 571static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 572{ 573 if (unlikely(page_mapcount(page) | 574 (page->mapping != NULL) | 575 (page_count(page) != 0) | 576 (page->flags & ( 577 1 << PG_lru | 578 1 << PG_private | 579 1 << PG_locked | 580 1 << PG_active | 581 1 << PG_dirty | 582 1 << PG_reclaim | 583 1 << PG_slab | 584 1 << PG_swapcache | 585 1 << PG_writeback | 586 1 << PG_reserved | 587 1 << PG_buddy )))) 588 bad_page(page); 589 590 /* 591 * For now, we report if PG_reserved was found set, but do not 592 * clear it, and do not allocate the page: as a safety net. 593 */ 594 if (PageReserved(page)) 595 return 1; 596 597 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 598 1 << PG_referenced | 1 << PG_arch_1 | 599 1 << PG_checked | 1 << PG_mappedtodisk); 600 set_page_private(page, 0); 601 set_page_refcounted(page); 602 603 arch_alloc_page(page, order); 604 kernel_map_pages(page, 1 << order, 1); 605 606 if (gfp_flags & __GFP_ZERO) 607 prep_zero_page(page, order, gfp_flags); 608 609 if (order && (gfp_flags & __GFP_COMP)) 610 prep_compound_page(page, order); 611 612 return 0; 613} 614 615/* 616 * Do the hard work of removing an element from the buddy allocator. 617 * Call me with the zone->lock already held. 618 */ 619static struct page *__rmqueue(struct zone *zone, unsigned int order) 620{ 621 struct free_area * area; 622 unsigned int current_order; 623 struct page *page; 624 625 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 626 area = zone->free_area + current_order; 627 if (list_empty(&area->free_list)) 628 continue; 629 630 page = list_entry(area->free_list.next, struct page, lru); 631 list_del(&page->lru); 632 rmv_page_order(page); 633 area->nr_free--; 634 zone->free_pages -= 1UL << order; 635 expand(zone, page, order, current_order, area); 636 return page; 637 } 638 639 return NULL; 640} 641 642/* 643 * Obtain a specified number of elements from the buddy allocator, all under 644 * a single hold of the lock, for efficiency. Add them to the supplied list. 645 * Returns the number of new pages which were placed at *list. 646 */ 647static int rmqueue_bulk(struct zone *zone, unsigned int order, 648 unsigned long count, struct list_head *list) 649{ 650 int i; 651 652 spin_lock(&zone->lock); 653 for (i = 0; i < count; ++i) { 654 struct page *page = __rmqueue(zone, order); 655 if (unlikely(page == NULL)) 656 break; 657 list_add_tail(&page->lru, list); 658 } 659 spin_unlock(&zone->lock); 660 return i; 661} 662 663#ifdef CONFIG_NUMA 664/* 665 * Called from the slab reaper to drain pagesets on a particular node that 666 * belongs to the currently executing processor. 667 * Note that this function must be called with the thread pinned to 668 * a single processor. 
669 */ 670void drain_node_pages(int nodeid) 671{ 672 int i; 673 enum zone_type z; 674 unsigned long flags; 675 676 for (z = 0; z < MAX_NR_ZONES; z++) { 677 struct zone *zone = NODE_DATA(nodeid)->node_zones + z; 678 struct per_cpu_pageset *pset; 679 680 if (!populated_zone(zone)) 681 continue; 682 683 pset = zone_pcp(zone, smp_processor_id()); 684 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 685 struct per_cpu_pages *pcp; 686 687 pcp = &pset->pcp[i]; 688 if (pcp->count) { 689 int to_drain; 690 691 local_irq_save(flags); 692 if (pcp->count >= pcp->batch) 693 to_drain = pcp->batch; 694 else 695 to_drain = pcp->count; 696 free_pages_bulk(zone, to_drain, &pcp->list, 0); 697 pcp->count -= to_drain; 698 local_irq_restore(flags); 699 } 700 } 701 } 702} 703#endif 704 705static void __drain_pages(unsigned int cpu) 706{ 707 unsigned long flags; 708 struct zone *zone; 709 int i; 710 711 for_each_zone(zone) { 712 struct per_cpu_pageset *pset; 713 714 pset = zone_pcp(zone, cpu); 715 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 716 struct per_cpu_pages *pcp; 717 718 pcp = &pset->pcp[i]; 719 local_irq_save(flags); 720 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 721 pcp->count = 0; 722 local_irq_restore(flags); 723 } 724 } 725} 726 727#ifdef CONFIG_PM 728 729void mark_free_pages(struct zone *zone) 730{ 731 unsigned long pfn, max_zone_pfn; 732 unsigned long flags; 733 int order; 734 struct list_head *curr; 735 736 if (!zone->spanned_pages) 737 return; 738 739 spin_lock_irqsave(&zone->lock, flags); 740 741 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 742 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 743 if (pfn_valid(pfn)) { 744 struct page *page = pfn_to_page(pfn); 745 746 if (!PageNosave(page)) 747 ClearPageNosaveFree(page); 748 } 749 750 for (order = MAX_ORDER - 1; order >= 0; --order) 751 list_for_each(curr, &zone->free_area[order].free_list) { 752 unsigned long i; 753 754 pfn = page_to_pfn(list_entry(curr, struct page, lru)); 755 for (i = 0; i < (1UL << order); i++) 756 SetPageNosaveFree(pfn_to_page(pfn + i)); 757 } 758 759 spin_unlock_irqrestore(&zone->lock, flags); 760} 761 762/* 763 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 
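 *
 * The per-cpu lists are normally touched only by their owning CPU with
 * interrupts disabled, so disabling local interrupts here is enough to
 * drain them safely.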
764 */ 765void drain_local_pages(void) 766{ 767 unsigned long flags; 768 769 local_irq_save(flags); 770 __drain_pages(smp_processor_id()); 771 local_irq_restore(flags); 772} 773#endif /* CONFIG_PM */ 774 775/* 776 * Free a 0-order page 777 */ 778static void fastcall free_hot_cold_page(struct page *page, int cold) 779{ 780 struct zone *zone = page_zone(page); 781 struct per_cpu_pages *pcp; 782 unsigned long flags; 783 784 if (PageAnon(page)) 785 page->mapping = NULL; 786 if (free_pages_check(page)) 787 return; 788 789 if (!PageHighMem(page)) 790 debug_check_no_locks_freed(page_address(page), PAGE_SIZE); 791 arch_free_page(page, 0); 792 kernel_map_pages(page, 1, 0); 793 794 pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; 795 local_irq_save(flags); 796 __count_vm_event(PGFREE); 797 list_add(&page->lru, &pcp->list); 798 pcp->count++; 799 if (pcp->count >= pcp->high) { 800 free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 801 pcp->count -= pcp->batch; 802 } 803 local_irq_restore(flags); 804 put_cpu(); 805} 806 807void fastcall free_hot_page(struct page *page) 808{ 809 free_hot_cold_page(page, 0); 810} 811 812void fastcall free_cold_page(struct page *page) 813{ 814 free_hot_cold_page(page, 1); 815} 816 817/* 818 * split_page takes a non-compound higher-order page, and splits it into 819 * n (1<<order) sub-pages: page[0..n] 820 * Each sub-page must be freed individually. 821 * 822 * Note: this is probably too low level an operation for use in drivers. 823 * Please consult with lkml before using this in your driver. 824 */ 825void split_page(struct page *page, unsigned int order) 826{ 827 int i; 828 829 VM_BUG_ON(PageCompound(page)); 830 VM_BUG_ON(!page_count(page)); 831 for (i = 1; i < (1 << order); i++) 832 set_page_refcounted(page + i); 833} 834 835/* 836 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 837 * we cheat by calling it from here, in the order > 0 path. Saves a branch 838 * or two. 
839 */ 840static struct page *buffered_rmqueue(struct zonelist *zonelist, 841 struct zone *zone, int order, gfp_t gfp_flags) 842{ 843 unsigned long flags; 844 struct page *page; 845 int cold = !!(gfp_flags & __GFP_COLD); 846 int cpu; 847 848again: 849 cpu = get_cpu(); 850 if (likely(order == 0)) { 851 struct per_cpu_pages *pcp; 852 853 pcp = &zone_pcp(zone, cpu)->pcp[cold]; 854 local_irq_save(flags); 855 if (!pcp->count) { 856 pcp->count = rmqueue_bulk(zone, 0, 857 pcp->batch, &pcp->list); 858 if (unlikely(!pcp->count)) 859 goto failed; 860 } 861 page = list_entry(pcp->list.next, struct page, lru); 862 list_del(&page->lru); 863 pcp->count--; 864 } else { 865 spin_lock_irqsave(&zone->lock, flags); 866 page = __rmqueue(zone, order); 867 spin_unlock(&zone->lock); 868 if (!page) 869 goto failed; 870 } 871 872 __count_zone_vm_events(PGALLOC, zone, 1 << order); 873 zone_statistics(zonelist, zone); 874 local_irq_restore(flags); 875 put_cpu(); 876 877 VM_BUG_ON(bad_range(zone, page)); 878 if (prep_new_page(page, order, gfp_flags)) 879 goto again; 880 return page; 881 882failed: 883 local_irq_restore(flags); 884 put_cpu(); 885 return NULL; 886} 887 888#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 889#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ 890#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ 891#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ 892#define ALLOC_HARDER 0x10 /* try to alloc harder */ 893#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 894#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 895 896#ifdef CONFIG_FAIL_PAGE_ALLOC 897 898static struct fail_page_alloc_attr { 899 struct fault_attr attr; 900 901 u32 ignore_gfp_highmem; 902 u32 ignore_gfp_wait; 903 904#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 905 906 struct dentry *ignore_gfp_highmem_file; 907 struct dentry *ignore_gfp_wait_file; 908 909#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 910 911} fail_page_alloc = { 912 .attr = FAULT_ATTR_INITIALIZER, 913}; 914 915static int __init setup_fail_page_alloc(char *str) 916{ 917 return setup_fault_attr(&fail_page_alloc.attr, str); 918} 919__setup("fail_page_alloc=", setup_fail_page_alloc); 920 921static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 922{ 923 if (gfp_mask & __GFP_NOFAIL) 924 return 0; 925 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 926 return 0; 927 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) 928 return 0; 929 930 return should_fail(&fail_page_alloc.attr, 1 << order); 931} 932 933#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 934 935static int __init fail_page_alloc_debugfs(void) 936{ 937 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 938 struct dentry *dir; 939 int err; 940 941 err = init_fault_attr_dentries(&fail_page_alloc.attr, 942 "fail_page_alloc"); 943 if (err) 944 return err; 945 dir = fail_page_alloc.attr.dentries.dir; 946 947 fail_page_alloc.ignore_gfp_wait_file = 948 debugfs_create_bool("ignore-gfp-wait", mode, dir, 949 &fail_page_alloc.ignore_gfp_wait); 950 951 fail_page_alloc.ignore_gfp_highmem_file = 952 debugfs_create_bool("ignore-gfp-highmem", mode, dir, 953 &fail_page_alloc.ignore_gfp_highmem); 954 955 if (!fail_page_alloc.ignore_gfp_wait_file || 956 !fail_page_alloc.ignore_gfp_highmem_file) { 957 err = -ENOMEM; 958 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file); 959 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file); 960 cleanup_fault_attr_dentries(&fail_page_alloc.attr); 961 } 962 963 return err; 964} 965 
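/*
 * Example (illustrative, not part of the original file): once the files
 * above exist, failure injection can be narrowed at run time, e.g.
 *
 *	echo 1 > <debugfs mount>/fail_page_alloc/ignore-gfp-wait
 *
 * so that only allocations which cannot sleep (!__GFP_WAIT) remain
 * candidates for an injected failure, matching the checks in
 * should_fail_alloc_page() above.
 */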
966late_initcall(fail_page_alloc_debugfs); 967 968#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 969 970#else /* CONFIG_FAIL_PAGE_ALLOC */ 971 972static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 973{ 974 return 0; 975} 976 977#endif /* CONFIG_FAIL_PAGE_ALLOC */ 978 979/* 980 * Return 1 if free pages are above 'mark'. This takes into account the order 981 * of the allocation. 982 */ 983int zone_watermark_ok(struct zone *z, int order, unsigned long mark, 984 int classzone_idx, int alloc_flags) 985{ 986 /* free_pages my go negative - that's OK */ 987 unsigned long min = mark; 988 long free_pages = z->free_pages - (1 << order) + 1; 989 int o; 990 991 if (alloc_flags & ALLOC_HIGH) 992 min -= min / 2; 993 if (alloc_flags & ALLOC_HARDER) 994 min -= min / 4; 995 996 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 997 return 0; 998 for (o = 0; o < order; o++) { 999 /* At the next order, this order's pages become unavailable */ 1000 free_pages -= z->free_area[o].nr_free << o; 1001 1002 /* Require fewer higher order pages to be free */ 1003 min >>= 1; 1004 1005 if (free_pages <= min) 1006 return 0; 1007 } 1008 return 1; 1009} 1010 1011#ifdef CONFIG_NUMA 1012/* 1013 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to 1014 * skip over zones that are not allowed by the cpuset, or that have 1015 * been recently (in last second) found to be nearly full. See further 1016 * comments in mmzone.h. Reduces cache footprint of zonelist scans 1017 * that have to skip over alot of full or unallowed zones. 1018 * 1019 * If the zonelist cache is present in the passed in zonelist, then 1020 * returns a pointer to the allowed node mask (either the current 1021 * tasks mems_allowed, or node_online_map.) 1022 * 1023 * If the zonelist cache is not available for this zonelist, does 1024 * nothing and returns NULL. 1025 * 1026 * If the fullzones BITMAP in the zonelist cache is stale (more than 1027 * a second since last zap'd) then we zap it out (clear its bits.) 1028 * 1029 * We hold off even calling zlc_setup, until after we've checked the 1030 * first zone in the zonelist, on the theory that most allocations will 1031 * be satisfied from that first zone, so best to examine that zone as 1032 * quickly as we can. 1033 */ 1034static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1035{ 1036 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1037 nodemask_t *allowednodes; /* zonelist_cache approximation */ 1038 1039 zlc = zonelist->zlcache_ptr; 1040 if (!zlc) 1041 return NULL; 1042 1043 if (jiffies - zlc->last_full_zap > 1 * HZ) { 1044 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 1045 zlc->last_full_zap = jiffies; 1046 } 1047 1048 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ? 1049 &cpuset_current_mems_allowed : 1050 &node_online_map; 1051 return allowednodes; 1052} 1053 1054/* 1055 * Given 'z' scanning a zonelist, run a couple of quick checks to see 1056 * if it is worth looking at further for free memory: 1057 * 1) Check that the zone isn't thought to be full (doesn't have its 1058 * bit set in the zonelist_cache fullzones BITMAP). 1059 * 2) Check that the zones node (obtained from the zonelist_cache 1060 * z_to_n[] mapping) is allowed in the passed in allowednodes mask. 1061 * Return true (non-zero) if zone is worth looking at further, or 1062 * else return false (zero) if it is not. 1063 * 1064 * This check -ignores- the distinction between various watermarks, 1065 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... 
If a zone is 1066 * found to be full for any variation of these watermarks, it will 1067 * be considered full for up to one second by all requests, unless 1068 * we are so low on memory on all allowed nodes that we are forced 1069 * into the second scan of the zonelist. 1070 * 1071 * In the second scan we ignore this zonelist cache and exactly 1072 * apply the watermarks to all zones, even it is slower to do so. 1073 * We are low on memory in the second scan, and should leave no stone 1074 * unturned looking for a free page. 1075 */ 1076static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, 1077 nodemask_t *allowednodes) 1078{ 1079 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1080 int i; /* index of *z in zonelist zones */ 1081 int n; /* node that zone *z is on */ 1082 1083 zlc = zonelist->zlcache_ptr; 1084 if (!zlc) 1085 return 1; 1086 1087 i = z - zonelist->zones; 1088 n = zlc->z_to_n[i]; 1089 1090 /* This zone is worth trying if it is allowed but not full */ 1091 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones); 1092} 1093 1094/* 1095 * Given 'z' scanning a zonelist, set the corresponding bit in 1096 * zlc->fullzones, so that subsequent attempts to allocate a page 1097 * from that zone don't waste time re-examining it. 1098 */ 1099static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) 1100{ 1101 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1102 int i; /* index of *z in zonelist zones */ 1103 1104 zlc = zonelist->zlcache_ptr; 1105 if (!zlc) 1106 return; 1107 1108 i = z - zonelist->zones; 1109 1110 set_bit(i, zlc->fullzones); 1111} 1112 1113#else /* CONFIG_NUMA */ 1114 1115static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1116{ 1117 return NULL; 1118} 1119 1120static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, 1121 nodemask_t *allowednodes) 1122{ 1123 return 1; 1124} 1125 1126static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) 1127{ 1128} 1129#endif /* CONFIG_NUMA */ 1130 1131/* 1132 * get_page_from_freelist goes through the zonelist trying to allocate 1133 * a page. 1134 */ 1135static struct page * 1136get_page_from_freelist(gfp_t gfp_mask, unsigned int order, 1137 struct zonelist *zonelist, int alloc_flags) 1138{ 1139 struct zone **z; 1140 struct page *page = NULL; 1141 int classzone_idx = zone_idx(zonelist->zones[0]); 1142 struct zone *zone; 1143 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ 1144 int zlc_active = 0; /* set if using zonelist_cache */ 1145 int did_zlc_setup = 0; /* just call zlc_setup() one time */ 1146 1147zonelist_scan: 1148 /* 1149 * Scan zonelist, looking for a zone with enough free. 1150 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 
1151 */ 1152 z = zonelist->zones; 1153 1154 do { 1155 if (NUMA_BUILD && zlc_active && 1156 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 1157 continue; 1158 zone = *z; 1159 if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) && 1160 zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) 1161 break; 1162 if ((alloc_flags & ALLOC_CPUSET) && 1163 !cpuset_zone_allowed(zone, gfp_mask)) 1164 goto try_next_zone; 1165 1166 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 1167 unsigned long mark; 1168 if (alloc_flags & ALLOC_WMARK_MIN) 1169 mark = zone->pages_min; 1170 else if (alloc_flags & ALLOC_WMARK_LOW) 1171 mark = zone->pages_low; 1172 else 1173 mark = zone->pages_high; 1174 if (!zone_watermark_ok(zone, order, mark, 1175 classzone_idx, alloc_flags)) { 1176 if (!zone_reclaim_mode || 1177 !zone_reclaim(zone, gfp_mask, order)) 1178 goto this_zone_full; 1179 } 1180 } 1181 1182 page = buffered_rmqueue(zonelist, zone, order, gfp_mask); 1183 if (page) 1184 break; 1185this_zone_full: 1186 if (NUMA_BUILD) 1187 zlc_mark_zone_full(zonelist, z); 1188try_next_zone: 1189 if (NUMA_BUILD && !did_zlc_setup) { 1190 /* we do zlc_setup after the first zone is tried */ 1191 allowednodes = zlc_setup(zonelist, alloc_flags); 1192 zlc_active = 1; 1193 did_zlc_setup = 1; 1194 } 1195 } while (*(++z) != NULL); 1196 1197 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) { 1198 /* Disable zlc cache for second zonelist scan */ 1199 zlc_active = 0; 1200 goto zonelist_scan; 1201 } 1202 return page; 1203} 1204 1205/* 1206 * This is the 'heart' of the zoned buddy allocator. 1207 */ 1208struct page * fastcall 1209__alloc_pages(gfp_t gfp_mask, unsigned int order, 1210 struct zonelist *zonelist) 1211{ 1212 const gfp_t wait = gfp_mask & __GFP_WAIT; 1213 struct zone **z; 1214 struct page *page; 1215 struct reclaim_state reclaim_state; 1216 struct task_struct *p = current; 1217 int do_retry; 1218 int alloc_flags; 1219 int did_some_progress; 1220 1221 might_sleep_if(wait); 1222 1223 if (should_fail_alloc_page(gfp_mask, order)) 1224 return NULL; 1225 1226restart: 1227 z = zonelist->zones; /* the list of zones suitable for gfp_mask */ 1228 1229 if (unlikely(*z == NULL)) { 1230 /* Should this ever happen?? */ 1231 return NULL; 1232 } 1233 1234 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1235 zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); 1236 if (page) 1237 goto got_pg; 1238 1239 /* 1240 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and 1241 * __GFP_NOWARN set) should not cause reclaim since the subsystem 1242 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim 1243 * using a larger set of nodes after it has established that the 1244 * allowed per node queues are empty and that nodes are 1245 * over allocated. 1246 */ 1247 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 1248 goto nopage; 1249 1250 for (z = zonelist->zones; *z; z++) 1251 wakeup_kswapd(*z, order); 1252 1253 /* 1254 * OK, we're below the kswapd watermark and have kicked background 1255 * reclaim. Now things get more complex, so set up alloc_flags according 1256 * to how we want to proceed. 1257 * 1258 * The caller may dip into page reserves a bit more if the caller 1259 * cannot run direct reclaim, or if the caller has realtime scheduling 1260 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 1261 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 
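 *
 * (In zone_watermark_ok(), ALLOC_HIGH halves the minimum watermark and
 * ALLOC_HARDER then removes a further quarter of it, so a GFP_ATOMIC
 * allocation needs only about 3/8 of pages_min in free pages, plus the
 * zone's lowmem_reserve, to succeed.)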
1262 */ 1263 alloc_flags = ALLOC_WMARK_MIN; 1264 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) 1265 alloc_flags |= ALLOC_HARDER; 1266 if (gfp_mask & __GFP_HIGH) 1267 alloc_flags |= ALLOC_HIGH; 1268 if (wait) 1269 alloc_flags |= ALLOC_CPUSET; 1270 1271 /* 1272 * Go through the zonelist again. Let __GFP_HIGH and allocations 1273 * coming from realtime tasks go deeper into reserves. 1274 * 1275 * This is the last chance, in general, before the goto nopage. 1276 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 1277 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1278 */ 1279 page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); 1280 if (page) 1281 goto got_pg; 1282 1283 /* This allocation should allow future memory freeing. */ 1284 1285rebalance: 1286 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) 1287 && !in_interrupt()) { 1288 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 1289nofail_alloc: 1290 /* go through the zonelist yet again, ignoring mins */ 1291 page = get_page_from_freelist(gfp_mask, order, 1292 zonelist, ALLOC_NO_WATERMARKS); 1293 if (page) 1294 goto got_pg; 1295 if (gfp_mask & __GFP_NOFAIL) { 1296 congestion_wait(WRITE, HZ/50); 1297 goto nofail_alloc; 1298 } 1299 } 1300 goto nopage; 1301 } 1302 1303 /* Atomic allocations - we can't balance anything */ 1304 if (!wait) 1305 goto nopage; 1306 1307 cond_resched(); 1308 1309 /* We now go into synchronous reclaim */ 1310 cpuset_memory_pressure_bump(); 1311 p->flags |= PF_MEMALLOC; 1312 reclaim_state.reclaimed_slab = 0; 1313 p->reclaim_state = &reclaim_state; 1314 1315 did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); 1316 1317 p->reclaim_state = NULL; 1318 p->flags &= ~PF_MEMALLOC; 1319 1320 cond_resched(); 1321 1322 if (likely(did_some_progress)) { 1323 page = get_page_from_freelist(gfp_mask, order, 1324 zonelist, alloc_flags); 1325 if (page) 1326 goto got_pg; 1327 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 1328 /* 1329 * Go through the zonelist yet one more time, keep 1330 * very high watermark here, this is only to catch 1331 * a parallel oom killing, we must fail if we're still 1332 * under heavy pressure. 1333 */ 1334 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1335 zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); 1336 if (page) 1337 goto got_pg; 1338 1339 out_of_memory(zonelist, gfp_mask, order); 1340 goto restart; 1341 } 1342 1343 /* 1344 * Don't let big-order allocations loop unless the caller explicitly 1345 * requests that. Wait for some write requests to complete then retry. 1346 * 1347 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order 1348 * <= 3, but that may not be true in other implementations. 1349 */ 1350 do_retry = 0; 1351 if (!(gfp_mask & __GFP_NORETRY)) { 1352 if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) 1353 do_retry = 1; 1354 if (gfp_mask & __GFP_NOFAIL) 1355 do_retry = 1; 1356 } 1357 if (do_retry) { 1358 congestion_wait(WRITE, HZ/50); 1359 goto rebalance; 1360 } 1361 1362nopage: 1363 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 1364 printk(KERN_WARNING "%s: page allocation failure." 1365 " order:%d, mode:0x%x\n", 1366 p->comm, order, gfp_mask); 1367 dump_stack(); 1368 show_mem(); 1369 } 1370got_pg: 1371 return page; 1372} 1373 1374EXPORT_SYMBOL(__alloc_pages); 1375 1376/* 1377 * Common helper functions. 
1378 */ 1379fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 1380{ 1381 struct page * page; 1382 page = alloc_pages(gfp_mask, order); 1383 if (!page) 1384 return 0; 1385 return (unsigned long) page_address(page); 1386} 1387 1388EXPORT_SYMBOL(__get_free_pages); 1389 1390fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) 1391{ 1392 struct page * page; 1393 1394 /* 1395 * get_zeroed_page() returns a 32-bit address, which cannot represent 1396 * a highmem page 1397 */ 1398 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1399 1400 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1401 if (page) 1402 return (unsigned long) page_address(page); 1403 return 0; 1404} 1405 1406EXPORT_SYMBOL(get_zeroed_page); 1407 1408void __pagevec_free(struct pagevec *pvec) 1409{ 1410 int i = pagevec_count(pvec); 1411 1412 while (--i >= 0) 1413 free_hot_cold_page(pvec->pages[i], pvec->cold); 1414} 1415 1416fastcall void __free_pages(struct page *page, unsigned int order) 1417{ 1418 if (put_page_testzero(page)) { 1419 if (order == 0) 1420 free_hot_page(page); 1421 else 1422 __free_pages_ok(page, order); 1423 } 1424} 1425 1426EXPORT_SYMBOL(__free_pages); 1427 1428fastcall void free_pages(unsigned long addr, unsigned int order) 1429{ 1430 if (addr != 0) { 1431 VM_BUG_ON(!virt_addr_valid((void *)addr)); 1432 __free_pages(virt_to_page((void *)addr), order); 1433 } 1434} 1435 1436EXPORT_SYMBOL(free_pages); 1437 1438/* 1439 * Total amount of free (allocatable) RAM: 1440 */ 1441unsigned int nr_free_pages(void) 1442{ 1443 unsigned int sum = 0; 1444 struct zone *zone; 1445 1446 for_each_zone(zone) 1447 sum += zone->free_pages; 1448 1449 return sum; 1450} 1451 1452EXPORT_SYMBOL(nr_free_pages); 1453 1454#ifdef CONFIG_NUMA 1455unsigned int nr_free_pages_pgdat(pg_data_t *pgdat) 1456{ 1457 unsigned int sum = 0; 1458 enum zone_type i; 1459 1460 for (i = 0; i < MAX_NR_ZONES; i++) 1461 sum += pgdat->node_zones[i].free_pages; 1462 1463 return sum; 1464} 1465#endif 1466 1467static unsigned int nr_free_zone_pages(int offset) 1468{ 1469 /* Just pick one node, since fallback list is circular */ 1470 pg_data_t *pgdat = NODE_DATA(numa_node_id()); 1471 unsigned int sum = 0; 1472 1473 struct zonelist *zonelist = pgdat->node_zonelists + offset; 1474 struct zone **zonep = zonelist->zones; 1475 struct zone *zone; 1476 1477 for (zone = *zonep++; zone; zone = *zonep++) { 1478 unsigned long size = zone->present_pages; 1479 unsigned long high = zone->pages_high; 1480 if (size > high) 1481 sum += size - high; 1482 } 1483 1484 return sum; 1485} 1486 1487/* 1488 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 1489 */ 1490unsigned int nr_free_buffer_pages(void) 1491{ 1492 return nr_free_zone_pages(gfp_zone(GFP_USER)); 1493} 1494 1495/* 1496 * Amount of free RAM allocatable within all zones 1497 */ 1498unsigned int nr_free_pagecache_pages(void) 1499{ 1500 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER)); 1501} 1502 1503static inline void show_node(struct zone *zone) 1504{ 1505 if (NUMA_BUILD) 1506 printk("Node %d ", zone_to_nid(zone)); 1507} 1508 1509void si_meminfo(struct sysinfo *val) 1510{ 1511 val->totalram = totalram_pages; 1512 val->sharedram = 0; 1513 val->freeram = nr_free_pages(); 1514 val->bufferram = nr_blockdev_pages(); 1515 val->totalhigh = totalhigh_pages; 1516 val->freehigh = nr_free_highpages(); 1517 val->mem_unit = PAGE_SIZE; 1518} 1519 1520EXPORT_SYMBOL(si_meminfo); 1521 1522#ifdef CONFIG_NUMA 1523void si_meminfo_node(struct sysinfo *val, int nid) 1524{ 1525 pg_data_t *pgdat = NODE_DATA(nid); 
1526 1527 val->totalram = pgdat->node_present_pages; 1528 val->freeram = nr_free_pages_pgdat(pgdat); 1529#ifdef CONFIG_HIGHMEM 1530 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 1531 val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages; 1532#else 1533 val->totalhigh = 0; 1534 val->freehigh = 0; 1535#endif 1536 val->mem_unit = PAGE_SIZE; 1537} 1538#endif 1539 1540#define K(x) ((x) << (PAGE_SHIFT-10)) 1541 1542/* 1543 * Show free area list (used inside shift_scroll-lock stuff) 1544 * We also calculate the percentage fragmentation. We do this by counting the 1545 * memory on each free list with the exception of the first item on the list. 1546 */ 1547void show_free_areas(void) 1548{ 1549 int cpu; 1550 unsigned long active; 1551 unsigned long inactive; 1552 unsigned long free; 1553 struct zone *zone; 1554 1555 for_each_zone(zone) { 1556 if (!populated_zone(zone)) 1557 continue; 1558 1559 show_node(zone); 1560 printk("%s per-cpu:\n", zone->name); 1561 1562 for_each_online_cpu(cpu) { 1563 struct per_cpu_pageset *pageset; 1564 1565 pageset = zone_pcp(zone, cpu); 1566 1567 printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d " 1568 "Cold: hi:%5d, btch:%4d usd:%4d\n", 1569 cpu, pageset->pcp[0].high, 1570 pageset->pcp[0].batch, pageset->pcp[0].count, 1571 pageset->pcp[1].high, pageset->pcp[1].batch, 1572 pageset->pcp[1].count); 1573 } 1574 } 1575 1576 get_zone_counts(&active, &inactive, &free); 1577 1578 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu " 1579 "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n", 1580 active, 1581 inactive, 1582 global_page_state(NR_FILE_DIRTY), 1583 global_page_state(NR_WRITEBACK), 1584 global_page_state(NR_UNSTABLE_NFS), 1585 nr_free_pages(), 1586 global_page_state(NR_SLAB_RECLAIMABLE) + 1587 global_page_state(NR_SLAB_UNRECLAIMABLE), 1588 global_page_state(NR_FILE_MAPPED), 1589 global_page_state(NR_PAGETABLE)); 1590 1591 for_each_zone(zone) { 1592 int i; 1593 1594 if (!populated_zone(zone)) 1595 continue; 1596 1597 show_node(zone); 1598 printk("%s" 1599 " free:%lukB" 1600 " min:%lukB" 1601 " low:%lukB" 1602 " high:%lukB" 1603 " active:%lukB" 1604 " inactive:%lukB" 1605 " present:%lukB" 1606 " pages_scanned:%lu" 1607 " all_unreclaimable? %s" 1608 "\n", 1609 zone->name, 1610 K(zone->free_pages), 1611 K(zone->pages_min), 1612 K(zone->pages_low), 1613 K(zone->pages_high), 1614 K(zone->nr_active), 1615 K(zone->nr_inactive), 1616 K(zone->present_pages), 1617 zone->pages_scanned, 1618 (zone->all_unreclaimable ? "yes" : "no") 1619 ); 1620 printk("lowmem_reserve[]:"); 1621 for (i = 0; i < MAX_NR_ZONES; i++) 1622 printk(" %lu", zone->lowmem_reserve[i]); 1623 printk("\n"); 1624 } 1625 1626 for_each_zone(zone) { 1627 unsigned long nr[MAX_ORDER], flags, order, total = 0; 1628 1629 if (!populated_zone(zone)) 1630 continue; 1631 1632 show_node(zone); 1633 printk("%s: ", zone->name); 1634 1635 spin_lock_irqsave(&zone->lock, flags); 1636 for (order = 0; order < MAX_ORDER; order++) { 1637 nr[order] = zone->free_area[order].nr_free; 1638 total += nr[order] << order; 1639 } 1640 spin_unlock_irqrestore(&zone->lock, flags); 1641 for (order = 0; order < MAX_ORDER; order++) 1642 printk("%lu*%lukB ", nr[order], K(1UL) << order); 1643 printk("= %lukB\n", K(total)); 1644 } 1645 1646 show_swap_cache_info(); 1647} 1648 1649/* 1650 * Builds allocation fallback zone lists. 1651 * 1652 * Add all populated zones of a node to the zonelist. 
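 *
 * Zones are added from the highest permitted zone for the list
 * downwards (e.g. HighMem, then Normal, then DMA), so the allocator
 * falls back to lower zones only after the preferred, higher ones.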
1653 */ 1654static int __meminit build_zonelists_node(pg_data_t *pgdat, 1655 struct zonelist *zonelist, int nr_zones, enum zone_type zone_type) 1656{ 1657 struct zone *zone; 1658 1659 BUG_ON(zone_type >= MAX_NR_ZONES); 1660 zone_type++; 1661 1662 do { 1663 zone_type--; 1664 zone = pgdat->node_zones + zone_type; 1665 if (populated_zone(zone)) { 1666 zonelist->zones[nr_zones++] = zone; 1667 check_highest_zone(zone_type); 1668 } 1669 1670 } while (zone_type); 1671 return nr_zones; 1672} 1673 1674#ifdef CONFIG_NUMA 1675#define MAX_NODE_LOAD (num_online_nodes()) 1676static int __meminitdata node_load[MAX_NUMNODES]; 1677/** 1678 * find_next_best_node - find the next node that should appear in a given node's fallback list 1679 * @node: node whose fallback list we're appending 1680 * @used_node_mask: nodemask_t of already used nodes 1681 * 1682 * We use a number of factors to determine which is the next node that should 1683 * appear on a given node's fallback list. The node should not have appeared 1684 * already in @node's fallback list, and it should be the next closest node 1685 * according to the distance array (which contains arbitrary distance values 1686 * from each node to each node in the system), and should also prefer nodes 1687 * with no CPUs, since presumably they'll have very little allocation pressure 1688 * on them otherwise. 1689 * It returns -1 if no node is found. 1690 */ 1691static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask) 1692{ 1693 int n, val; 1694 int min_val = INT_MAX; 1695 int best_node = -1; 1696 1697 /* Use the local node if we haven't already */ 1698 if (!node_isset(node, *used_node_mask)) { 1699 node_set(node, *used_node_mask); 1700 return node; 1701 } 1702 1703 for_each_online_node(n) { 1704 cpumask_t tmp; 1705 1706 /* Don't want a node to appear more than once */ 1707 if (node_isset(n, *used_node_mask)) 1708 continue; 1709 1710 /* Use the distance array to find the distance */ 1711 val = node_distance(node, n); 1712 1713 /* Penalize nodes under us ("prefer the next node") */ 1714 val += (n < node); 1715 1716 /* Give preference to headless and unused nodes */ 1717 tmp = node_to_cpumask(n); 1718 if (!cpus_empty(tmp)) 1719 val += PENALTY_FOR_NODE_WITH_CPUS; 1720 1721 /* Slight preference for less loaded node */ 1722 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 1723 val += node_load[n]; 1724 1725 if (val < min_val) { 1726 min_val = val; 1727 best_node = n; 1728 } 1729 } 1730 1731 if (best_node >= 0) 1732 node_set(best_node, *used_node_mask); 1733 1734 return best_node; 1735} 1736 1737static void __meminit build_zonelists(pg_data_t *pgdat) 1738{ 1739 int j, node, local_node; 1740 enum zone_type i; 1741 int prev_node, load; 1742 struct zonelist *zonelist; 1743 nodemask_t used_mask; 1744 1745 /* initialize zonelists */ 1746 for (i = 0; i < MAX_NR_ZONES; i++) { 1747 zonelist = pgdat->node_zonelists + i; 1748 zonelist->zones[0] = NULL; 1749 } 1750 1751 /* NUMA-aware ordering of nodes */ 1752 local_node = pgdat->node_id; 1753 load = num_online_nodes(); 1754 prev_node = local_node; 1755 nodes_clear(used_mask); 1756 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 1757 int distance = node_distance(local_node, node); 1758 1759 /* 1760 * If another node is sufficiently far away then it is better 1761 * to reclaim pages in a zone before going off node. 1762 */ 1763 if (distance > RECLAIM_DISTANCE) 1764 zone_reclaim_mode = 1; 1765 1766 /* 1767 * We don't want to pressure a particular node. 
1768 * So adding penalty to the first node in same 1769 * distance group to make it round-robin. 1770 */ 1771 1772 if (distance != node_distance(local_node, prev_node)) 1773 node_load[node] += load; 1774 prev_node = node; 1775 load--; 1776 for (i = 0; i < MAX_NR_ZONES; i++) { 1777 zonelist = pgdat->node_zonelists + i; 1778 for (j = 0; zonelist->zones[j] != NULL; j++); 1779 1780 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1781 zonelist->zones[j] = NULL; 1782 } 1783 } 1784} 1785 1786/* Construct the zonelist performance cache - see further mmzone.h */ 1787static void __meminit build_zonelist_cache(pg_data_t *pgdat) 1788{ 1789 int i; 1790 1791 for (i = 0; i < MAX_NR_ZONES; i++) { 1792 struct zonelist *zonelist; 1793 struct zonelist_cache *zlc; 1794 struct zone **z; 1795 1796 zonelist = pgdat->node_zonelists + i; 1797 zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 1798 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 1799 for (z = zonelist->zones; *z; z++) 1800 zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z); 1801 } 1802} 1803 1804#else /* CONFIG_NUMA */ 1805 1806static void __meminit build_zonelists(pg_data_t *pgdat) 1807{ 1808 int node, local_node; 1809 enum zone_type i,j; 1810 1811 local_node = pgdat->node_id; 1812 for (i = 0; i < MAX_NR_ZONES; i++) { 1813 struct zonelist *zonelist; 1814 1815 zonelist = pgdat->node_zonelists + i; 1816 1817 j = build_zonelists_node(pgdat, zonelist, 0, i); 1818 /* 1819 * Now we build the zonelist so that it contains the zones 1820 * of all the other nodes. 1821 * We don't want to pressure a particular node, so when 1822 * building the zones for node N, we make sure that the 1823 * zones coming right after the local ones are those from 1824 * node N+1 (modulo N) 1825 */ 1826 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 1827 if (!node_online(node)) 1828 continue; 1829 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1830 } 1831 for (node = 0; node < local_node; node++) { 1832 if (!node_online(node)) 1833 continue; 1834 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1835 } 1836 1837 zonelist->zones[j] = NULL; 1838 } 1839} 1840 1841/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ 1842static void __meminit build_zonelist_cache(pg_data_t *pgdat) 1843{ 1844 int i; 1845 1846 for (i = 0; i < MAX_NR_ZONES; i++) 1847 pgdat->node_zonelists[i].zlcache_ptr = NULL; 1848} 1849 1850#endif /* CONFIG_NUMA */ 1851 1852/* return values int ....just for stop_machine_run() */ 1853static int __meminit __build_all_zonelists(void *dummy) 1854{ 1855 int nid; 1856 1857 for_each_online_node(nid) { 1858 build_zonelists(NODE_DATA(nid)); 1859 build_zonelist_cache(NODE_DATA(nid)); 1860 } 1861 return 0; 1862} 1863 1864void __meminit build_all_zonelists(void) 1865{ 1866 if (system_state == SYSTEM_BOOTING) { 1867 __build_all_zonelists(NULL); 1868 cpuset_init_current_mems_allowed(); 1869 } else { 1870 /* we have to stop all cpus to guaranntee there is no user 1871 of zonelist */ 1872 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); 1873 /* cpuset refresh routine should be here */ 1874 } 1875 vm_total_pages = nr_free_pagecache_pages(); 1876 printk("Built %i zonelists. Total pages: %ld\n", 1877 num_online_nodes(), vm_total_pages); 1878} 1879 1880/* 1881 * Helper functions to size the waitqueue hash table. 1882 * Essentially these want to choose hash table sizes sufficiently 1883 * large so that collisions trying to wait on pages are rare. 
1884 * But in fact, the number of active page waitqueues on typical 1885 * systems is ridiculously low, less than 200. So this is even 1886 * conservative, even though it seems large. 1887 * 1888 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 1889 * waitqueues, i.e. the size of the waitq table given the number of pages. 1890 */ 1891#define PAGES_PER_WAITQUEUE 256 1892 1893#ifndef CONFIG_MEMORY_HOTPLUG 1894static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 1895{ 1896 unsigned long size = 1; 1897 1898 pages /= PAGES_PER_WAITQUEUE; 1899 1900 while (size < pages) 1901 size <<= 1; 1902 1903 /* 1904 * Once we have dozens or even hundreds of threads sleeping 1905 * on IO we've got bigger problems than wait queue collision. 1906 * Limit the size of the wait table to a reasonable size. 1907 */ 1908 size = min(size, 4096UL); 1909 1910 return max(size, 4UL); 1911} 1912#else 1913/* 1914 * A zone's size might be changed by hot-add, so it is not possible to determine 1915 * a suitable size for its wait_table. So we use the maximum size now. 1916 * 1917 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 1918 * 1919 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 1920 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 1921 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 1922 * 1923 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 1924 * or more by the traditional way. (See above). It equals: 1925 * 1926 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 1927 * ia64(16K page size) : = ( 8G + 4M)byte. 1928 * powerpc (64K page size) : = (32G +16M)byte. 1929 */ 1930static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 1931{ 1932 return 4096UL; 1933} 1934#endif 1935 1936/* 1937 * This is an integer logarithm so that shifts can be used later 1938 * to extract the more random high bits from the multiplicative 1939 * hash function before the remainder is taken. 1940 */ 1941static inline unsigned long wait_table_bits(unsigned long size) 1942{ 1943 return ffz(~size); 1944} 1945 1946#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 1947 1948/* 1949 * Initially all pages are reserved - free ones are freed 1950 * up by free_all_bootmem() once the early boot process is 1951 * done. Non-atomic initialization, single-pass. 1952 */ 1953void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 1954 unsigned long start_pfn) 1955{ 1956 struct page *page; 1957 unsigned long end_pfn = start_pfn + size; 1958 unsigned long pfn; 1959 1960 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 1961 if (!early_pfn_valid(pfn)) 1962 continue; 1963 if (!early_pfn_in_nid(pfn, nid)) 1964 continue; 1965 page = pfn_to_page(pfn); 1966 set_page_links(page, zone, nid, pfn); 1967 init_page_count(page); 1968 reset_page_mapcount(page); 1969 SetPageReserved(page); 1970 INIT_LIST_HEAD(&page->lru); 1971#ifdef WANT_PAGE_VIRTUAL 1972 /* The shift won't overflow because ZONE_NORMAL is below 4G. 
*/ 1973 if (!is_highmem_idx(zone)) 1974 set_page_address(page, __va(pfn << PAGE_SHIFT)); 1975#endif 1976 } 1977} 1978 1979void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, 1980 unsigned long size) 1981{ 1982 int order; 1983 for (order = 0; order < MAX_ORDER ; order++) { 1984 INIT_LIST_HEAD(&zone->free_area[order].free_list); 1985 zone->free_area[order].nr_free = 0; 1986 } 1987} 1988 1989#ifndef __HAVE_ARCH_MEMMAP_INIT 1990#define memmap_init(size, nid, zone, start_pfn) \ 1991 memmap_init_zone((size), (nid), (zone), (start_pfn)) 1992#endif 1993 1994static int __cpuinit zone_batchsize(struct zone *zone) 1995{ 1996 int batch; 1997 1998 /* 1999 * The per-cpu-pages pools are set to around 1000th of the 2000 * size of the zone. But no more than 1/2 of a meg. 2001 * 2002 * OK, so we don't know how big the cache is. So guess. 2003 */ 2004 batch = zone->present_pages / 1024; 2005 if (batch * PAGE_SIZE > 512 * 1024) 2006 batch = (512 * 1024) / PAGE_SIZE; 2007 batch /= 4; /* We effectively *= 4 below */ 2008 if (batch < 1) 2009 batch = 1; 2010 2011 /* 2012 * Clamp the batch to a 2^n - 1 value. Having a power 2013 * of 2 value was found to be more likely to have 2014 * suboptimal cache aliasing properties in some cases. 2015 * 2016 * For example if 2 tasks are alternately allocating 2017 * batches of pages, one task can end up with a lot 2018 * of pages of one half of the possible page colors 2019 * and the other with pages of the other colors. 2020 */ 2021 batch = (1 << (fls(batch + batch/2)-1)) - 1; 2022 2023 return batch; 2024} 2025 2026inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 2027{ 2028 struct per_cpu_pages *pcp; 2029 2030 memset(p, 0, sizeof(*p)); 2031 2032 pcp = &p->pcp[0]; /* hot */ 2033 pcp->count = 0; 2034 pcp->high = 6 * batch; 2035 pcp->batch = max(1UL, 1 * batch); 2036 INIT_LIST_HEAD(&pcp->list); 2037 2038 pcp = &p->pcp[1]; /* cold*/ 2039 pcp->count = 0; 2040 pcp->high = 2 * batch; 2041 pcp->batch = max(1UL, batch/2); 2042 INIT_LIST_HEAD(&pcp->list); 2043} 2044 2045/* 2046 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist 2047 * to the value high for the pageset p. 2048 */ 2049 2050static void setup_pagelist_highmark(struct per_cpu_pageset *p, 2051 unsigned long high) 2052{ 2053 struct per_cpu_pages *pcp; 2054 2055 pcp = &p->pcp[0]; /* hot list */ 2056 pcp->high = high; 2057 pcp->batch = max(1UL, high/4); 2058 if ((high/4) > (PAGE_SHIFT * 8)) 2059 pcp->batch = PAGE_SHIFT * 8; 2060} 2061 2062 2063#ifdef CONFIG_NUMA 2064/* 2065 * Boot pageset table. One per cpu which is going to be used for all 2066 * zones and all nodes. The parameters will be set in such a way 2067 * that an item put on a list will immediately be handed over to 2068 * the buddy list. This is safe since pageset manipulation is done 2069 * with interrupts disabled. 2070 * 2071 * Some NUMA counter updates may also be caught by the boot pagesets. 2072 * 2073 * The boot_pagesets must be kept even after bootup is complete for 2074 * unused processors and/or zones. They do play a role for bootstrapping 2075 * hotplugged processors. 2076 * 2077 * zoneinfo_show() and maybe other functions do 2078 * not check if the processor is online before following the pageset pointer. 2079 * Other parts of the kernel may not check if the zone is available. 2080 */ 2081static struct per_cpu_pageset boot_pageset[NR_CPUS]; 2082 2083/* 2084 * Dynamically allocate memory for the 2085 * per cpu pageset array in struct zone. 
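 *
 * A worked example of the per-cpu sizing done by zone_batchsize() and
 * setup_pageset() above (illustrative numbers, assuming 4K pages): for a
 * 1GB zone, present_pages is 262144, so the batch starts at 262144 / 1024
 * = 256, is capped at 512KB / 4KB = 128, divided by 4 to give 32, and
 * rounded to 2^n - 1, i.e. 31; the hot list high watermark then becomes
 * 6 * 31 = 186 pages and the cold list high watermark 2 * 31 = 62.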
2086 */ 2087static int __cpuinit process_zones(int cpu) 2088{ 2089 struct zone *zone, *dzone; 2090 2091 for_each_zone(zone) { 2092 2093 if (!populated_zone(zone)) 2094 continue; 2095 2096 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 2097 GFP_KERNEL, cpu_to_node(cpu)); 2098 if (!zone_pcp(zone, cpu)) 2099 goto bad; 2100 2101 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); 2102 2103 if (percpu_pagelist_fraction) 2104 setup_pagelist_highmark(zone_pcp(zone, cpu), 2105 (zone->present_pages / percpu_pagelist_fraction)); 2106 } 2107 2108 return 0; 2109bad: 2110 for_each_zone(dzone) { 2111 if (dzone == zone) 2112 break; 2113 kfree(zone_pcp(dzone, cpu)); 2114 zone_pcp(dzone, cpu) = NULL; 2115 } 2116 return -ENOMEM; 2117} 2118 2119static inline void free_zone_pagesets(int cpu) 2120{ 2121 struct zone *zone; 2122 2123 for_each_zone(zone) { 2124 struct per_cpu_pageset *pset = zone_pcp(zone, cpu); 2125 2126 /* Free per_cpu_pageset if it is slab allocated */ 2127 if (pset != &boot_pageset[cpu]) 2128 kfree(pset); 2129 zone_pcp(zone, cpu) = NULL; 2130 } 2131} 2132 2133static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, 2134 unsigned long action, 2135 void *hcpu) 2136{ 2137 int cpu = (long)hcpu; 2138 int ret = NOTIFY_OK; 2139 2140 switch (action) { 2141 case CPU_UP_PREPARE: 2142 if (process_zones(cpu)) 2143 ret = NOTIFY_BAD; 2144 break; 2145 case CPU_UP_CANCELED: 2146 case CPU_DEAD: 2147 free_zone_pagesets(cpu); 2148 break; 2149 default: 2150 break; 2151 } 2152 return ret; 2153} 2154 2155static struct notifier_block __cpuinitdata pageset_notifier = 2156 { &pageset_cpuup_callback, NULL, 0 }; 2157 2158void __init setup_per_cpu_pageset(void) 2159{ 2160 int err; 2161 2162 /* Initialize per_cpu_pageset for cpu 0. 2163 * A cpuup callback will do this for every cpu 2164 * as it comes online 2165 */ 2166 err = process_zones(smp_processor_id()); 2167 BUG_ON(err); 2168 register_cpu_notifier(&pageset_notifier); 2169} 2170 2171#endif 2172 2173static __meminit 2174int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 2175{ 2176 int i; 2177 struct pglist_data *pgdat = zone->zone_pgdat; 2178 size_t alloc_size; 2179 2180 /* 2181 * The per-page waitqueue mechanism uses hashed waitqueues 2182 * per zone. 2183 */ 2184 zone->wait_table_hash_nr_entries = 2185 wait_table_hash_nr_entries(zone_size_pages); 2186 zone->wait_table_bits = 2187 wait_table_bits(zone->wait_table_hash_nr_entries); 2188 alloc_size = zone->wait_table_hash_nr_entries 2189 * sizeof(wait_queue_head_t); 2190 2191 if (system_state == SYSTEM_BOOTING) { 2192 zone->wait_table = (wait_queue_head_t *) 2193 alloc_bootmem_node(pgdat, alloc_size); 2194 } else { 2195 /* 2196 * This case means that a zone whose size was 0 gets new memory 2197 * via memory hot-add. 2198 * But it may be the case that a new node was hot-added. In 2199 * this case vmalloc() will not be able to use this new node's 2200 * memory - this wait_table must be initialized to use this new 2201 * node itself as well. 2202 * To use this new node's memory, further consideration will be 2203 * necessary. 
2204 */ 2205 zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size); 2206 } 2207 if (!zone->wait_table) 2208 return -ENOMEM; 2209 2210 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) 2211 init_waitqueue_head(zone->wait_table + i); 2212 2213 return 0; 2214} 2215 2216static __meminit void zone_pcp_init(struct zone *zone) 2217{ 2218 int cpu; 2219 unsigned long batch = zone_batchsize(zone); 2220 2221 for (cpu = 0; cpu < NR_CPUS; cpu++) { 2222#ifdef CONFIG_NUMA 2223 /* Early boot. Slab allocator not functional yet */ 2224 zone_pcp(zone, cpu) = &boot_pageset[cpu]; 2225 setup_pageset(&boot_pageset[cpu],0); 2226#else 2227 setup_pageset(zone_pcp(zone,cpu), batch); 2228#endif 2229 } 2230 if (zone->present_pages) 2231 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 2232 zone->name, zone->present_pages, batch); 2233} 2234 2235__meminit int init_currently_empty_zone(struct zone *zone, 2236 unsigned long zone_start_pfn, 2237 unsigned long size) 2238{ 2239 struct pglist_data *pgdat = zone->zone_pgdat; 2240 int ret; 2241 ret = zone_wait_table_init(zone, size); 2242 if (ret) 2243 return ret; 2244 pgdat->nr_zones = zone_idx(zone) + 1; 2245 2246 zone->zone_start_pfn = zone_start_pfn; 2247 2248 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); 2249 2250 zone_init_free_lists(pgdat, zone, zone->spanned_pages); 2251 2252 return 0; 2253} 2254 2255#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2256/* 2257 * Basic iterator support. Return the first range of PFNs for a node 2258 * Note: nid == MAX_NUMNODES returns first region regardless of node 2259 */ 2260static int __init first_active_region_index_in_nid(int nid) 2261{ 2262 int i; 2263 2264 for (i = 0; i < nr_nodemap_entries; i++) 2265 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) 2266 return i; 2267 2268 return -1; 2269} 2270 2271/* 2272 * Basic iterator support. Return the next active range of PFNs for a node 2273 * Note: nid == MAX_NUMNODES returns next region regardless of node 2274 */ 2275static int __init next_active_region_index_in_nid(int index, int nid) 2276{ 2277 for (index = index + 1; index < nr_nodemap_entries; index++) 2278 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) 2279 return index; 2280 2281 return -1; 2282} 2283 2284#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 2285/* 2286 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 2287 * Architectures may implement their own version but if add_active_range() 2288 * was used and there are no special requirements, this is a convenient 2289 * alternative 2290 */ 2291int __init early_pfn_to_nid(unsigned long pfn) 2292{ 2293 int i; 2294 2295 for (i = 0; i < nr_nodemap_entries; i++) { 2296 unsigned long start_pfn = early_node_map[i].start_pfn; 2297 unsigned long end_pfn = early_node_map[i].end_pfn; 2298 2299 if (start_pfn <= pfn && pfn < end_pfn) 2300 return early_node_map[i].nid; 2301 } 2302 2303 return 0; 2304} 2305#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 2306 2307/* Basic iterator support to walk early_node_map[] */ 2308#define for_each_active_range_index_in_nid(i, nid) \ 2309 for (i = first_active_region_index_in_nid(nid); i != -1; \ 2310 i = next_active_region_index_in_nid(i, nid)) 2311 2312/** 2313 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range 2314 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
2315 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node 2316 * 2317 * If an architecture guarantees that all ranges registered with 2318 * add_active_range() contain no holes and may be freed, 2319 * this function may be used instead of calling free_bootmem() manually. 2320 */ 2321void __init free_bootmem_with_active_regions(int nid, 2322 unsigned long max_low_pfn) 2323{ 2324 int i; 2325 2326 for_each_active_range_index_in_nid(i, nid) { 2327 unsigned long size_pages = 0; 2328 unsigned long end_pfn = early_node_map[i].end_pfn; 2329 2330 if (early_node_map[i].start_pfn >= max_low_pfn) 2331 continue; 2332 2333 if (end_pfn > max_low_pfn) 2334 end_pfn = max_low_pfn; 2335 2336 size_pages = end_pfn - early_node_map[i].start_pfn; 2337 free_bootmem_node(NODE_DATA(early_node_map[i].nid), 2338 PFN_PHYS(early_node_map[i].start_pfn), 2339 size_pages << PAGE_SHIFT); 2340 } 2341} 2342 2343/** 2344 * sparse_memory_present_with_active_regions - Call memory_present for each active range 2345 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 2346 * 2347 * If an architecture guarantees that all ranges registered with 2348 * add_active_range() contain no holes and may be freed, this 2349 * function may be used instead of calling memory_present() manually. 2350 */ 2351void __init sparse_memory_present_with_active_regions(int nid) 2352{ 2353 int i; 2354 2355 for_each_active_range_index_in_nid(i, nid) 2356 memory_present(early_node_map[i].nid, 2357 early_node_map[i].start_pfn, 2358 early_node_map[i].end_pfn); 2359} 2360 2361/** 2362 * push_node_boundaries - Push node boundaries to at least the requested boundary 2363 * @nid: The nid of the node to push the boundary for 2364 * @start_pfn: The start pfn of the node 2365 * @end_pfn: The end pfn of the node 2366 * 2367 * In reserve-based hot-add, mem_map is allocated that is unused until hotadd 2368 * time. Specifically, on x86_64, SRAT will report ranges that can potentially 2369 * be hotplugged even though no physical memory exists. This function allows 2370 * an arch to push out the node boundaries so mem_map is allocated that can 2371 * be used later.
2372 */ 2373#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 2374void __init push_node_boundaries(unsigned int nid, 2375 unsigned long start_pfn, unsigned long end_pfn) 2376{ 2377 printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n", 2378 nid, start_pfn, end_pfn); 2379 2380 /* Initialise the boundary for this node if necessary */ 2381 if (node_boundary_end_pfn[nid] == 0) 2382 node_boundary_start_pfn[nid] = -1UL; 2383 2384 /* Update the boundaries */ 2385 if (node_boundary_start_pfn[nid] > start_pfn) 2386 node_boundary_start_pfn[nid] = start_pfn; 2387 if (node_boundary_end_pfn[nid] < end_pfn) 2388 node_boundary_end_pfn[nid] = end_pfn; 2389} 2390 2391/* If necessary, push the node boundary out for reserve hotadd */ 2392static void __init account_node_boundary(unsigned int nid, 2393 unsigned long *start_pfn, unsigned long *end_pfn) 2394{ 2395 printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n", 2396 nid, *start_pfn, *end_pfn); 2397 2398 /* Return if boundary information has not been provided */ 2399 if (node_boundary_end_pfn[nid] == 0) 2400 return; 2401 2402 /* Check the boundaries and update if necessary */ 2403 if (node_boundary_start_pfn[nid] < *start_pfn) 2404 *start_pfn = node_boundary_start_pfn[nid]; 2405 if (node_boundary_end_pfn[nid] > *end_pfn) 2406 *end_pfn = node_boundary_end_pfn[nid]; 2407} 2408#else 2409void __init push_node_boundaries(unsigned int nid, 2410 unsigned long start_pfn, unsigned long end_pfn) {} 2411 2412static void __init account_node_boundary(unsigned int nid, 2413 unsigned long *start_pfn, unsigned long *end_pfn) {} 2414#endif 2415 2416 2417/** 2418 * get_pfn_range_for_nid - Return the start and end page frames for a node 2419 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 2420 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 2421 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 2422 * 2423 * It returns the start and end page frame of a node based on information 2424 * provided by an arch calling add_active_range(). If called for a node 2425 * with no available memory, a warning is printed and the start and end 2426 * PFNs will be 0. 
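 *
 * A minimal (illustrative) use from early arch code might look like:
 *
 *	unsigned long start_pfn, end_pfn;
 *	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);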
2427 */ 2428void __init get_pfn_range_for_nid(unsigned int nid, 2429 unsigned long *start_pfn, unsigned long *end_pfn) 2430{ 2431 int i; 2432 *start_pfn = -1UL; 2433 *end_pfn = 0; 2434 2435 for_each_active_range_index_in_nid(i, nid) { 2436 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn); 2437 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn); 2438 } 2439 2440 if (*start_pfn == -1UL) { 2441 printk(KERN_WARNING "Node %u active with no memory\n", nid); 2442 *start_pfn = 0; 2443 } 2444 2445 /* Push the node boundaries out if requested */ 2446 account_node_boundary(nid, start_pfn, end_pfn); 2447} 2448 2449/* 2450 * Return the number of pages a zone spans in a node, including holes 2451 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 2452 */ 2453unsigned long __init zone_spanned_pages_in_node(int nid, 2454 unsigned long zone_type, 2455 unsigned long *ignored) 2456{ 2457 unsigned long node_start_pfn, node_end_pfn; 2458 unsigned long zone_start_pfn, zone_end_pfn; 2459 2460 /* Get the start and end of the node and zone */ 2461 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 2462 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 2463 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 2464 2465 /* Check that this node has pages within the zone's required range */ 2466 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) 2467 return 0; 2468 2469 /* Move the zone boundaries inside the node if necessary */ 2470 zone_end_pfn = min(zone_end_pfn, node_end_pfn); 2471 zone_start_pfn = max(zone_start_pfn, node_start_pfn); 2472 2473 /* Return the spanned pages */ 2474 return zone_end_pfn - zone_start_pfn; 2475} 2476 2477/* 2478 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 2479 * then all holes in the requested range will be accounted for. 
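 *
 * For example (illustrative pfn values): if a node registered the active
 * ranges [1000, 2000) and [3000, 4000), then __absent_pages_in_range()
 * over the range [0, 5000) counts 1000 pages before the first range,
 * 1000 pages between the two ranges and 1000 pages after the last one,
 * i.e. 3000 pages of holes in total.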
2480 */ 2481unsigned long __init __absent_pages_in_range(int nid, 2482 unsigned long range_start_pfn, 2483 unsigned long range_end_pfn) 2484{ 2485 int i = 0; 2486 unsigned long prev_end_pfn = 0, hole_pages = 0; 2487 unsigned long start_pfn; 2488 2489 /* Find the end_pfn of the first active range of pfns in the node */ 2490 i = first_active_region_index_in_nid(nid); 2491 if (i == -1) 2492 return 0; 2493 2494 /* Account for ranges before physical memory on this node */ 2495 if (early_node_map[i].start_pfn > range_start_pfn) 2496 hole_pages = early_node_map[i].start_pfn - range_start_pfn; 2497 2498 prev_end_pfn = early_node_map[i].start_pfn; 2499 2500 /* Find all holes for the zone within the node */ 2501 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) { 2502 2503 /* No need to continue if prev_end_pfn is outside the zone */ 2504 if (prev_end_pfn >= range_end_pfn) 2505 break; 2506 2507 /* Make sure the end of the zone is not within the hole */ 2508 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn); 2509 prev_end_pfn = max(prev_end_pfn, range_start_pfn); 2510 2511 /* Update the hole size count and move on */ 2512 if (start_pfn > range_start_pfn) { 2513 BUG_ON(prev_end_pfn > start_pfn); 2514 hole_pages += start_pfn - prev_end_pfn; 2515 } 2516 prev_end_pfn = early_node_map[i].end_pfn; 2517 } 2518 2519 /* Account for ranges past physical memory on this node */ 2520 if (range_end_pfn > prev_end_pfn) 2521 hole_pages += range_end_pfn - 2522 max(range_start_pfn, prev_end_pfn); 2523 2524 return hole_pages; 2525} 2526 2527/** 2528 * absent_pages_in_range - Return number of page frames in holes within a range 2529 * @start_pfn: The start PFN to start searching for holes 2530 * @end_pfn: The end PFN to stop searching for holes 2531 * 2532 * It returns the number of page frames in memory holes within a range.
2533 */ 2534unsigned long __init absent_pages_in_range(unsigned long start_pfn, 2535 unsigned long end_pfn) 2536{ 2537 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 2538} 2539 2540/* Return the number of page frames in holes in a zone on a node */ 2541unsigned long __init zone_absent_pages_in_node(int nid, 2542 unsigned long zone_type, 2543 unsigned long *ignored) 2544{ 2545 unsigned long node_start_pfn, node_end_pfn; 2546 unsigned long zone_start_pfn, zone_end_pfn; 2547 2548 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 2549 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type], 2550 node_start_pfn); 2551 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type], 2552 node_end_pfn); 2553 2554 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 2555} 2556 2557#else 2558static inline unsigned long zone_spanned_pages_in_node(int nid, 2559 unsigned long zone_type, 2560 unsigned long *zones_size) 2561{ 2562 return zones_size[zone_type]; 2563} 2564 2565static inline unsigned long zone_absent_pages_in_node(int nid, 2566 unsigned long zone_type, 2567 unsigned long *zholes_size) 2568{ 2569 if (!zholes_size) 2570 return 0; 2571 2572 return zholes_size[zone_type]; 2573} 2574 2575#endif 2576 2577static void __init calculate_node_totalpages(struct pglist_data *pgdat, 2578 unsigned long *zones_size, unsigned long *zholes_size) 2579{ 2580 unsigned long realtotalpages, totalpages = 0; 2581 enum zone_type i; 2582 2583 for (i = 0; i < MAX_NR_ZONES; i++) 2584 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, 2585 zones_size); 2586 pgdat->node_spanned_pages = totalpages; 2587 2588 realtotalpages = totalpages; 2589 for (i = 0; i < MAX_NR_ZONES; i++) 2590 realtotalpages -= 2591 zone_absent_pages_in_node(pgdat->node_id, i, 2592 zholes_size); 2593 pgdat->node_present_pages = realtotalpages; 2594 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 2595 realtotalpages); 2596} 2597 2598/* 2599 * Set up the zone data structures: 2600 * - mark all pages reserved 2601 * - mark all memory queues empty 2602 * - clear the memory bitmaps 2603 */ 2604static void __meminit free_area_init_core(struct pglist_data *pgdat, 2605 unsigned long *zones_size, unsigned long *zholes_size) 2606{ 2607 enum zone_type j; 2608 int nid = pgdat->node_id; 2609 unsigned long zone_start_pfn = pgdat->node_start_pfn; 2610 int ret; 2611 2612 pgdat_resize_init(pgdat); 2613 pgdat->nr_zones = 0; 2614 init_waitqueue_head(&pgdat->kswapd_wait); 2615 pgdat->kswapd_max_order = 0; 2616 2617 for (j = 0; j < MAX_NR_ZONES; j++) { 2618 struct zone *zone = pgdat->node_zones + j; 2619 unsigned long size, realsize, memmap_pages; 2620 2621 size = zone_spanned_pages_in_node(nid, j, zones_size); 2622 realsize = size - zone_absent_pages_in_node(nid, j, 2623 zholes_size); 2624 2625 /* 2626 * Adjust realsize so that it accounts for how much memory 2627 * is used by this zone for memmap. 
This affects the watermark 2628 * and per-cpu initialisations 2629 */ 2630 memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT; 2631 if (realsize >= memmap_pages) { 2632 realsize -= memmap_pages; 2633 printk(KERN_DEBUG 2634 " %s zone: %lu pages used for memmap\n", 2635 zone_names[j], memmap_pages); 2636 } else 2637 printk(KERN_WARNING 2638 " %s zone: %lu pages exceeds realsize %lu\n", 2639 zone_names[j], memmap_pages, realsize); 2640 2641 /* Account for reserved DMA pages */ 2642 if (j == ZONE_DMA && realsize > dma_reserve) { 2643 realsize -= dma_reserve; 2644 printk(KERN_DEBUG " DMA zone: %lu pages reserved\n", 2645 dma_reserve); 2646 } 2647 2648 if (!is_highmem_idx(j)) 2649 nr_kernel_pages += realsize; 2650 nr_all_pages += realsize; 2651 2652 zone->spanned_pages = size; 2653 zone->present_pages = realsize; 2654#ifdef CONFIG_NUMA 2655 zone->node = nid; 2656 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio) 2657 / 100; 2658 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100; 2659#endif 2660 zone->name = zone_names[j]; 2661 spin_lock_init(&zone->lock); 2662 spin_lock_init(&zone->lru_lock); 2663 zone_seqlock_init(zone); 2664 zone->zone_pgdat = pgdat; 2665 zone->free_pages = 0; 2666 2667 zone->prev_priority = DEF_PRIORITY; 2668 2669 zone_pcp_init(zone); 2670 INIT_LIST_HEAD(&zone->active_list); 2671 INIT_LIST_HEAD(&zone->inactive_list); 2672 zone->nr_scan_active = 0; 2673 zone->nr_scan_inactive = 0; 2674 zone->nr_active = 0; 2675 zone->nr_inactive = 0; 2676 zap_zone_vm_stats(zone); 2677 atomic_set(&zone->reclaim_in_progress, 0); 2678 if (!size) 2679 continue; 2680 2681 ret = init_currently_empty_zone(zone, zone_start_pfn, size); 2682 BUG_ON(ret); 2683 zone_start_pfn += size; 2684 } 2685} 2686 2687static void __init alloc_node_mem_map(struct pglist_data *pgdat) 2688{ 2689 /* Skip empty nodes */ 2690 if (!pgdat->node_spanned_pages) 2691 return; 2692 2693#ifdef CONFIG_FLAT_NODE_MEM_MAP 2694 /* ia64 gets its own node_mem_map, before this, without bootmem */ 2695 if (!pgdat->node_mem_map) { 2696 unsigned long size, start, end; 2697 struct page *map; 2698 2699 /* 2700 * The zone's endpoints aren't required to be MAX_ORDER 2701 * aligned but the node_mem_map endpoints must be in order 2702 * for the buddy allocator to function correctly. 
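 *
 * For example (illustrative, with MAX_ORDER_NR_PAGES == 1024): a node
 * starting at pfn 1500 and spanning 3000 pages has start rounded down to
 * 1024 and end rounded up to 5120, so the map below covers
 * 5120 - 1024 = 4096 struct pages and node_mem_map points at
 * map + (1500 - 1024).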
2703 */ 2704 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 2705 end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 2706 end = ALIGN(end, MAX_ORDER_NR_PAGES); 2707 size = (end - start) * sizeof(struct page); 2708 map = alloc_remap(pgdat->node_id, size); 2709 if (!map) 2710 map = alloc_bootmem_node(pgdat, size); 2711 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 2712 } 2713#ifdef CONFIG_FLATMEM 2714 /* 2715 * With no DISCONTIG, the global mem_map is just set as node 0's 2716 */ 2717 if (pgdat == NODE_DATA(0)) { 2718 mem_map = NODE_DATA(0)->node_mem_map; 2719#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2720 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 2721 mem_map -= pgdat->node_start_pfn; 2722#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 2723 } 2724#endif 2725#endif /* CONFIG_FLAT_NODE_MEM_MAP */ 2726} 2727 2728void __meminit free_area_init_node(int nid, struct pglist_data *pgdat, 2729 unsigned long *zones_size, unsigned long node_start_pfn, 2730 unsigned long *zholes_size) 2731{ 2732 pgdat->node_id = nid; 2733 pgdat->node_start_pfn = node_start_pfn; 2734 calculate_node_totalpages(pgdat, zones_size, zholes_size); 2735 2736 alloc_node_mem_map(pgdat); 2737 2738 free_area_init_core(pgdat, zones_size, zholes_size); 2739} 2740 2741#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2742/** 2743 * add_active_range - Register a range of PFNs backed by physical memory 2744 * @nid: The node ID the range resides on 2745 * @start_pfn: The start PFN of the available physical memory 2746 * @end_pfn: The end PFN of the available physical memory 2747 * 2748 * These ranges are stored in an early_node_map[] and later used by 2749 * free_area_init_nodes() to calculate zone sizes and holes. If the 2750 * range spans a memory hole, it is up to the architecture to ensure 2751 * the memory is not freed by the bootmem allocator. If possible 2752 * the range being registered will be merged with existing ranges. 
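 *
 * For example (illustrative pfn values): after add_active_range(0, 0, 100),
 * a call to add_active_range(0, 100, 200) is merged forward into a single
 * [0, 200) entry instead of consuming another early_node_map[] slot, and a
 * later add_active_range(0, 50, 150) is already covered and simply ignored.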
2753 */ 2754void __init add_active_range(unsigned int nid, unsigned long start_pfn, 2755 unsigned long end_pfn) 2756{ 2757 int i; 2758 2759 printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) " 2760 "%d entries of %d used\n", 2761 nid, start_pfn, end_pfn, 2762 nr_nodemap_entries, MAX_ACTIVE_REGIONS); 2763 2764 /* Merge with existing active regions if possible */ 2765 for (i = 0; i < nr_nodemap_entries; i++) { 2766 if (early_node_map[i].nid != nid) 2767 continue; 2768 2769 /* Skip if an existing region covers this new one */ 2770 if (start_pfn >= early_node_map[i].start_pfn && 2771 end_pfn <= early_node_map[i].end_pfn) 2772 return; 2773 2774 /* Merge forward if suitable */ 2775 if (start_pfn <= early_node_map[i].end_pfn && 2776 end_pfn > early_node_map[i].end_pfn) { 2777 early_node_map[i].end_pfn = end_pfn; 2778 return; 2779 } 2780 2781 /* Merge backward if suitable */ 2782 if (start_pfn < early_node_map[i].end_pfn && 2783 end_pfn >= early_node_map[i].start_pfn) { 2784 early_node_map[i].start_pfn = start_pfn; 2785 return; 2786 } 2787 } 2788 2789 /* Check that early_node_map is large enough */ 2790 if (i >= MAX_ACTIVE_REGIONS) { 2791 printk(KERN_CRIT "More than %d memory regions, truncating\n", 2792 MAX_ACTIVE_REGIONS); 2793 return; 2794 } 2795 2796 early_node_map[i].nid = nid; 2797 early_node_map[i].start_pfn = start_pfn; 2798 early_node_map[i].end_pfn = end_pfn; 2799 nr_nodemap_entries = i + 1; 2800} 2801 2802/** 2803 * shrink_active_range - Shrink an existing registered range of PFNs 2804 * @nid: The node id the range is on that should be shrunk 2805 * @old_end_pfn: The old end PFN of the range 2806 * @new_end_pfn: The new end PFN of the range 2807 * 2808 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node. 2809 * The map is kept at the end of the physical page range that has already been 2810 * registered with add_active_range(). This function allows an arch to shrink 2811 * an existing registered range. 2812 */ 2813void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn, 2814 unsigned long new_end_pfn) 2815{ 2816 int i; 2817 2818 /* Find the old active region end and shrink */ 2819 for_each_active_range_index_in_nid(i, nid) 2820 if (early_node_map[i].end_pfn == old_end_pfn) { 2821 early_node_map[i].end_pfn = new_end_pfn; 2822 break; 2823 } 2824} 2825 2826/** 2827 * remove_all_active_ranges - Remove all currently registered regions 2828 * 2829 * During discovery, it may be found that a table like SRAT is invalid 2830 * and an alternative discovery method must be used. This function removes 2831 * all currently registered regions.
2832 */ 2833void __init remove_all_active_ranges(void) 2834{ 2835 memset(early_node_map, 0, sizeof(early_node_map)); 2836 nr_nodemap_entries = 0; 2837#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 2838 memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn)); 2839 memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn)); 2840#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 2841} 2842 2843/* Compare two active node_active_regions */ 2844static int __init cmp_node_active_region(const void *a, const void *b) 2845{ 2846 struct node_active_region *arange = (struct node_active_region *)a; 2847 struct node_active_region *brange = (struct node_active_region *)b; 2848 2849 /* Done this way to avoid overflows */ 2850 if (arange->start_pfn > brange->start_pfn) 2851 return 1; 2852 if (arange->start_pfn < brange->start_pfn) 2853 return -1; 2854 2855 return 0; 2856} 2857 2858/* sort the node_map by start_pfn */ 2859static void __init sort_node_map(void) 2860{ 2861 sort(early_node_map, (size_t)nr_nodemap_entries, 2862 sizeof(struct node_active_region), 2863 cmp_node_active_region, NULL); 2864} 2865 2866/* Find the lowest pfn for a node. This depends on a sorted early_node_map */ 2867unsigned long __init find_min_pfn_for_node(unsigned long nid) 2868{ 2869 int i; 2870 2871 /* Regions in the early_node_map can be in any order */ 2872 sort_node_map(); 2873 2874 /* Assuming a sorted map, the first range found has the starting pfn */ 2875 for_each_active_range_index_in_nid(i, nid) 2876 return early_node_map[i].start_pfn; 2877 2878 printk(KERN_WARNING "Could not find start_pfn for node %lu\n", nid); 2879 return 0; 2880} 2881 2882/** 2883 * find_min_pfn_with_active_regions - Find the minimum PFN registered 2884 * 2885 * It returns the minimum PFN based on information provided via 2886 * add_active_range(). 2887 */ 2888unsigned long __init find_min_pfn_with_active_regions(void) 2889{ 2890 return find_min_pfn_for_node(MAX_NUMNODES); 2891} 2892 2893/** 2894 * find_max_pfn_with_active_regions - Find the maximum PFN registered 2895 * 2896 * It returns the maximum PFN based on information provided via 2897 * add_active_range(). 2898 */ 2899unsigned long __init find_max_pfn_with_active_regions(void) 2900{ 2901 int i; 2902 unsigned long max_pfn = 0; 2903 2904 for (i = 0; i < nr_nodemap_entries; i++) 2905 max_pfn = max(max_pfn, early_node_map[i].end_pfn); 2906 2907 return max_pfn; 2908} 2909 2910/** 2911 * free_area_init_nodes - Initialise all pg_data_t and zone data 2912 * @max_zone_pfn: an array of max PFNs for each zone 2913 * 2914 * This will call free_area_init_node() for each active node in the system. 2915 * Using the page ranges provided by add_active_range(), the size of each 2916 * zone in each node and their holes is calculated. If the maximum PFN 2917 * between two adjacent zones match, it is assumed that the zone is empty. 2918 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 2919 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 2920 * starts where the previous one ended. For example, ZONE_DMA32 starts 2921 * at arch_max_dma_pfn. 
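 *
 * A typical (illustrative) caller in arch code registers its memory with
 * add_active_range() and then does something like the following, where
 * MAX_DMA_PFN and max_low_pfn stand for arch-specific values:
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
 *	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
 *	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 *	free_area_init_nodes(max_zone_pfns);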
2922 */ 2923void __init free_area_init_nodes(unsigned long *max_zone_pfn) 2924{ 2925 unsigned long nid; 2926 enum zone_type i; 2927 2928 /* Record where the zone boundaries are */ 2929 memset(arch_zone_lowest_possible_pfn, 0, 2930 sizeof(arch_zone_lowest_possible_pfn)); 2931 memset(arch_zone_highest_possible_pfn, 0, 2932 sizeof(arch_zone_highest_possible_pfn)); 2933 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 2934 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 2935 for (i = 1; i < MAX_NR_ZONES; i++) { 2936 arch_zone_lowest_possible_pfn[i] = 2937 arch_zone_highest_possible_pfn[i-1]; 2938 arch_zone_highest_possible_pfn[i] = 2939 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 2940 } 2941 2942 /* Print out the zone ranges */ 2943 printk("Zone PFN ranges:\n"); 2944 for (i = 0; i < MAX_NR_ZONES; i++) 2945 printk(" %-8s %8lu -> %8lu\n", 2946 zone_names[i], 2947 arch_zone_lowest_possible_pfn[i], 2948 arch_zone_highest_possible_pfn[i]); 2949 2950 /* Print out the early_node_map[] */ 2951 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); 2952 for (i = 0; i < nr_nodemap_entries; i++) 2953 printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid, 2954 early_node_map[i].start_pfn, 2955 early_node_map[i].end_pfn); 2956 2957 /* Initialise every node */ 2958 for_each_online_node(nid) { 2959 pg_data_t *pgdat = NODE_DATA(nid); 2960 free_area_init_node(nid, pgdat, NULL, 2961 find_min_pfn_for_node(nid), NULL); 2962 } 2963} 2964#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 2965 2966/** 2967 * set_dma_reserve - set the specified number of pages reserved in the first zone 2968 * @new_dma_reserve: The number of pages to mark reserved 2969 * 2970 * The per-cpu batchsize and zone watermarks are determined by present_pages. 2971 * In the DMA zone, a significant percentage may be consumed by kernel image 2972 * and other unfreeable allocations which can skew the watermarks badly. This 2973 * function may optionally be used to account for unfreeable pages in the 2974 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 2975 * smaller per-cpu batchsize. 2976 */ 2977void __init set_dma_reserve(unsigned long new_dma_reserve) 2978{ 2979 dma_reserve = new_dma_reserve; 2980} 2981 2982#ifndef CONFIG_NEED_MULTIPLE_NODES 2983static bootmem_data_t contig_bootmem_data; 2984struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data }; 2985 2986EXPORT_SYMBOL(contig_page_data); 2987#endif 2988 2989void __init free_area_init(unsigned long *zones_size) 2990{ 2991 free_area_init_node(0, NODE_DATA(0), zones_size, 2992 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 2993} 2994 2995static int page_alloc_cpu_notify(struct notifier_block *self, 2996 unsigned long action, void *hcpu) 2997{ 2998 int cpu = (unsigned long)hcpu; 2999 3000 if (action == CPU_DEAD) { 3001 local_irq_disable(); 3002 __drain_pages(cpu); 3003 vm_events_fold_cpu(cpu); 3004 local_irq_enable(); 3005 refresh_cpu_vm_stats(cpu); 3006 } 3007 return NOTIFY_OK; 3008} 3009 3010void __init page_alloc_init(void) 3011{ 3012 hotcpu_notifier(page_alloc_cpu_notify, 0); 3013} 3014 3015/* 3016 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio 3017 * or min_free_kbytes changes. 
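 *
 * For each zone the function below takes the largest lowmem_reserve[]
 * entry plus pages_high, clamps the sum to present_pages, and adds it to
 * totalreserve_pages. For example (illustrative numbers), a zone with
 * lowmem_reserve[] = { 0, 784, 928 } and pages_high = 100 contributes
 * 1028 pages.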
3018 */ 3019static void calculate_totalreserve_pages(void) 3020{ 3021 struct pglist_data *pgdat; 3022 unsigned long reserve_pages = 0; 3023 enum zone_type i, j; 3024 3025 for_each_online_pgdat(pgdat) { 3026 for (i = 0; i < MAX_NR_ZONES; i++) { 3027 struct zone *zone = pgdat->node_zones + i; 3028 unsigned long max = 0; 3029 3030 /* Find valid and maximum lowmem_reserve in the zone */ 3031 for (j = i; j < MAX_NR_ZONES; j++) { 3032 if (zone->lowmem_reserve[j] > max) 3033 max = zone->lowmem_reserve[j]; 3034 } 3035 3036 /* we treat pages_high as reserved pages. */ 3037 max += zone->pages_high; 3038 3039 if (max > zone->present_pages) 3040 max = zone->present_pages; 3041 reserve_pages += max; 3042 } 3043 } 3044 totalreserve_pages = reserve_pages; 3045} 3046 3047/* 3048 * setup_per_zone_lowmem_reserve - called whenever 3049 * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone 3050 * has a correct pages reserved value, so an adequate number of 3051 * pages are left in the zone after a successful __alloc_pages(). 3052 */ 3053static void setup_per_zone_lowmem_reserve(void) 3054{ 3055 struct pglist_data *pgdat; 3056 enum zone_type j, idx; 3057 3058 for_each_online_pgdat(pgdat) { 3059 for (j = 0; j < MAX_NR_ZONES; j++) { 3060 struct zone *zone = pgdat->node_zones + j; 3061 unsigned long present_pages = zone->present_pages; 3062 3063 zone->lowmem_reserve[j] = 0; 3064 3065 idx = j; 3066 while (idx) { 3067 struct zone *lower_zone; 3068 3069 idx--; 3070 3071 if (sysctl_lowmem_reserve_ratio[idx] < 1) 3072 sysctl_lowmem_reserve_ratio[idx] = 1; 3073 3074 lower_zone = pgdat->node_zones + idx; 3075 lower_zone->lowmem_reserve[j] = present_pages / 3076 sysctl_lowmem_reserve_ratio[idx]; 3077 present_pages += lower_zone->present_pages; 3078 } 3079 } 3080 } 3081 3082 /* update totalreserve_pages */ 3083 calculate_totalreserve_pages(); 3084} 3085 3086/** 3087 * setup_per_zone_pages_min - called when min_free_kbytes changes. 3088 * 3089 * Ensures that the pages_{min,low,high} values for each zone are set correctly 3090 * with respect to min_free_kbytes. 3091 */ 3092void setup_per_zone_pages_min(void) 3093{ 3094 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 3095 unsigned long lowmem_pages = 0; 3096 struct zone *zone; 3097 unsigned long flags; 3098 3099 /* Calculate total number of !ZONE_HIGHMEM pages */ 3100 for_each_zone(zone) { 3101 if (!is_highmem(zone)) 3102 lowmem_pages += zone->present_pages; 3103 } 3104 3105 for_each_zone(zone) { 3106 u64 tmp; 3107 3108 spin_lock_irqsave(&zone->lru_lock, flags); 3109 tmp = (u64)pages_min * zone->present_pages; 3110 do_div(tmp, lowmem_pages); 3111 if (is_highmem(zone)) { 3112 /* 3113 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 3114 * need highmem pages, so cap pages_min to a small 3115 * value here. 3116 * 3117 * The (pages_high-pages_low) and (pages_low-pages_min) 3118 * deltas control asynch page reclaim, and so should 3119 * not be capped for highmem. 3120 */ 3121 int min_pages; 3122 3123 min_pages = zone->present_pages / 1024; 3124 if (min_pages < SWAP_CLUSTER_MAX) 3125 min_pages = SWAP_CLUSTER_MAX; 3126 if (min_pages > 128) 3127 min_pages = 128; 3128 zone->pages_min = min_pages; 3129 } else { 3130 /* 3131 * If it's a lowmem zone, reserve a number of pages 3132 * proportionate to the zone's size.
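 *
 * For example (illustrative, assuming 4K pages): with
 * min_free_kbytes = 4096, pages_min above is 1024; a lowmem
 * zone holding half of all lowmem gets tmp = 512, so its
 * pages_min becomes 512, pages_low 512 + 128 = 640 and
 * pages_high 512 + 256 = 768.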
3133 */ 3134 zone->pages_min = tmp; 3135 } 3136 3137 zone->pages_low = zone->pages_min + (tmp >> 2); 3138 zone->pages_high = zone->pages_min + (tmp >> 1); 3139 spin_unlock_irqrestore(&zone->lru_lock, flags); 3140 } 3141 3142 /* update totalreserve_pages */ 3143 calculate_totalreserve_pages(); 3144} 3145 3146/* 3147 * Initialise min_free_kbytes. 3148 * 3149 * For small machines we want it small (128k min). For large machines 3150 * we want it large (64MB max). But it is not linear, because network 3151 * bandwidth does not increase linearly with machine size. We use 3152 * 3153 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 3154 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 3155 * 3156 * which yields 3157 * 3158 * 16MB: 512k 3159 * 32MB: 724k 3160 * 64MB: 1024k 3161 * 128MB: 1448k 3162 * 256MB: 2048k 3163 * 512MB: 2896k 3164 * 1024MB: 4096k 3165 * 2048MB: 5792k 3166 * 4096MB: 8192k 3167 * 8192MB: 11584k 3168 * 16384MB: 16384k 3169 */ 3170static int __init init_per_zone_pages_min(void) 3171{ 3172 unsigned long lowmem_kbytes; 3173 3174 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 3175 3176 min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 3177 if (min_free_kbytes < 128) 3178 min_free_kbytes = 128; 3179 if (min_free_kbytes > 65536) 3180 min_free_kbytes = 65536; 3181 setup_per_zone_pages_min(); 3182 setup_per_zone_lowmem_reserve(); 3183 return 0; 3184} 3185module_init(init_per_zone_pages_min) 3186 3187/* 3188 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 3189 * that we can call two helper functions whenever min_free_kbytes 3190 * changes. 3191 */ 3192int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 3193 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3194{ 3195 proc_dointvec(table, write, file, buffer, length, ppos); 3196 setup_per_zone_pages_min(); 3197 return 0; 3198} 3199 3200#ifdef CONFIG_NUMA 3201int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, 3202 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3203{ 3204 struct zone *zone; 3205 int rc; 3206 3207 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3208 if (rc) 3209 return rc; 3210 3211 for_each_zone(zone) 3212 zone->min_unmapped_pages = (zone->present_pages * 3213 sysctl_min_unmapped_ratio) / 100; 3214 return 0; 3215} 3216 3217int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write, 3218 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3219{ 3220 struct zone *zone; 3221 int rc; 3222 3223 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3224 if (rc) 3225 return rc; 3226 3227 for_each_zone(zone) 3228 zone->min_slab_pages = (zone->present_pages * 3229 sysctl_min_slab_ratio) / 100; 3230 return 0; 3231} 3232#endif 3233 3234/* 3235 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 3236 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 3237 * whenever sysctl_lowmem_reserve_ratio changes. 3238 * 3239 * The reserve ratio obviously has absolutely no relation with the 3240 * pages_min watermarks. The lowmem reserve ratio only makes sense 3241 * as a function of the boot-time zone sizes.
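 *
 * For example (illustrative), an administrator can change the per-zone
 * ratios with:
 *
 *	echo 256 256 32 > /proc/sys/vm/lowmem_reserve_ratio
 *
 * after which setup_per_zone_lowmem_reserve() recomputes each zone's
 * lowmem_reserve[] from the new values.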
3242 */ 3243int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 3244 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3245{ 3246 proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3247 setup_per_zone_lowmem_reserve(); 3248 return 0; 3249} 3250 3251/* 3252 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 3253 * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist 3254 * can have before it gets flushed back to buddy allocator. 3255 */ 3256 3257int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 3258 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3259{ 3260 struct zone *zone; 3261 unsigned int cpu; 3262 int ret; 3263 3264 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3265 if (!write || (ret == -EINVAL)) 3266 return ret; 3267 for_each_zone(zone) { 3268 for_each_online_cpu(cpu) { 3269 unsigned long high; 3270 high = zone->present_pages / percpu_pagelist_fraction; 3271 setup_pagelist_highmark(zone_pcp(zone, cpu), high); 3272 } 3273 } 3274 return 0; 3275} 3276 3277int hashdist = HASHDIST_DEFAULT; 3278 3279#ifdef CONFIG_NUMA 3280static int __init set_hashdist(char *str) 3281{ 3282 if (!str) 3283 return 0; 3284 hashdist = simple_strtoul(str, &str, 0); 3285 return 1; 3286} 3287__setup("hashdist=", set_hashdist); 3288#endif 3289 3290/* 3291 * allocate a large system hash table from bootmem 3292 * - it is assumed that the hash table must contain an exact power-of-2 3293 * quantity of entries 3294 * - limit is the number of hash buckets, not the total allocation size 3295 */ 3296void *__init alloc_large_system_hash(const char *tablename, 3297 unsigned long bucketsize, 3298 unsigned long numentries, 3299 int scale, 3300 int flags, 3301 unsigned int *_hash_shift, 3302 unsigned int *_hash_mask, 3303 unsigned long limit) 3304{ 3305 unsigned long long max = limit; 3306 unsigned long log2qty, size; 3307 void *table = NULL; 3308 3309 /* allow the kernel cmdline to have a say */ 3310 if (!numentries) { 3311 /* round applicable memory size up to nearest megabyte */ 3312 numentries = nr_kernel_pages; 3313 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 3314 numentries >>= 20 - PAGE_SHIFT; 3315 numentries <<= 20 - PAGE_SHIFT; 3316 3317 /* limit to 1 bucket per 2^scale bytes of low memory */ 3318 if (scale > PAGE_SHIFT) 3319 numentries >>= (scale - PAGE_SHIFT); 3320 else 3321 numentries <<= (PAGE_SHIFT - scale); 3322 } 3323 numentries = roundup_pow_of_two(numentries); 3324 3325 /* limit allocation size to 1/16 total memory by default */ 3326 if (max == 0) { 3327 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 3328 do_div(max, bucketsize); 3329 } 3330 3331 if (numentries > max) 3332 numentries = max; 3333 3334 log2qty = ilog2(numentries); 3335 3336 do { 3337 size = bucketsize << log2qty; 3338 if (flags & HASH_EARLY) 3339 table = alloc_bootmem(size); 3340 else if (hashdist) 3341 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 3342 else { 3343 unsigned long order; 3344 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++) 3345 ; 3346 table = (void*) __get_free_pages(GFP_ATOMIC, order); 3347 } 3348 } while (!table && size > PAGE_SIZE && --log2qty); 3349 3350 if (!table) 3351 panic("Failed to allocate %s hash table\n", tablename); 3352 3353 printk("%s hash table entries: %d (order: %d, %lu bytes)\n", 3354 tablename, 3355 (1U << log2qty), 3356 ilog2(size) - PAGE_SHIFT, 3357 size); 3358 3359 if (_hash_shift) 3360 *_hash_shift = 
log2qty; 3361 if (_hash_mask) 3362 *_hash_mask = (1 << log2qty) - 1; 3363 3364 return table; 3365} 3366 3367#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE 3368struct page *pfn_to_page(unsigned long pfn) 3369{ 3370 return __pfn_to_page(pfn); 3371} 3372unsigned long page_to_pfn(struct page *page) 3373{ 3374 return __page_to_pfn(page); 3375} 3376EXPORT_SYMBOL(pfn_to_page); 3377EXPORT_SYMBOL(page_to_pfn); 3378#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ 3379 3380#if MAX_NUMNODES > 1 3381/* 3382 * Find the highest possible node id. 3383 */ 3384int highest_possible_node_id(void) 3385{ 3386 unsigned int node; 3387 unsigned int highest = 0; 3388 3389 for_each_node_mask(node, node_possible_map) 3390 highest = node; 3391 return highest; 3392} 3393EXPORT_SYMBOL(highest_possible_node_id); 3394#endif 3395
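
/*
 * Example (illustrative only, the names below are hypothetical): callers
 * such as the inode and dentry caches size their hash tables with
 * alloc_large_system_hash() roughly like this:
 *
 *	example_table = alloc_large_system_hash("Example-cache",
 *					sizeof(struct hlist_head),
 *					0,		(size derived from nr_kernel_pages)
 *					14,		(one bucket per 16KB of low memory)
 *					HASH_EARLY,
 *					&example_shift,
 *					&example_mask,
 *					0);		(no explicit limit)
 */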