page_alloc.c revision d23ad42324cc4378132e51f2fc5c9ba6cbe75182
1/* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17#include <linux/stddef.h> 18#include <linux/mm.h> 19#include <linux/swap.h> 20#include <linux/interrupt.h> 21#include <linux/pagemap.h> 22#include <linux/bootmem.h> 23#include <linux/compiler.h> 24#include <linux/kernel.h> 25#include <linux/module.h> 26#include <linux/suspend.h> 27#include <linux/pagevec.h> 28#include <linux/blkdev.h> 29#include <linux/slab.h> 30#include <linux/notifier.h> 31#include <linux/topology.h> 32#include <linux/sysctl.h> 33#include <linux/cpu.h> 34#include <linux/cpuset.h> 35#include <linux/memory_hotplug.h> 36#include <linux/nodemask.h> 37#include <linux/vmalloc.h> 38#include <linux/mempolicy.h> 39#include <linux/stop_machine.h> 40#include <linux/sort.h> 41#include <linux/pfn.h> 42#include <linux/backing-dev.h> 43#include <linux/fault-inject.h> 44 45#include <asm/tlbflush.h> 46#include <asm/div64.h> 47#include "internal.h" 48 49/* 50 * MCD - HACK: Find somewhere to initialize this EARLY, or make this 51 * initializer cleaner 52 */ 53nodemask_t node_online_map __read_mostly = { { [0] = 1UL } }; 54EXPORT_SYMBOL(node_online_map); 55nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL; 56EXPORT_SYMBOL(node_possible_map); 57unsigned long totalram_pages __read_mostly; 58unsigned long totalreserve_pages __read_mostly; 59long nr_swap_pages; 60int percpu_pagelist_fraction; 61 62static void __free_pages_ok(struct page *page, unsigned int order); 63 64/* 65 * results with 256, 32 in the lowmem_reserve sysctl: 66 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 67 * 1G machine -> (16M dma, 784M normal, 224M high) 68 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 69 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 70 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA 71 * 72 * TBD: should special case ZONE_DMA32 machines here - in those we normally 73 * don't need any ZONE_NORMAL reservation 74 */ 75int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 76 256, 77#ifdef CONFIG_ZONE_DMA32 78 256, 79#endif 80#ifdef CONFIG_HIGHMEM 81 32 82#endif 83}; 84 85EXPORT_SYMBOL(totalram_pages); 86 87static char * const zone_names[MAX_NR_ZONES] = { 88 "DMA", 89#ifdef CONFIG_ZONE_DMA32 90 "DMA32", 91#endif 92 "Normal", 93#ifdef CONFIG_HIGHMEM 94 "HighMem" 95#endif 96}; 97 98int min_free_kbytes = 1024; 99 100unsigned long __meminitdata nr_kernel_pages; 101unsigned long __meminitdata nr_all_pages; 102static unsigned long __initdata dma_reserve; 103 104#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 105 /* 106 * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct 107 * ranges of memory (RAM) that may be registered with add_active_range(). 108 * Ranges passed to add_active_range() will be merged if possible 109 * so the number of times add_active_range() can be called is 110 * related to the number of nodes and the number of holes 111 */ 112 #ifdef CONFIG_MAX_ACTIVE_REGIONS 113 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ 114 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS 115 #else 116 #if MAX_NUMNODES >= 32 117 /* If there can be many nodes, allow up to 50 holes per node */ 118 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) 119 #else 120 /* By default, allow up to 256 distinct regions */ 121 #define MAX_ACTIVE_REGIONS 256 122 #endif 123 #endif 124 125 struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS]; 126 int __initdata nr_nodemap_entries; 127 unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; 128 unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; 129#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 130 unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES]; 131 unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES]; 132#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 133#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 134 135#ifdef CONFIG_DEBUG_VM 136static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 137{ 138 int ret = 0; 139 unsigned seq; 140 unsigned long pfn = page_to_pfn(page); 141 142 do { 143 seq = zone_span_seqbegin(zone); 144 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 145 ret = 1; 146 else if (pfn < zone->zone_start_pfn) 147 ret = 1; 148 } while (zone_span_seqretry(zone, seq)); 149 150 return ret; 151} 152 153static int page_is_consistent(struct zone *zone, struct page *page) 154{ 155#ifdef CONFIG_HOLES_IN_ZONE 156 if (!pfn_valid(page_to_pfn(page))) 157 return 0; 158#endif 159 if (zone != page_zone(page)) 160 return 0; 161 162 return 1; 163} 164/* 165 * Temporary debugging check for pages not lying within a given zone. 166 */ 167static int bad_range(struct zone *zone, struct page *page) 168{ 169 if (page_outside_zone_boundaries(zone, page)) 170 return 1; 171 if (!page_is_consistent(zone, page)) 172 return 1; 173 174 return 0; 175} 176#else 177static inline int bad_range(struct zone *zone, struct page *page) 178{ 179 return 0; 180} 181#endif 182 183static void bad_page(struct page *page) 184{ 185 printk(KERN_EMERG "Bad page state in process '%s'\n" 186 KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" 187 KERN_EMERG "Trying to fix it up, but a reboot is needed\n" 188 KERN_EMERG "Backtrace:\n", 189 current->comm, page, (int)(2*sizeof(unsigned long)), 190 (unsigned long)page->flags, page->mapping, 191 page_mapcount(page), page_count(page)); 192 dump_stack(); 193 page->flags &= ~(1 << PG_lru | 194 1 << PG_private | 195 1 << PG_locked | 196 1 << PG_active | 197 1 << PG_dirty | 198 1 << PG_reclaim | 199 1 << PG_slab | 200 1 << PG_swapcache | 201 1 << PG_writeback | 202 1 << PG_buddy ); 203 set_page_count(page, 0); 204 reset_page_mapcount(page); 205 page->mapping = NULL; 206 add_taint(TAINT_BAD_PAGE); 207} 208 209/* 210 * Higher-order pages are called "compound pages". They are structured thusly: 211 * 212 * The first PAGE_SIZE page is called the "head page". 213 * 214 * The remaining PAGE_SIZE pages are called "tail pages". 215 * 216 * All pages have PG_compound set. All pages have their ->private pointing at 217 * the head page (even the head page has this). 218 * 219 * The first tail page's ->lru.next holds the address of the compound page's 220 * put_page() function. Its ->lru.prev holds the order of allocation. 221 * This usage means that zero-order pages may not be compound. 222 */ 223 224static void free_compound_page(struct page *page) 225{ 226 __free_pages_ok(page, (unsigned long)page[1].lru.prev); 227} 228 229static void prep_compound_page(struct page *page, unsigned long order) 230{ 231 int i; 232 int nr_pages = 1 << order; 233 234 set_compound_page_dtor(page, free_compound_page); 235 page[1].lru.prev = (void *)order; 236 for (i = 0; i < nr_pages; i++) { 237 struct page *p = page + i; 238 239 __SetPageCompound(p); 240 set_page_private(p, (unsigned long)page); 241 } 242} 243 244static void destroy_compound_page(struct page *page, unsigned long order) 245{ 246 int i; 247 int nr_pages = 1 << order; 248 249 if (unlikely((unsigned long)page[1].lru.prev != order)) 250 bad_page(page); 251 252 for (i = 0; i < nr_pages; i++) { 253 struct page *p = page + i; 254 255 if (unlikely(!PageCompound(p) | 256 (page_private(p) != (unsigned long)page))) 257 bad_page(page); 258 __ClearPageCompound(p); 259 } 260} 261 262static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 263{ 264 int i; 265 266 VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM); 267 /* 268 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 269 * and __GFP_HIGHMEM from hard or soft interrupt context. 270 */ 271 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt()); 272 for (i = 0; i < (1 << order); i++) 273 clear_highpage(page + i); 274} 275 276/* 277 * function for dealing with page's order in buddy system. 278 * zone->lock is already acquired when we use these. 279 * So, we don't need atomic page->flags operations here. 280 */ 281static inline unsigned long page_order(struct page *page) 282{ 283 return page_private(page); 284} 285 286static inline void set_page_order(struct page *page, int order) 287{ 288 set_page_private(page, order); 289 __SetPageBuddy(page); 290} 291 292static inline void rmv_page_order(struct page *page) 293{ 294 __ClearPageBuddy(page); 295 set_page_private(page, 0); 296} 297 298/* 299 * Locate the struct page for both the matching buddy in our 300 * pair (buddy1) and the combined O(n+1) page they form (page). 301 * 302 * 1) Any buddy B1 will have an order O twin B2 which satisfies 303 * the following equation: 304 * B2 = B1 ^ (1 << O) 305 * For example, if the starting buddy (buddy2) is #8 its order 306 * 1 buddy is #10: 307 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 308 * 309 * 2) Any buddy B will have an order O+1 parent P which 310 * satisfies the following equation: 311 * P = B & ~(1 << O) 312 * 313 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER 314 */ 315static inline struct page * 316__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order) 317{ 318 unsigned long buddy_idx = page_idx ^ (1 << order); 319 320 return page + (buddy_idx - page_idx); 321} 322 323static inline unsigned long 324__find_combined_index(unsigned long page_idx, unsigned int order) 325{ 326 return (page_idx & ~(1 << order)); 327} 328 329/* 330 * This function checks whether a page is free && is the buddy 331 * we can do coalesce a page and its buddy if 332 * (a) the buddy is not in a hole && 333 * (b) the buddy is in the buddy system && 334 * (c) a page and its buddy have the same order && 335 * (d) a page and its buddy are in the same zone. 336 * 337 * For recording whether a page is in the buddy system, we use PG_buddy. 338 * Setting, clearing, and testing PG_buddy is serialized by zone->lock. 339 * 340 * For recording page's order, we use page_private(page). 341 */ 342static inline int page_is_buddy(struct page *page, struct page *buddy, 343 int order) 344{ 345#ifdef CONFIG_HOLES_IN_ZONE 346 if (!pfn_valid(page_to_pfn(buddy))) 347 return 0; 348#endif 349 350 if (page_zone_id(page) != page_zone_id(buddy)) 351 return 0; 352 353 if (PageBuddy(buddy) && page_order(buddy) == order) { 354 BUG_ON(page_count(buddy) != 0); 355 return 1; 356 } 357 return 0; 358} 359 360/* 361 * Freeing function for a buddy system allocator. 362 * 363 * The concept of a buddy system is to maintain direct-mapped table 364 * (containing bit values) for memory blocks of various "orders". 365 * The bottom level table contains the map for the smallest allocatable 366 * units of memory (here, pages), and each level above it describes 367 * pairs of units from the levels below, hence, "buddies". 368 * At a high level, all that happens here is marking the table entry 369 * at the bottom level available, and propagating the changes upward 370 * as necessary, plus some accounting needed to play nicely with other 371 * parts of the VM system. 372 * At each level, we keep a list of pages, which are heads of continuous 373 * free pages of length of (1 << order) and marked with PG_buddy. Page's 374 * order is recorded in page_private(page) field. 375 * So when we are allocating or freeing one, we can derive the state of the 376 * other. That is, if we allocate a small block, and both were 377 * free, the remainder of the region must be split into blocks. 378 * If a block is freed, and its buddy is also free, then this 379 * triggers coalescing into a block of larger size. 380 * 381 * -- wli 382 */ 383 384static inline void __free_one_page(struct page *page, 385 struct zone *zone, unsigned int order) 386{ 387 unsigned long page_idx; 388 int order_size = 1 << order; 389 390 if (unlikely(PageCompound(page))) 391 destroy_compound_page(page, order); 392 393 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 394 395 VM_BUG_ON(page_idx & (order_size - 1)); 396 VM_BUG_ON(bad_range(zone, page)); 397 398 __mod_zone_page_state(zone, NR_FREE_PAGES, order_size); 399 while (order < MAX_ORDER-1) { 400 unsigned long combined_idx; 401 struct free_area *area; 402 struct page *buddy; 403 404 buddy = __page_find_buddy(page, page_idx, order); 405 if (!page_is_buddy(page, buddy, order)) 406 break; /* Move the buddy up one level. */ 407 408 list_del(&buddy->lru); 409 area = zone->free_area + order; 410 area->nr_free--; 411 rmv_page_order(buddy); 412 combined_idx = __find_combined_index(page_idx, order); 413 page = page + (combined_idx - page_idx); 414 page_idx = combined_idx; 415 order++; 416 } 417 set_page_order(page, order); 418 list_add(&page->lru, &zone->free_area[order].free_list); 419 zone->free_area[order].nr_free++; 420} 421 422static inline int free_pages_check(struct page *page) 423{ 424 if (unlikely(page_mapcount(page) | 425 (page->mapping != NULL) | 426 (page_count(page) != 0) | 427 (page->flags & ( 428 1 << PG_lru | 429 1 << PG_private | 430 1 << PG_locked | 431 1 << PG_active | 432 1 << PG_reclaim | 433 1 << PG_slab | 434 1 << PG_swapcache | 435 1 << PG_writeback | 436 1 << PG_reserved | 437 1 << PG_buddy )))) 438 bad_page(page); 439 if (PageDirty(page)) 440 __ClearPageDirty(page); 441 /* 442 * For now, we report if PG_reserved was found set, but do not 443 * clear it, and do not free the page. But we shall soon need 444 * to do more, for when the ZERO_PAGE count wraps negative. 445 */ 446 return PageReserved(page); 447} 448 449/* 450 * Frees a list of pages. 451 * Assumes all pages on list are in same zone, and of same order. 452 * count is the number of pages to free. 453 * 454 * If the zone was previously in an "all pages pinned" state then look to 455 * see if this freeing clears that state. 456 * 457 * And clear the zone's pages_scanned counter, to hold off the "all pages are 458 * pinned" detection logic. 459 */ 460static void free_pages_bulk(struct zone *zone, int count, 461 struct list_head *list, int order) 462{ 463 spin_lock(&zone->lock); 464 zone->all_unreclaimable = 0; 465 zone->pages_scanned = 0; 466 while (count--) { 467 struct page *page; 468 469 VM_BUG_ON(list_empty(list)); 470 page = list_entry(list->prev, struct page, lru); 471 /* have to delete it as __free_one_page list manipulates */ 472 list_del(&page->lru); 473 __free_one_page(page, zone, order); 474 } 475 spin_unlock(&zone->lock); 476} 477 478static void free_one_page(struct zone *zone, struct page *page, int order) 479{ 480 spin_lock(&zone->lock); 481 zone->all_unreclaimable = 0; 482 zone->pages_scanned = 0; 483 __free_one_page(page, zone, order); 484 spin_unlock(&zone->lock); 485} 486 487static void __free_pages_ok(struct page *page, unsigned int order) 488{ 489 unsigned long flags; 490 int i; 491 int reserved = 0; 492 493 for (i = 0 ; i < (1 << order) ; ++i) 494 reserved += free_pages_check(page + i); 495 if (reserved) 496 return; 497 498 if (!PageHighMem(page)) 499 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); 500 arch_free_page(page, order); 501 kernel_map_pages(page, 1 << order, 0); 502 503 local_irq_save(flags); 504 __count_vm_events(PGFREE, 1 << order); 505 free_one_page(page_zone(page), page, order); 506 local_irq_restore(flags); 507} 508 509/* 510 * permit the bootmem allocator to evade page validation on high-order frees 511 */ 512void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order) 513{ 514 if (order == 0) { 515 __ClearPageReserved(page); 516 set_page_count(page, 0); 517 set_page_refcounted(page); 518 __free_page(page); 519 } else { 520 int loop; 521 522 prefetchw(page); 523 for (loop = 0; loop < BITS_PER_LONG; loop++) { 524 struct page *p = &page[loop]; 525 526 if (loop + 1 < BITS_PER_LONG) 527 prefetchw(p + 1); 528 __ClearPageReserved(p); 529 set_page_count(p, 0); 530 } 531 532 set_page_refcounted(page); 533 __free_pages(page, order); 534 } 535} 536 537 538/* 539 * The order of subdivision here is critical for the IO subsystem. 540 * Please do not alter this order without good reasons and regression 541 * testing. Specifically, as large blocks of memory are subdivided, 542 * the order in which smaller blocks are delivered depends on the order 543 * they're subdivided in this function. This is the primary factor 544 * influencing the order in which pages are delivered to the IO 545 * subsystem according to empirical testing, and this is also justified 546 * by considering the behavior of a buddy system containing a single 547 * large block of memory acted on by a series of small allocations. 548 * This behavior is a critical factor in sglist merging's success. 549 * 550 * -- wli 551 */ 552static inline void expand(struct zone *zone, struct page *page, 553 int low, int high, struct free_area *area) 554{ 555 unsigned long size = 1 << high; 556 557 while (high > low) { 558 area--; 559 high--; 560 size >>= 1; 561 VM_BUG_ON(bad_range(zone, &page[size])); 562 list_add(&page[size].lru, &area->free_list); 563 area->nr_free++; 564 set_page_order(&page[size], high); 565 } 566} 567 568/* 569 * This page is about to be returned from the page allocator 570 */ 571static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 572{ 573 if (unlikely(page_mapcount(page) | 574 (page->mapping != NULL) | 575 (page_count(page) != 0) | 576 (page->flags & ( 577 1 << PG_lru | 578 1 << PG_private | 579 1 << PG_locked | 580 1 << PG_active | 581 1 << PG_dirty | 582 1 << PG_reclaim | 583 1 << PG_slab | 584 1 << PG_swapcache | 585 1 << PG_writeback | 586 1 << PG_reserved | 587 1 << PG_buddy )))) 588 bad_page(page); 589 590 /* 591 * For now, we report if PG_reserved was found set, but do not 592 * clear it, and do not allocate the page: as a safety net. 593 */ 594 if (PageReserved(page)) 595 return 1; 596 597 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 598 1 << PG_referenced | 1 << PG_arch_1 | 599 1 << PG_checked | 1 << PG_mappedtodisk); 600 set_page_private(page, 0); 601 set_page_refcounted(page); 602 603 arch_alloc_page(page, order); 604 kernel_map_pages(page, 1 << order, 1); 605 606 if (gfp_flags & __GFP_ZERO) 607 prep_zero_page(page, order, gfp_flags); 608 609 if (order && (gfp_flags & __GFP_COMP)) 610 prep_compound_page(page, order); 611 612 return 0; 613} 614 615/* 616 * Do the hard work of removing an element from the buddy allocator. 617 * Call me with the zone->lock already held. 618 */ 619static struct page *__rmqueue(struct zone *zone, unsigned int order) 620{ 621 struct free_area * area; 622 unsigned int current_order; 623 struct page *page; 624 625 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 626 area = zone->free_area + current_order; 627 if (list_empty(&area->free_list)) 628 continue; 629 630 page = list_entry(area->free_list.next, struct page, lru); 631 list_del(&page->lru); 632 rmv_page_order(page); 633 area->nr_free--; 634 __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order)); 635 expand(zone, page, order, current_order, area); 636 return page; 637 } 638 639 return NULL; 640} 641 642/* 643 * Obtain a specified number of elements from the buddy allocator, all under 644 * a single hold of the lock, for efficiency. Add them to the supplied list. 645 * Returns the number of new pages which were placed at *list. 646 */ 647static int rmqueue_bulk(struct zone *zone, unsigned int order, 648 unsigned long count, struct list_head *list) 649{ 650 int i; 651 652 spin_lock(&zone->lock); 653 for (i = 0; i < count; ++i) { 654 struct page *page = __rmqueue(zone, order); 655 if (unlikely(page == NULL)) 656 break; 657 list_add_tail(&page->lru, list); 658 } 659 spin_unlock(&zone->lock); 660 return i; 661} 662 663#ifdef CONFIG_NUMA 664/* 665 * Called from the slab reaper to drain pagesets on a particular node that 666 * belongs to the currently executing processor. 667 * Note that this function must be called with the thread pinned to 668 * a single processor. 669 */ 670void drain_node_pages(int nodeid) 671{ 672 int i; 673 enum zone_type z; 674 unsigned long flags; 675 676 for (z = 0; z < MAX_NR_ZONES; z++) { 677 struct zone *zone = NODE_DATA(nodeid)->node_zones + z; 678 struct per_cpu_pageset *pset; 679 680 if (!populated_zone(zone)) 681 continue; 682 683 pset = zone_pcp(zone, smp_processor_id()); 684 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 685 struct per_cpu_pages *pcp; 686 687 pcp = &pset->pcp[i]; 688 if (pcp->count) { 689 int to_drain; 690 691 local_irq_save(flags); 692 if (pcp->count >= pcp->batch) 693 to_drain = pcp->batch; 694 else 695 to_drain = pcp->count; 696 free_pages_bulk(zone, to_drain, &pcp->list, 0); 697 pcp->count -= to_drain; 698 local_irq_restore(flags); 699 } 700 } 701 } 702} 703#endif 704 705static void __drain_pages(unsigned int cpu) 706{ 707 unsigned long flags; 708 struct zone *zone; 709 int i; 710 711 for_each_zone(zone) { 712 struct per_cpu_pageset *pset; 713 714 if (!populated_zone(zone)) 715 continue; 716 717 pset = zone_pcp(zone, cpu); 718 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 719 struct per_cpu_pages *pcp; 720 721 pcp = &pset->pcp[i]; 722 local_irq_save(flags); 723 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 724 pcp->count = 0; 725 local_irq_restore(flags); 726 } 727 } 728} 729 730#ifdef CONFIG_PM 731 732void mark_free_pages(struct zone *zone) 733{ 734 unsigned long pfn, max_zone_pfn; 735 unsigned long flags; 736 int order; 737 struct list_head *curr; 738 739 if (!zone->spanned_pages) 740 return; 741 742 spin_lock_irqsave(&zone->lock, flags); 743 744 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 745 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 746 if (pfn_valid(pfn)) { 747 struct page *page = pfn_to_page(pfn); 748 749 if (!PageNosave(page)) 750 ClearPageNosaveFree(page); 751 } 752 753 for (order = MAX_ORDER - 1; order >= 0; --order) 754 list_for_each(curr, &zone->free_area[order].free_list) { 755 unsigned long i; 756 757 pfn = page_to_pfn(list_entry(curr, struct page, lru)); 758 for (i = 0; i < (1UL << order); i++) 759 SetPageNosaveFree(pfn_to_page(pfn + i)); 760 } 761 762 spin_unlock_irqrestore(&zone->lock, flags); 763} 764 765/* 766 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 767 */ 768void drain_local_pages(void) 769{ 770 unsigned long flags; 771 772 local_irq_save(flags); 773 __drain_pages(smp_processor_id()); 774 local_irq_restore(flags); 775} 776#endif /* CONFIG_PM */ 777 778/* 779 * Free a 0-order page 780 */ 781static void fastcall free_hot_cold_page(struct page *page, int cold) 782{ 783 struct zone *zone = page_zone(page); 784 struct per_cpu_pages *pcp; 785 unsigned long flags; 786 787 if (PageAnon(page)) 788 page->mapping = NULL; 789 if (free_pages_check(page)) 790 return; 791 792 if (!PageHighMem(page)) 793 debug_check_no_locks_freed(page_address(page), PAGE_SIZE); 794 arch_free_page(page, 0); 795 kernel_map_pages(page, 1, 0); 796 797 pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; 798 local_irq_save(flags); 799 __count_vm_event(PGFREE); 800 list_add(&page->lru, &pcp->list); 801 pcp->count++; 802 if (pcp->count >= pcp->high) { 803 free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 804 pcp->count -= pcp->batch; 805 } 806 local_irq_restore(flags); 807 put_cpu(); 808} 809 810void fastcall free_hot_page(struct page *page) 811{ 812 free_hot_cold_page(page, 0); 813} 814 815void fastcall free_cold_page(struct page *page) 816{ 817 free_hot_cold_page(page, 1); 818} 819 820/* 821 * split_page takes a non-compound higher-order page, and splits it into 822 * n (1<<order) sub-pages: page[0..n] 823 * Each sub-page must be freed individually. 824 * 825 * Note: this is probably too low level an operation for use in drivers. 826 * Please consult with lkml before using this in your driver. 827 */ 828void split_page(struct page *page, unsigned int order) 829{ 830 int i; 831 832 VM_BUG_ON(PageCompound(page)); 833 VM_BUG_ON(!page_count(page)); 834 for (i = 1; i < (1 << order); i++) 835 set_page_refcounted(page + i); 836} 837 838/* 839 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 840 * we cheat by calling it from here, in the order > 0 path. Saves a branch 841 * or two. 842 */ 843static struct page *buffered_rmqueue(struct zonelist *zonelist, 844 struct zone *zone, int order, gfp_t gfp_flags) 845{ 846 unsigned long flags; 847 struct page *page; 848 int cold = !!(gfp_flags & __GFP_COLD); 849 int cpu; 850 851again: 852 cpu = get_cpu(); 853 if (likely(order == 0)) { 854 struct per_cpu_pages *pcp; 855 856 pcp = &zone_pcp(zone, cpu)->pcp[cold]; 857 local_irq_save(flags); 858 if (!pcp->count) { 859 pcp->count = rmqueue_bulk(zone, 0, 860 pcp->batch, &pcp->list); 861 if (unlikely(!pcp->count)) 862 goto failed; 863 } 864 page = list_entry(pcp->list.next, struct page, lru); 865 list_del(&page->lru); 866 pcp->count--; 867 } else { 868 spin_lock_irqsave(&zone->lock, flags); 869 page = __rmqueue(zone, order); 870 spin_unlock(&zone->lock); 871 if (!page) 872 goto failed; 873 } 874 875 __count_zone_vm_events(PGALLOC, zone, 1 << order); 876 zone_statistics(zonelist, zone); 877 local_irq_restore(flags); 878 put_cpu(); 879 880 VM_BUG_ON(bad_range(zone, page)); 881 if (prep_new_page(page, order, gfp_flags)) 882 goto again; 883 return page; 884 885failed: 886 local_irq_restore(flags); 887 put_cpu(); 888 return NULL; 889} 890 891#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 892#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ 893#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ 894#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ 895#define ALLOC_HARDER 0x10 /* try to alloc harder */ 896#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 897#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 898 899#ifdef CONFIG_FAIL_PAGE_ALLOC 900 901static struct fail_page_alloc_attr { 902 struct fault_attr attr; 903 904 u32 ignore_gfp_highmem; 905 u32 ignore_gfp_wait; 906 907#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 908 909 struct dentry *ignore_gfp_highmem_file; 910 struct dentry *ignore_gfp_wait_file; 911 912#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 913 914} fail_page_alloc = { 915 .attr = FAULT_ATTR_INITIALIZER, 916 .ignore_gfp_wait = 1, 917 .ignore_gfp_highmem = 1, 918}; 919 920static int __init setup_fail_page_alloc(char *str) 921{ 922 return setup_fault_attr(&fail_page_alloc.attr, str); 923} 924__setup("fail_page_alloc=", setup_fail_page_alloc); 925 926static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 927{ 928 if (gfp_mask & __GFP_NOFAIL) 929 return 0; 930 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 931 return 0; 932 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) 933 return 0; 934 935 return should_fail(&fail_page_alloc.attr, 1 << order); 936} 937 938#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 939 940static int __init fail_page_alloc_debugfs(void) 941{ 942 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 943 struct dentry *dir; 944 int err; 945 946 err = init_fault_attr_dentries(&fail_page_alloc.attr, 947 "fail_page_alloc"); 948 if (err) 949 return err; 950 dir = fail_page_alloc.attr.dentries.dir; 951 952 fail_page_alloc.ignore_gfp_wait_file = 953 debugfs_create_bool("ignore-gfp-wait", mode, dir, 954 &fail_page_alloc.ignore_gfp_wait); 955 956 fail_page_alloc.ignore_gfp_highmem_file = 957 debugfs_create_bool("ignore-gfp-highmem", mode, dir, 958 &fail_page_alloc.ignore_gfp_highmem); 959 960 if (!fail_page_alloc.ignore_gfp_wait_file || 961 !fail_page_alloc.ignore_gfp_highmem_file) { 962 err = -ENOMEM; 963 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file); 964 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file); 965 cleanup_fault_attr_dentries(&fail_page_alloc.attr); 966 } 967 968 return err; 969} 970 971late_initcall(fail_page_alloc_debugfs); 972 973#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 974 975#else /* CONFIG_FAIL_PAGE_ALLOC */ 976 977static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 978{ 979 return 0; 980} 981 982#endif /* CONFIG_FAIL_PAGE_ALLOC */ 983 984/* 985 * Return 1 if free pages are above 'mark'. This takes into account the order 986 * of the allocation. 987 */ 988int zone_watermark_ok(struct zone *z, int order, unsigned long mark, 989 int classzone_idx, int alloc_flags) 990{ 991 /* free_pages my go negative - that's OK */ 992 long min = mark; 993 long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1; 994 int o; 995 996 if (alloc_flags & ALLOC_HIGH) 997 min -= min / 2; 998 if (alloc_flags & ALLOC_HARDER) 999 min -= min / 4; 1000 1001 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 1002 return 0; 1003 for (o = 0; o < order; o++) { 1004 /* At the next order, this order's pages become unavailable */ 1005 free_pages -= z->free_area[o].nr_free << o; 1006 1007 /* Require fewer higher order pages to be free */ 1008 min >>= 1; 1009 1010 if (free_pages <= min) 1011 return 0; 1012 } 1013 return 1; 1014} 1015 1016#ifdef CONFIG_NUMA 1017/* 1018 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to 1019 * skip over zones that are not allowed by the cpuset, or that have 1020 * been recently (in last second) found to be nearly full. See further 1021 * comments in mmzone.h. Reduces cache footprint of zonelist scans 1022 * that have to skip over alot of full or unallowed zones. 1023 * 1024 * If the zonelist cache is present in the passed in zonelist, then 1025 * returns a pointer to the allowed node mask (either the current 1026 * tasks mems_allowed, or node_online_map.) 1027 * 1028 * If the zonelist cache is not available for this zonelist, does 1029 * nothing and returns NULL. 1030 * 1031 * If the fullzones BITMAP in the zonelist cache is stale (more than 1032 * a second since last zap'd) then we zap it out (clear its bits.) 1033 * 1034 * We hold off even calling zlc_setup, until after we've checked the 1035 * first zone in the zonelist, on the theory that most allocations will 1036 * be satisfied from that first zone, so best to examine that zone as 1037 * quickly as we can. 1038 */ 1039static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1040{ 1041 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1042 nodemask_t *allowednodes; /* zonelist_cache approximation */ 1043 1044 zlc = zonelist->zlcache_ptr; 1045 if (!zlc) 1046 return NULL; 1047 1048 if (jiffies - zlc->last_full_zap > 1 * HZ) { 1049 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 1050 zlc->last_full_zap = jiffies; 1051 } 1052 1053 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ? 1054 &cpuset_current_mems_allowed : 1055 &node_online_map; 1056 return allowednodes; 1057} 1058 1059/* 1060 * Given 'z' scanning a zonelist, run a couple of quick checks to see 1061 * if it is worth looking at further for free memory: 1062 * 1) Check that the zone isn't thought to be full (doesn't have its 1063 * bit set in the zonelist_cache fullzones BITMAP). 1064 * 2) Check that the zones node (obtained from the zonelist_cache 1065 * z_to_n[] mapping) is allowed in the passed in allowednodes mask. 1066 * Return true (non-zero) if zone is worth looking at further, or 1067 * else return false (zero) if it is not. 1068 * 1069 * This check -ignores- the distinction between various watermarks, 1070 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is 1071 * found to be full for any variation of these watermarks, it will 1072 * be considered full for up to one second by all requests, unless 1073 * we are so low on memory on all allowed nodes that we are forced 1074 * into the second scan of the zonelist. 1075 * 1076 * In the second scan we ignore this zonelist cache and exactly 1077 * apply the watermarks to all zones, even it is slower to do so. 1078 * We are low on memory in the second scan, and should leave no stone 1079 * unturned looking for a free page. 1080 */ 1081static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, 1082 nodemask_t *allowednodes) 1083{ 1084 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1085 int i; /* index of *z in zonelist zones */ 1086 int n; /* node that zone *z is on */ 1087 1088 zlc = zonelist->zlcache_ptr; 1089 if (!zlc) 1090 return 1; 1091 1092 i = z - zonelist->zones; 1093 n = zlc->z_to_n[i]; 1094 1095 /* This zone is worth trying if it is allowed but not full */ 1096 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones); 1097} 1098 1099/* 1100 * Given 'z' scanning a zonelist, set the corresponding bit in 1101 * zlc->fullzones, so that subsequent attempts to allocate a page 1102 * from that zone don't waste time re-examining it. 1103 */ 1104static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) 1105{ 1106 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1107 int i; /* index of *z in zonelist zones */ 1108 1109 zlc = zonelist->zlcache_ptr; 1110 if (!zlc) 1111 return; 1112 1113 i = z - zonelist->zones; 1114 1115 set_bit(i, zlc->fullzones); 1116} 1117 1118#else /* CONFIG_NUMA */ 1119 1120static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1121{ 1122 return NULL; 1123} 1124 1125static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, 1126 nodemask_t *allowednodes) 1127{ 1128 return 1; 1129} 1130 1131static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) 1132{ 1133} 1134#endif /* CONFIG_NUMA */ 1135 1136/* 1137 * get_page_from_freelist goes through the zonelist trying to allocate 1138 * a page. 1139 */ 1140static struct page * 1141get_page_from_freelist(gfp_t gfp_mask, unsigned int order, 1142 struct zonelist *zonelist, int alloc_flags) 1143{ 1144 struct zone **z; 1145 struct page *page = NULL; 1146 int classzone_idx = zone_idx(zonelist->zones[0]); 1147 struct zone *zone; 1148 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ 1149 int zlc_active = 0; /* set if using zonelist_cache */ 1150 int did_zlc_setup = 0; /* just call zlc_setup() one time */ 1151 1152zonelist_scan: 1153 /* 1154 * Scan zonelist, looking for a zone with enough free. 1155 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1156 */ 1157 z = zonelist->zones; 1158 1159 do { 1160 if (NUMA_BUILD && zlc_active && 1161 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 1162 continue; 1163 zone = *z; 1164 if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) && 1165 zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) 1166 break; 1167 if ((alloc_flags & ALLOC_CPUSET) && 1168 !cpuset_zone_allowed_softwall(zone, gfp_mask)) 1169 goto try_next_zone; 1170 1171 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 1172 unsigned long mark; 1173 if (alloc_flags & ALLOC_WMARK_MIN) 1174 mark = zone->pages_min; 1175 else if (alloc_flags & ALLOC_WMARK_LOW) 1176 mark = zone->pages_low; 1177 else 1178 mark = zone->pages_high; 1179 if (!zone_watermark_ok(zone, order, mark, 1180 classzone_idx, alloc_flags)) { 1181 if (!zone_reclaim_mode || 1182 !zone_reclaim(zone, gfp_mask, order)) 1183 goto this_zone_full; 1184 } 1185 } 1186 1187 page = buffered_rmqueue(zonelist, zone, order, gfp_mask); 1188 if (page) 1189 break; 1190this_zone_full: 1191 if (NUMA_BUILD) 1192 zlc_mark_zone_full(zonelist, z); 1193try_next_zone: 1194 if (NUMA_BUILD && !did_zlc_setup) { 1195 /* we do zlc_setup after the first zone is tried */ 1196 allowednodes = zlc_setup(zonelist, alloc_flags); 1197 zlc_active = 1; 1198 did_zlc_setup = 1; 1199 } 1200 } while (*(++z) != NULL); 1201 1202 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) { 1203 /* Disable zlc cache for second zonelist scan */ 1204 zlc_active = 0; 1205 goto zonelist_scan; 1206 } 1207 return page; 1208} 1209 1210/* 1211 * This is the 'heart' of the zoned buddy allocator. 1212 */ 1213struct page * fastcall 1214__alloc_pages(gfp_t gfp_mask, unsigned int order, 1215 struct zonelist *zonelist) 1216{ 1217 const gfp_t wait = gfp_mask & __GFP_WAIT; 1218 struct zone **z; 1219 struct page *page; 1220 struct reclaim_state reclaim_state; 1221 struct task_struct *p = current; 1222 int do_retry; 1223 int alloc_flags; 1224 int did_some_progress; 1225 1226 might_sleep_if(wait); 1227 1228 if (should_fail_alloc_page(gfp_mask, order)) 1229 return NULL; 1230 1231restart: 1232 z = zonelist->zones; /* the list of zones suitable for gfp_mask */ 1233 1234 if (unlikely(*z == NULL)) { 1235 /* Should this ever happen?? */ 1236 return NULL; 1237 } 1238 1239 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1240 zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); 1241 if (page) 1242 goto got_pg; 1243 1244 /* 1245 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and 1246 * __GFP_NOWARN set) should not cause reclaim since the subsystem 1247 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim 1248 * using a larger set of nodes after it has established that the 1249 * allowed per node queues are empty and that nodes are 1250 * over allocated. 1251 */ 1252 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 1253 goto nopage; 1254 1255 for (z = zonelist->zones; *z; z++) 1256 wakeup_kswapd(*z, order); 1257 1258 /* 1259 * OK, we're below the kswapd watermark and have kicked background 1260 * reclaim. Now things get more complex, so set up alloc_flags according 1261 * to how we want to proceed. 1262 * 1263 * The caller may dip into page reserves a bit more if the caller 1264 * cannot run direct reclaim, or if the caller has realtime scheduling 1265 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 1266 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 1267 */ 1268 alloc_flags = ALLOC_WMARK_MIN; 1269 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) 1270 alloc_flags |= ALLOC_HARDER; 1271 if (gfp_mask & __GFP_HIGH) 1272 alloc_flags |= ALLOC_HIGH; 1273 if (wait) 1274 alloc_flags |= ALLOC_CPUSET; 1275 1276 /* 1277 * Go through the zonelist again. Let __GFP_HIGH and allocations 1278 * coming from realtime tasks go deeper into reserves. 1279 * 1280 * This is the last chance, in general, before the goto nopage. 1281 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 1282 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1283 */ 1284 page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); 1285 if (page) 1286 goto got_pg; 1287 1288 /* This allocation should allow future memory freeing. */ 1289 1290rebalance: 1291 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) 1292 && !in_interrupt()) { 1293 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 1294nofail_alloc: 1295 /* go through the zonelist yet again, ignoring mins */ 1296 page = get_page_from_freelist(gfp_mask, order, 1297 zonelist, ALLOC_NO_WATERMARKS); 1298 if (page) 1299 goto got_pg; 1300 if (gfp_mask & __GFP_NOFAIL) { 1301 congestion_wait(WRITE, HZ/50); 1302 goto nofail_alloc; 1303 } 1304 } 1305 goto nopage; 1306 } 1307 1308 /* Atomic allocations - we can't balance anything */ 1309 if (!wait) 1310 goto nopage; 1311 1312 cond_resched(); 1313 1314 /* We now go into synchronous reclaim */ 1315 cpuset_memory_pressure_bump(); 1316 p->flags |= PF_MEMALLOC; 1317 reclaim_state.reclaimed_slab = 0; 1318 p->reclaim_state = &reclaim_state; 1319 1320 did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); 1321 1322 p->reclaim_state = NULL; 1323 p->flags &= ~PF_MEMALLOC; 1324 1325 cond_resched(); 1326 1327 if (likely(did_some_progress)) { 1328 page = get_page_from_freelist(gfp_mask, order, 1329 zonelist, alloc_flags); 1330 if (page) 1331 goto got_pg; 1332 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 1333 /* 1334 * Go through the zonelist yet one more time, keep 1335 * very high watermark here, this is only to catch 1336 * a parallel oom killing, we must fail if we're still 1337 * under heavy pressure. 1338 */ 1339 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1340 zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); 1341 if (page) 1342 goto got_pg; 1343 1344 out_of_memory(zonelist, gfp_mask, order); 1345 goto restart; 1346 } 1347 1348 /* 1349 * Don't let big-order allocations loop unless the caller explicitly 1350 * requests that. Wait for some write requests to complete then retry. 1351 * 1352 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order 1353 * <= 3, but that may not be true in other implementations. 1354 */ 1355 do_retry = 0; 1356 if (!(gfp_mask & __GFP_NORETRY)) { 1357 if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) 1358 do_retry = 1; 1359 if (gfp_mask & __GFP_NOFAIL) 1360 do_retry = 1; 1361 } 1362 if (do_retry) { 1363 congestion_wait(WRITE, HZ/50); 1364 goto rebalance; 1365 } 1366 1367nopage: 1368 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 1369 printk(KERN_WARNING "%s: page allocation failure." 1370 " order:%d, mode:0x%x\n", 1371 p->comm, order, gfp_mask); 1372 dump_stack(); 1373 show_mem(); 1374 } 1375got_pg: 1376 return page; 1377} 1378 1379EXPORT_SYMBOL(__alloc_pages); 1380 1381/* 1382 * Common helper functions. 1383 */ 1384fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 1385{ 1386 struct page * page; 1387 page = alloc_pages(gfp_mask, order); 1388 if (!page) 1389 return 0; 1390 return (unsigned long) page_address(page); 1391} 1392 1393EXPORT_SYMBOL(__get_free_pages); 1394 1395fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) 1396{ 1397 struct page * page; 1398 1399 /* 1400 * get_zeroed_page() returns a 32-bit address, which cannot represent 1401 * a highmem page 1402 */ 1403 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1404 1405 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1406 if (page) 1407 return (unsigned long) page_address(page); 1408 return 0; 1409} 1410 1411EXPORT_SYMBOL(get_zeroed_page); 1412 1413void __pagevec_free(struct pagevec *pvec) 1414{ 1415 int i = pagevec_count(pvec); 1416 1417 while (--i >= 0) 1418 free_hot_cold_page(pvec->pages[i], pvec->cold); 1419} 1420 1421fastcall void __free_pages(struct page *page, unsigned int order) 1422{ 1423 if (put_page_testzero(page)) { 1424 if (order == 0) 1425 free_hot_page(page); 1426 else 1427 __free_pages_ok(page, order); 1428 } 1429} 1430 1431EXPORT_SYMBOL(__free_pages); 1432 1433fastcall void free_pages(unsigned long addr, unsigned int order) 1434{ 1435 if (addr != 0) { 1436 VM_BUG_ON(!virt_addr_valid((void *)addr)); 1437 __free_pages(virt_to_page((void *)addr), order); 1438 } 1439} 1440 1441EXPORT_SYMBOL(free_pages); 1442 1443/* 1444 * Total amount of free (allocatable) RAM: 1445 */ 1446unsigned int nr_free_pages(void) 1447{ 1448 return global_page_state(NR_FREE_PAGES); 1449} 1450 1451EXPORT_SYMBOL(nr_free_pages); 1452 1453#ifdef CONFIG_NUMA 1454unsigned int nr_free_pages_pgdat(pg_data_t *pgdat) 1455{ 1456 return node_page_state(pgdat->node_id, NR_FREE_PAGES); 1457} 1458#endif 1459 1460static unsigned int nr_free_zone_pages(int offset) 1461{ 1462 /* Just pick one node, since fallback list is circular */ 1463 pg_data_t *pgdat = NODE_DATA(numa_node_id()); 1464 unsigned int sum = 0; 1465 1466 struct zonelist *zonelist = pgdat->node_zonelists + offset; 1467 struct zone **zonep = zonelist->zones; 1468 struct zone *zone; 1469 1470 for (zone = *zonep++; zone; zone = *zonep++) { 1471 unsigned long size = zone->present_pages; 1472 unsigned long high = zone->pages_high; 1473 if (size > high) 1474 sum += size - high; 1475 } 1476 1477 return sum; 1478} 1479 1480/* 1481 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 1482 */ 1483unsigned int nr_free_buffer_pages(void) 1484{ 1485 return nr_free_zone_pages(gfp_zone(GFP_USER)); 1486} 1487 1488/* 1489 * Amount of free RAM allocatable within all zones 1490 */ 1491unsigned int nr_free_pagecache_pages(void) 1492{ 1493 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER)); 1494} 1495 1496static inline void show_node(struct zone *zone) 1497{ 1498 if (NUMA_BUILD) 1499 printk("Node %d ", zone_to_nid(zone)); 1500} 1501 1502void si_meminfo(struct sysinfo *val) 1503{ 1504 val->totalram = totalram_pages; 1505 val->sharedram = 0; 1506 val->freeram = global_page_state(NR_FREE_PAGES); 1507 val->bufferram = nr_blockdev_pages(); 1508 val->totalhigh = totalhigh_pages; 1509 val->freehigh = nr_free_highpages(); 1510 val->mem_unit = PAGE_SIZE; 1511} 1512 1513EXPORT_SYMBOL(si_meminfo); 1514 1515#ifdef CONFIG_NUMA 1516void si_meminfo_node(struct sysinfo *val, int nid) 1517{ 1518 pg_data_t *pgdat = NODE_DATA(nid); 1519 1520 val->totalram = pgdat->node_present_pages; 1521 val->freeram = node_page_state(nid, NR_FREE_PAGES); 1522#ifdef CONFIG_HIGHMEM 1523 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 1524 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 1525 NR_FREE_PAGES); 1526#else 1527 val->totalhigh = 0; 1528 val->freehigh = 0; 1529#endif 1530 val->mem_unit = PAGE_SIZE; 1531} 1532#endif 1533 1534#define K(x) ((x) << (PAGE_SHIFT-10)) 1535 1536/* 1537 * Show free area list (used inside shift_scroll-lock stuff) 1538 * We also calculate the percentage fragmentation. We do this by counting the 1539 * memory on each free list with the exception of the first item on the list. 1540 */ 1541void show_free_areas(void) 1542{ 1543 int cpu; 1544 unsigned long active; 1545 unsigned long inactive; 1546 unsigned long free; 1547 struct zone *zone; 1548 1549 for_each_zone(zone) { 1550 if (!populated_zone(zone)) 1551 continue; 1552 1553 show_node(zone); 1554 printk("%s per-cpu:\n", zone->name); 1555 1556 for_each_online_cpu(cpu) { 1557 struct per_cpu_pageset *pageset; 1558 1559 pageset = zone_pcp(zone, cpu); 1560 1561 printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d " 1562 "Cold: hi:%5d, btch:%4d usd:%4d\n", 1563 cpu, pageset->pcp[0].high, 1564 pageset->pcp[0].batch, pageset->pcp[0].count, 1565 pageset->pcp[1].high, pageset->pcp[1].batch, 1566 pageset->pcp[1].count); 1567 } 1568 } 1569 1570 get_zone_counts(&active, &inactive, &free); 1571 1572 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n" 1573 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n", 1574 active, 1575 inactive, 1576 global_page_state(NR_FILE_DIRTY), 1577 global_page_state(NR_WRITEBACK), 1578 global_page_state(NR_UNSTABLE_NFS), 1579 global_page_state(NR_FREE_PAGES), 1580 global_page_state(NR_SLAB_RECLAIMABLE) + 1581 global_page_state(NR_SLAB_UNRECLAIMABLE), 1582 global_page_state(NR_FILE_MAPPED), 1583 global_page_state(NR_PAGETABLE), 1584 global_page_state(NR_BOUNCE)); 1585 1586 for_each_zone(zone) { 1587 int i; 1588 1589 if (!populated_zone(zone)) 1590 continue; 1591 1592 show_node(zone); 1593 printk("%s" 1594 " free:%lukB" 1595 " min:%lukB" 1596 " low:%lukB" 1597 " high:%lukB" 1598 " active:%lukB" 1599 " inactive:%lukB" 1600 " present:%lukB" 1601 " pages_scanned:%lu" 1602 " all_unreclaimable? %s" 1603 "\n", 1604 zone->name, 1605 K(zone_page_state(zone, NR_FREE_PAGES)), 1606 K(zone->pages_min), 1607 K(zone->pages_low), 1608 K(zone->pages_high), 1609 K(zone_page_state(zone, NR_ACTIVE)), 1610 K(zone_page_state(zone, NR_INACTIVE)), 1611 K(zone->present_pages), 1612 zone->pages_scanned, 1613 (zone->all_unreclaimable ? "yes" : "no") 1614 ); 1615 printk("lowmem_reserve[]:"); 1616 for (i = 0; i < MAX_NR_ZONES; i++) 1617 printk(" %lu", zone->lowmem_reserve[i]); 1618 printk("\n"); 1619 } 1620 1621 for_each_zone(zone) { 1622 unsigned long nr[MAX_ORDER], flags, order, total = 0; 1623 1624 if (!populated_zone(zone)) 1625 continue; 1626 1627 show_node(zone); 1628 printk("%s: ", zone->name); 1629 1630 spin_lock_irqsave(&zone->lock, flags); 1631 for (order = 0; order < MAX_ORDER; order++) { 1632 nr[order] = zone->free_area[order].nr_free; 1633 total += nr[order] << order; 1634 } 1635 spin_unlock_irqrestore(&zone->lock, flags); 1636 for (order = 0; order < MAX_ORDER; order++) 1637 printk("%lu*%lukB ", nr[order], K(1UL) << order); 1638 printk("= %lukB\n", K(total)); 1639 } 1640 1641 show_swap_cache_info(); 1642} 1643 1644/* 1645 * Builds allocation fallback zone lists. 1646 * 1647 * Add all populated zones of a node to the zonelist. 1648 */ 1649static int __meminit build_zonelists_node(pg_data_t *pgdat, 1650 struct zonelist *zonelist, int nr_zones, enum zone_type zone_type) 1651{ 1652 struct zone *zone; 1653 1654 BUG_ON(zone_type >= MAX_NR_ZONES); 1655 zone_type++; 1656 1657 do { 1658 zone_type--; 1659 zone = pgdat->node_zones + zone_type; 1660 if (populated_zone(zone)) { 1661 zonelist->zones[nr_zones++] = zone; 1662 check_highest_zone(zone_type); 1663 } 1664 1665 } while (zone_type); 1666 return nr_zones; 1667} 1668 1669#ifdef CONFIG_NUMA 1670#define MAX_NODE_LOAD (num_online_nodes()) 1671static int __meminitdata node_load[MAX_NUMNODES]; 1672/** 1673 * find_next_best_node - find the next node that should appear in a given node's fallback list 1674 * @node: node whose fallback list we're appending 1675 * @used_node_mask: nodemask_t of already used nodes 1676 * 1677 * We use a number of factors to determine which is the next node that should 1678 * appear on a given node's fallback list. The node should not have appeared 1679 * already in @node's fallback list, and it should be the next closest node 1680 * according to the distance array (which contains arbitrary distance values 1681 * from each node to each node in the system), and should also prefer nodes 1682 * with no CPUs, since presumably they'll have very little allocation pressure 1683 * on them otherwise. 1684 * It returns -1 if no node is found. 1685 */ 1686static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask) 1687{ 1688 int n, val; 1689 int min_val = INT_MAX; 1690 int best_node = -1; 1691 1692 /* Use the local node if we haven't already */ 1693 if (!node_isset(node, *used_node_mask)) { 1694 node_set(node, *used_node_mask); 1695 return node; 1696 } 1697 1698 for_each_online_node(n) { 1699 cpumask_t tmp; 1700 1701 /* Don't want a node to appear more than once */ 1702 if (node_isset(n, *used_node_mask)) 1703 continue; 1704 1705 /* Use the distance array to find the distance */ 1706 val = node_distance(node, n); 1707 1708 /* Penalize nodes under us ("prefer the next node") */ 1709 val += (n < node); 1710 1711 /* Give preference to headless and unused nodes */ 1712 tmp = node_to_cpumask(n); 1713 if (!cpus_empty(tmp)) 1714 val += PENALTY_FOR_NODE_WITH_CPUS; 1715 1716 /* Slight preference for less loaded node */ 1717 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 1718 val += node_load[n]; 1719 1720 if (val < min_val) { 1721 min_val = val; 1722 best_node = n; 1723 } 1724 } 1725 1726 if (best_node >= 0) 1727 node_set(best_node, *used_node_mask); 1728 1729 return best_node; 1730} 1731 1732static void __meminit build_zonelists(pg_data_t *pgdat) 1733{ 1734 int j, node, local_node; 1735 enum zone_type i; 1736 int prev_node, load; 1737 struct zonelist *zonelist; 1738 nodemask_t used_mask; 1739 1740 /* initialize zonelists */ 1741 for (i = 0; i < MAX_NR_ZONES; i++) { 1742 zonelist = pgdat->node_zonelists + i; 1743 zonelist->zones[0] = NULL; 1744 } 1745 1746 /* NUMA-aware ordering of nodes */ 1747 local_node = pgdat->node_id; 1748 load = num_online_nodes(); 1749 prev_node = local_node; 1750 nodes_clear(used_mask); 1751 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 1752 int distance = node_distance(local_node, node); 1753 1754 /* 1755 * If another node is sufficiently far away then it is better 1756 * to reclaim pages in a zone before going off node. 1757 */ 1758 if (distance > RECLAIM_DISTANCE) 1759 zone_reclaim_mode = 1; 1760 1761 /* 1762 * We don't want to pressure a particular node. 1763 * So adding penalty to the first node in same 1764 * distance group to make it round-robin. 1765 */ 1766 1767 if (distance != node_distance(local_node, prev_node)) 1768 node_load[node] += load; 1769 prev_node = node; 1770 load--; 1771 for (i = 0; i < MAX_NR_ZONES; i++) { 1772 zonelist = pgdat->node_zonelists + i; 1773 for (j = 0; zonelist->zones[j] != NULL; j++); 1774 1775 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1776 zonelist->zones[j] = NULL; 1777 } 1778 } 1779} 1780 1781/* Construct the zonelist performance cache - see further mmzone.h */ 1782static void __meminit build_zonelist_cache(pg_data_t *pgdat) 1783{ 1784 int i; 1785 1786 for (i = 0; i < MAX_NR_ZONES; i++) { 1787 struct zonelist *zonelist; 1788 struct zonelist_cache *zlc; 1789 struct zone **z; 1790 1791 zonelist = pgdat->node_zonelists + i; 1792 zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 1793 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 1794 for (z = zonelist->zones; *z; z++) 1795 zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z); 1796 } 1797} 1798 1799#else /* CONFIG_NUMA */ 1800 1801static void __meminit build_zonelists(pg_data_t *pgdat) 1802{ 1803 int node, local_node; 1804 enum zone_type i,j; 1805 1806 local_node = pgdat->node_id; 1807 for (i = 0; i < MAX_NR_ZONES; i++) { 1808 struct zonelist *zonelist; 1809 1810 zonelist = pgdat->node_zonelists + i; 1811 1812 j = build_zonelists_node(pgdat, zonelist, 0, i); 1813 /* 1814 * Now we build the zonelist so that it contains the zones 1815 * of all the other nodes. 1816 * We don't want to pressure a particular node, so when 1817 * building the zones for node N, we make sure that the 1818 * zones coming right after the local ones are those from 1819 * node N+1 (modulo N) 1820 */ 1821 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 1822 if (!node_online(node)) 1823 continue; 1824 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1825 } 1826 for (node = 0; node < local_node; node++) { 1827 if (!node_online(node)) 1828 continue; 1829 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1830 } 1831 1832 zonelist->zones[j] = NULL; 1833 } 1834} 1835 1836/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ 1837static void __meminit build_zonelist_cache(pg_data_t *pgdat) 1838{ 1839 int i; 1840 1841 for (i = 0; i < MAX_NR_ZONES; i++) 1842 pgdat->node_zonelists[i].zlcache_ptr = NULL; 1843} 1844 1845#endif /* CONFIG_NUMA */ 1846 1847/* return values int ....just for stop_machine_run() */ 1848static int __meminit __build_all_zonelists(void *dummy) 1849{ 1850 int nid; 1851 1852 for_each_online_node(nid) { 1853 build_zonelists(NODE_DATA(nid)); 1854 build_zonelist_cache(NODE_DATA(nid)); 1855 } 1856 return 0; 1857} 1858 1859void __meminit build_all_zonelists(void) 1860{ 1861 if (system_state == SYSTEM_BOOTING) { 1862 __build_all_zonelists(NULL); 1863 cpuset_init_current_mems_allowed(); 1864 } else { 1865 /* we have to stop all cpus to guaranntee there is no user 1866 of zonelist */ 1867 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); 1868 /* cpuset refresh routine should be here */ 1869 } 1870 vm_total_pages = nr_free_pagecache_pages(); 1871 printk("Built %i zonelists. Total pages: %ld\n", 1872 num_online_nodes(), vm_total_pages); 1873} 1874 1875/* 1876 * Helper functions to size the waitqueue hash table. 1877 * Essentially these want to choose hash table sizes sufficiently 1878 * large so that collisions trying to wait on pages are rare. 1879 * But in fact, the number of active page waitqueues on typical 1880 * systems is ridiculously low, less than 200. So this is even 1881 * conservative, even though it seems large. 1882 * 1883 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 1884 * waitqueues, i.e. the size of the waitq table given the number of pages. 1885 */ 1886#define PAGES_PER_WAITQUEUE 256 1887 1888#ifndef CONFIG_MEMORY_HOTPLUG 1889static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 1890{ 1891 unsigned long size = 1; 1892 1893 pages /= PAGES_PER_WAITQUEUE; 1894 1895 while (size < pages) 1896 size <<= 1; 1897 1898 /* 1899 * Once we have dozens or even hundreds of threads sleeping 1900 * on IO we've got bigger problems than wait queue collision. 1901 * Limit the size of the wait table to a reasonable size. 1902 */ 1903 size = min(size, 4096UL); 1904 1905 return max(size, 4UL); 1906} 1907#else 1908/* 1909 * A zone's size might be changed by hot-add, so it is not possible to determine 1910 * a suitable size for its wait_table. So we use the maximum size now. 1911 * 1912 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 1913 * 1914 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 1915 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 1916 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 1917 * 1918 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 1919 * or more by the traditional way. (See above). It equals: 1920 * 1921 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 1922 * ia64(16K page size) : = ( 8G + 4M)byte. 1923 * powerpc (64K page size) : = (32G +16M)byte. 1924 */ 1925static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 1926{ 1927 return 4096UL; 1928} 1929#endif 1930 1931/* 1932 * This is an integer logarithm so that shifts can be used later 1933 * to extract the more random high bits from the multiplicative 1934 * hash function before the remainder is taken. 1935 */ 1936static inline unsigned long wait_table_bits(unsigned long size) 1937{ 1938 return ffz(~size); 1939} 1940 1941#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 1942 1943/* 1944 * Initially all pages are reserved - free ones are freed 1945 * up by free_all_bootmem() once the early boot process is 1946 * done. Non-atomic initialization, single-pass. 1947 */ 1948void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 1949 unsigned long start_pfn, enum memmap_context context) 1950{ 1951 struct page *page; 1952 unsigned long end_pfn = start_pfn + size; 1953 unsigned long pfn; 1954 1955 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 1956 /* 1957 * There can be holes in boot-time mem_map[]s 1958 * handed to this function. They do not 1959 * exist on hotplugged memory. 1960 */ 1961 if (context == MEMMAP_EARLY) { 1962 if (!early_pfn_valid(pfn)) 1963 continue; 1964 if (!early_pfn_in_nid(pfn, nid)) 1965 continue; 1966 } 1967 page = pfn_to_page(pfn); 1968 set_page_links(page, zone, nid, pfn); 1969 init_page_count(page); 1970 reset_page_mapcount(page); 1971 SetPageReserved(page); 1972 INIT_LIST_HEAD(&page->lru); 1973#ifdef WANT_PAGE_VIRTUAL 1974 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 1975 if (!is_highmem_idx(zone)) 1976 set_page_address(page, __va(pfn << PAGE_SHIFT)); 1977#endif 1978 } 1979} 1980 1981void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, 1982 unsigned long size) 1983{ 1984 int order; 1985 for (order = 0; order < MAX_ORDER ; order++) { 1986 INIT_LIST_HEAD(&zone->free_area[order].free_list); 1987 zone->free_area[order].nr_free = 0; 1988 } 1989} 1990 1991#ifndef __HAVE_ARCH_MEMMAP_INIT 1992#define memmap_init(size, nid, zone, start_pfn) \ 1993 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 1994#endif 1995 1996static int __cpuinit zone_batchsize(struct zone *zone) 1997{ 1998 int batch; 1999 2000 /* 2001 * The per-cpu-pages pools are set to around 1000th of the 2002 * size of the zone. But no more than 1/2 of a meg. 2003 * 2004 * OK, so we don't know how big the cache is. So guess. 2005 */ 2006 batch = zone->present_pages / 1024; 2007 if (batch * PAGE_SIZE > 512 * 1024) 2008 batch = (512 * 1024) / PAGE_SIZE; 2009 batch /= 4; /* We effectively *= 4 below */ 2010 if (batch < 1) 2011 batch = 1; 2012 2013 /* 2014 * Clamp the batch to a 2^n - 1 value. Having a power 2015 * of 2 value was found to be more likely to have 2016 * suboptimal cache aliasing properties in some cases. 2017 * 2018 * For example if 2 tasks are alternately allocating 2019 * batches of pages, one task can end up with a lot 2020 * of pages of one half of the possible page colors 2021 * and the other with pages of the other colors. 2022 */ 2023 batch = (1 << (fls(batch + batch/2)-1)) - 1; 2024 2025 return batch; 2026} 2027 2028inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 2029{ 2030 struct per_cpu_pages *pcp; 2031 2032 memset(p, 0, sizeof(*p)); 2033 2034 pcp = &p->pcp[0]; /* hot */ 2035 pcp->count = 0; 2036 pcp->high = 6 * batch; 2037 pcp->batch = max(1UL, 1 * batch); 2038 INIT_LIST_HEAD(&pcp->list); 2039 2040 pcp = &p->pcp[1]; /* cold*/ 2041 pcp->count = 0; 2042 pcp->high = 2 * batch; 2043 pcp->batch = max(1UL, batch/2); 2044 INIT_LIST_HEAD(&pcp->list); 2045} 2046 2047/* 2048 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist 2049 * to the value high for the pageset p. 2050 */ 2051 2052static void setup_pagelist_highmark(struct per_cpu_pageset *p, 2053 unsigned long high) 2054{ 2055 struct per_cpu_pages *pcp; 2056 2057 pcp = &p->pcp[0]; /* hot list */ 2058 pcp->high = high; 2059 pcp->batch = max(1UL, high/4); 2060 if ((high/4) > (PAGE_SHIFT * 8)) 2061 pcp->batch = PAGE_SHIFT * 8; 2062} 2063 2064 2065#ifdef CONFIG_NUMA 2066/* 2067 * Boot pageset table. One per cpu which is going to be used for all 2068 * zones and all nodes. The parameters will be set in such a way 2069 * that an item put on a list will immediately be handed over to 2070 * the buddy list. This is safe since pageset manipulation is done 2071 * with interrupts disabled. 2072 * 2073 * Some NUMA counter updates may also be caught by the boot pagesets. 2074 * 2075 * The boot_pagesets must be kept even after bootup is complete for 2076 * unused processors and/or zones. They do play a role for bootstrapping 2077 * hotplugged processors. 2078 * 2079 * zoneinfo_show() and maybe other functions do 2080 * not check if the processor is online before following the pageset pointer. 2081 * Other parts of the kernel may not check if the zone is available. 2082 */ 2083static struct per_cpu_pageset boot_pageset[NR_CPUS]; 2084 2085/* 2086 * Dynamically allocate memory for the 2087 * per cpu pageset array in struct zone. 2088 */ 2089static int __cpuinit process_zones(int cpu) 2090{ 2091 struct zone *zone, *dzone; 2092 2093 for_each_zone(zone) { 2094 2095 if (!populated_zone(zone)) 2096 continue; 2097 2098 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 2099 GFP_KERNEL, cpu_to_node(cpu)); 2100 if (!zone_pcp(zone, cpu)) 2101 goto bad; 2102 2103 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); 2104 2105 if (percpu_pagelist_fraction) 2106 setup_pagelist_highmark(zone_pcp(zone, cpu), 2107 (zone->present_pages / percpu_pagelist_fraction)); 2108 } 2109 2110 return 0; 2111bad: 2112 for_each_zone(dzone) { 2113 if (dzone == zone) 2114 break; 2115 kfree(zone_pcp(dzone, cpu)); 2116 zone_pcp(dzone, cpu) = NULL; 2117 } 2118 return -ENOMEM; 2119} 2120 2121static inline void free_zone_pagesets(int cpu) 2122{ 2123 struct zone *zone; 2124 2125 for_each_zone(zone) { 2126 struct per_cpu_pageset *pset = zone_pcp(zone, cpu); 2127 2128 /* Free per_cpu_pageset if it is slab allocated */ 2129 if (pset != &boot_pageset[cpu]) 2130 kfree(pset); 2131 zone_pcp(zone, cpu) = NULL; 2132 } 2133} 2134 2135static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, 2136 unsigned long action, 2137 void *hcpu) 2138{ 2139 int cpu = (long)hcpu; 2140 int ret = NOTIFY_OK; 2141 2142 switch (action) { 2143 case CPU_UP_PREPARE: 2144 if (process_zones(cpu)) 2145 ret = NOTIFY_BAD; 2146 break; 2147 case CPU_UP_CANCELED: 2148 case CPU_DEAD: 2149 free_zone_pagesets(cpu); 2150 break; 2151 default: 2152 break; 2153 } 2154 return ret; 2155} 2156 2157static struct notifier_block __cpuinitdata pageset_notifier = 2158 { &pageset_cpuup_callback, NULL, 0 }; 2159 2160void __init setup_per_cpu_pageset(void) 2161{ 2162 int err; 2163 2164 /* Initialize per_cpu_pageset for cpu 0. 2165 * A cpuup callback will do this for every cpu 2166 * as it comes online 2167 */ 2168 err = process_zones(smp_processor_id()); 2169 BUG_ON(err); 2170 register_cpu_notifier(&pageset_notifier); 2171} 2172 2173#endif 2174 2175static __meminit 2176int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 2177{ 2178 int i; 2179 struct pglist_data *pgdat = zone->zone_pgdat; 2180 size_t alloc_size; 2181 2182 /* 2183 * The per-page waitqueue mechanism uses hashed waitqueues 2184 * per zone. 2185 */ 2186 zone->wait_table_hash_nr_entries = 2187 wait_table_hash_nr_entries(zone_size_pages); 2188 zone->wait_table_bits = 2189 wait_table_bits(zone->wait_table_hash_nr_entries); 2190 alloc_size = zone->wait_table_hash_nr_entries 2191 * sizeof(wait_queue_head_t); 2192 2193 if (system_state == SYSTEM_BOOTING) { 2194 zone->wait_table = (wait_queue_head_t *) 2195 alloc_bootmem_node(pgdat, alloc_size); 2196 } else { 2197 /* 2198 * This case means that a zone whose size was 0 gets new memory 2199 * via memory hot-add. 2200 * But it may be the case that a new node was hot-added. In 2201 * this case vmalloc() will not be able to use this new node's 2202 * memory - this wait_table must be initialized to use this new 2203 * node itself as well. 2204 * To use this new node's memory, further consideration will be 2205 * necessary. 2206 */ 2207 zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size); 2208 } 2209 if (!zone->wait_table) 2210 return -ENOMEM; 2211 2212 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) 2213 init_waitqueue_head(zone->wait_table + i); 2214 2215 return 0; 2216} 2217 2218static __meminit void zone_pcp_init(struct zone *zone) 2219{ 2220 int cpu; 2221 unsigned long batch = zone_batchsize(zone); 2222 2223 for (cpu = 0; cpu < NR_CPUS; cpu++) { 2224#ifdef CONFIG_NUMA 2225 /* Early boot. Slab allocator not functional yet */ 2226 zone_pcp(zone, cpu) = &boot_pageset[cpu]; 2227 setup_pageset(&boot_pageset[cpu],0); 2228#else 2229 setup_pageset(zone_pcp(zone,cpu), batch); 2230#endif 2231 } 2232 if (zone->present_pages) 2233 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 2234 zone->name, zone->present_pages, batch); 2235} 2236 2237__meminit int init_currently_empty_zone(struct zone *zone, 2238 unsigned long zone_start_pfn, 2239 unsigned long size, 2240 enum memmap_context context) 2241{ 2242 struct pglist_data *pgdat = zone->zone_pgdat; 2243 int ret; 2244 ret = zone_wait_table_init(zone, size); 2245 if (ret) 2246 return ret; 2247 pgdat->nr_zones = zone_idx(zone) + 1; 2248 2249 zone->zone_start_pfn = zone_start_pfn; 2250 2251 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); 2252 2253 zone_init_free_lists(pgdat, zone, zone->spanned_pages); 2254 2255 return 0; 2256} 2257 2258#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2259/* 2260 * Basic iterator support. Return the first range of PFNs for a node 2261 * Note: nid == MAX_NUMNODES returns first region regardless of node 2262 */ 2263static int __init first_active_region_index_in_nid(int nid) 2264{ 2265 int i; 2266 2267 for (i = 0; i < nr_nodemap_entries; i++) 2268 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) 2269 return i; 2270 2271 return -1; 2272} 2273 2274/* 2275 * Basic iterator support. Return the next active range of PFNs for a node 2276 * Note: nid == MAX_NUMNODES returns next region regardles of node 2277 */ 2278static int __init next_active_region_index_in_nid(int index, int nid) 2279{ 2280 for (index = index + 1; index < nr_nodemap_entries; index++) 2281 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) 2282 return index; 2283 2284 return -1; 2285} 2286 2287#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 2288/* 2289 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 2290 * Architectures may implement their own version but if add_active_range() 2291 * was used and there are no special requirements, this is a convenient 2292 * alternative 2293 */ 2294int __init early_pfn_to_nid(unsigned long pfn) 2295{ 2296 int i; 2297 2298 for (i = 0; i < nr_nodemap_entries; i++) { 2299 unsigned long start_pfn = early_node_map[i].start_pfn; 2300 unsigned long end_pfn = early_node_map[i].end_pfn; 2301 2302 if (start_pfn <= pfn && pfn < end_pfn) 2303 return early_node_map[i].nid; 2304 } 2305 2306 return 0; 2307} 2308#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 2309 2310/* Basic iterator support to walk early_node_map[] */ 2311#define for_each_active_range_index_in_nid(i, nid) \ 2312 for (i = first_active_region_index_in_nid(nid); i != -1; \ 2313 i = next_active_region_index_in_nid(i, nid)) 2314 2315/** 2316 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range 2317 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 2318 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node 2319 * 2320 * If an architecture guarantees that all ranges registered with 2321 * add_active_ranges() contain no holes and may be freed, this 2322 * this function may be used instead of calling free_bootmem() manually. 2323 */ 2324void __init free_bootmem_with_active_regions(int nid, 2325 unsigned long max_low_pfn) 2326{ 2327 int i; 2328 2329 for_each_active_range_index_in_nid(i, nid) { 2330 unsigned long size_pages = 0; 2331 unsigned long end_pfn = early_node_map[i].end_pfn; 2332 2333 if (early_node_map[i].start_pfn >= max_low_pfn) 2334 continue; 2335 2336 if (end_pfn > max_low_pfn) 2337 end_pfn = max_low_pfn; 2338 2339 size_pages = end_pfn - early_node_map[i].start_pfn; 2340 free_bootmem_node(NODE_DATA(early_node_map[i].nid), 2341 PFN_PHYS(early_node_map[i].start_pfn), 2342 size_pages << PAGE_SHIFT); 2343 } 2344} 2345 2346/** 2347 * sparse_memory_present_with_active_regions - Call memory_present for each active range 2348 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 2349 * 2350 * If an architecture guarantees that all ranges registered with 2351 * add_active_ranges() contain no holes and may be freed, this 2352 * function may be used instead of calling memory_present() manually. 2353 */ 2354void __init sparse_memory_present_with_active_regions(int nid) 2355{ 2356 int i; 2357 2358 for_each_active_range_index_in_nid(i, nid) 2359 memory_present(early_node_map[i].nid, 2360 early_node_map[i].start_pfn, 2361 early_node_map[i].end_pfn); 2362} 2363 2364/** 2365 * push_node_boundaries - Push node boundaries to at least the requested boundary 2366 * @nid: The nid of the node to push the boundary for 2367 * @start_pfn: The start pfn of the node 2368 * @end_pfn: The end pfn of the node 2369 * 2370 * In reserve-based hot-add, mem_map is allocated that is unused until hotadd 2371 * time. Specifically, on x86_64, SRAT will report ranges that can potentially 2372 * be hotplugged even though no physical memory exists. This function allows 2373 * an arch to push out the node boundaries so mem_map is allocated that can 2374 * be used later. 2375 */ 2376#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 2377void __init push_node_boundaries(unsigned int nid, 2378 unsigned long start_pfn, unsigned long end_pfn) 2379{ 2380 printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n", 2381 nid, start_pfn, end_pfn); 2382 2383 /* Initialise the boundary for this node if necessary */ 2384 if (node_boundary_end_pfn[nid] == 0) 2385 node_boundary_start_pfn[nid] = -1UL; 2386 2387 /* Update the boundaries */ 2388 if (node_boundary_start_pfn[nid] > start_pfn) 2389 node_boundary_start_pfn[nid] = start_pfn; 2390 if (node_boundary_end_pfn[nid] < end_pfn) 2391 node_boundary_end_pfn[nid] = end_pfn; 2392} 2393 2394/* If necessary, push the node boundary out for reserve hotadd */ 2395static void __init account_node_boundary(unsigned int nid, 2396 unsigned long *start_pfn, unsigned long *end_pfn) 2397{ 2398 printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n", 2399 nid, *start_pfn, *end_pfn); 2400 2401 /* Return if boundary information has not been provided */ 2402 if (node_boundary_end_pfn[nid] == 0) 2403 return; 2404 2405 /* Check the boundaries and update if necessary */ 2406 if (node_boundary_start_pfn[nid] < *start_pfn) 2407 *start_pfn = node_boundary_start_pfn[nid]; 2408 if (node_boundary_end_pfn[nid] > *end_pfn) 2409 *end_pfn = node_boundary_end_pfn[nid]; 2410} 2411#else 2412void __init push_node_boundaries(unsigned int nid, 2413 unsigned long start_pfn, unsigned long end_pfn) {} 2414 2415static void __init account_node_boundary(unsigned int nid, 2416 unsigned long *start_pfn, unsigned long *end_pfn) {} 2417#endif 2418 2419 2420/** 2421 * get_pfn_range_for_nid - Return the start and end page frames for a node 2422 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 2423 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 2424 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 2425 * 2426 * It returns the start and end page frame of a node based on information 2427 * provided by an arch calling add_active_range(). If called for a node 2428 * with no available memory, a warning is printed and the start and end 2429 * PFNs will be 0. 2430 */ 2431void __init get_pfn_range_for_nid(unsigned int nid, 2432 unsigned long *start_pfn, unsigned long *end_pfn) 2433{ 2434 int i; 2435 *start_pfn = -1UL; 2436 *end_pfn = 0; 2437 2438 for_each_active_range_index_in_nid(i, nid) { 2439 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn); 2440 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn); 2441 } 2442 2443 if (*start_pfn == -1UL) { 2444 printk(KERN_WARNING "Node %u active with no memory\n", nid); 2445 *start_pfn = 0; 2446 } 2447 2448 /* Push the node boundaries out if requested */ 2449 account_node_boundary(nid, start_pfn, end_pfn); 2450} 2451 2452/* 2453 * Return the number of pages a zone spans in a node, including holes 2454 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 2455 */ 2456unsigned long __init zone_spanned_pages_in_node(int nid, 2457 unsigned long zone_type, 2458 unsigned long *ignored) 2459{ 2460 unsigned long node_start_pfn, node_end_pfn; 2461 unsigned long zone_start_pfn, zone_end_pfn; 2462 2463 /* Get the start and end of the node and zone */ 2464 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 2465 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 2466 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 2467 2468 /* Check that this node has pages within the zone's required range */ 2469 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) 2470 return 0; 2471 2472 /* Move the zone boundaries inside the node if necessary */ 2473 zone_end_pfn = min(zone_end_pfn, node_end_pfn); 2474 zone_start_pfn = max(zone_start_pfn, node_start_pfn); 2475 2476 /* Return the spanned pages */ 2477 return zone_end_pfn - zone_start_pfn; 2478} 2479 2480/* 2481 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 2482 * then all holes in the requested range will be accounted for. 2483 */ 2484unsigned long __init __absent_pages_in_range(int nid, 2485 unsigned long range_start_pfn, 2486 unsigned long range_end_pfn) 2487{ 2488 int i = 0; 2489 unsigned long prev_end_pfn = 0, hole_pages = 0; 2490 unsigned long start_pfn; 2491 2492 /* Find the end_pfn of the first active range of pfns in the node */ 2493 i = first_active_region_index_in_nid(nid); 2494 if (i == -1) 2495 return 0; 2496 2497 /* Account for ranges before physical memory on this node */ 2498 if (early_node_map[i].start_pfn > range_start_pfn) 2499 hole_pages = early_node_map[i].start_pfn - range_start_pfn; 2500 2501 prev_end_pfn = early_node_map[i].start_pfn; 2502 2503 /* Find all holes for the zone within the node */ 2504 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) { 2505 2506 /* No need to continue if prev_end_pfn is outside the zone */ 2507 if (prev_end_pfn >= range_end_pfn) 2508 break; 2509 2510 /* Make sure the end of the zone is not within the hole */ 2511 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn); 2512 prev_end_pfn = max(prev_end_pfn, range_start_pfn); 2513 2514 /* Update the hole size cound and move on */ 2515 if (start_pfn > range_start_pfn) { 2516 BUG_ON(prev_end_pfn > start_pfn); 2517 hole_pages += start_pfn - prev_end_pfn; 2518 } 2519 prev_end_pfn = early_node_map[i].end_pfn; 2520 } 2521 2522 /* Account for ranges past physical memory on this node */ 2523 if (range_end_pfn > prev_end_pfn) 2524 hole_pages += range_end_pfn - 2525 max(range_start_pfn, prev_end_pfn); 2526 2527 return hole_pages; 2528} 2529 2530/** 2531 * absent_pages_in_range - Return number of page frames in holes within a range 2532 * @start_pfn: The start PFN to start searching for holes 2533 * @end_pfn: The end PFN to stop searching for holes 2534 * 2535 * It returns the number of pages frames in memory holes within a range. 2536 */ 2537unsigned long __init absent_pages_in_range(unsigned long start_pfn, 2538 unsigned long end_pfn) 2539{ 2540 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 2541} 2542 2543/* Return the number of page frames in holes in a zone on a node */ 2544unsigned long __init zone_absent_pages_in_node(int nid, 2545 unsigned long zone_type, 2546 unsigned long *ignored) 2547{ 2548 unsigned long node_start_pfn, node_end_pfn; 2549 unsigned long zone_start_pfn, zone_end_pfn; 2550 2551 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 2552 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type], 2553 node_start_pfn); 2554 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type], 2555 node_end_pfn); 2556 2557 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 2558} 2559 2560#else 2561static inline unsigned long zone_spanned_pages_in_node(int nid, 2562 unsigned long zone_type, 2563 unsigned long *zones_size) 2564{ 2565 return zones_size[zone_type]; 2566} 2567 2568static inline unsigned long zone_absent_pages_in_node(int nid, 2569 unsigned long zone_type, 2570 unsigned long *zholes_size) 2571{ 2572 if (!zholes_size) 2573 return 0; 2574 2575 return zholes_size[zone_type]; 2576} 2577 2578#endif 2579 2580static void __init calculate_node_totalpages(struct pglist_data *pgdat, 2581 unsigned long *zones_size, unsigned long *zholes_size) 2582{ 2583 unsigned long realtotalpages, totalpages = 0; 2584 enum zone_type i; 2585 2586 for (i = 0; i < MAX_NR_ZONES; i++) 2587 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, 2588 zones_size); 2589 pgdat->node_spanned_pages = totalpages; 2590 2591 realtotalpages = totalpages; 2592 for (i = 0; i < MAX_NR_ZONES; i++) 2593 realtotalpages -= 2594 zone_absent_pages_in_node(pgdat->node_id, i, 2595 zholes_size); 2596 pgdat->node_present_pages = realtotalpages; 2597 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 2598 realtotalpages); 2599} 2600 2601/* 2602 * Set up the zone data structures: 2603 * - mark all pages reserved 2604 * - mark all memory queues empty 2605 * - clear the memory bitmaps 2606 */ 2607static void __meminit free_area_init_core(struct pglist_data *pgdat, 2608 unsigned long *zones_size, unsigned long *zholes_size) 2609{ 2610 enum zone_type j; 2611 int nid = pgdat->node_id; 2612 unsigned long zone_start_pfn = pgdat->node_start_pfn; 2613 int ret; 2614 2615 pgdat_resize_init(pgdat); 2616 pgdat->nr_zones = 0; 2617 init_waitqueue_head(&pgdat->kswapd_wait); 2618 pgdat->kswapd_max_order = 0; 2619 2620 for (j = 0; j < MAX_NR_ZONES; j++) { 2621 struct zone *zone = pgdat->node_zones + j; 2622 unsigned long size, realsize, memmap_pages; 2623 2624 size = zone_spanned_pages_in_node(nid, j, zones_size); 2625 realsize = size - zone_absent_pages_in_node(nid, j, 2626 zholes_size); 2627 2628 /* 2629 * Adjust realsize so that it accounts for how much memory 2630 * is used by this zone for memmap. This affects the watermark 2631 * and per-cpu initialisations 2632 */ 2633 memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT; 2634 if (realsize >= memmap_pages) { 2635 realsize -= memmap_pages; 2636 printk(KERN_DEBUG 2637 " %s zone: %lu pages used for memmap\n", 2638 zone_names[j], memmap_pages); 2639 } else 2640 printk(KERN_WARNING 2641 " %s zone: %lu pages exceeds realsize %lu\n", 2642 zone_names[j], memmap_pages, realsize); 2643 2644 /* Account for reserved DMA pages */ 2645 if (j == ZONE_DMA && realsize > dma_reserve) { 2646 realsize -= dma_reserve; 2647 printk(KERN_DEBUG " DMA zone: %lu pages reserved\n", 2648 dma_reserve); 2649 } 2650 2651 if (!is_highmem_idx(j)) 2652 nr_kernel_pages += realsize; 2653 nr_all_pages += realsize; 2654 2655 zone->spanned_pages = size; 2656 zone->present_pages = realsize; 2657#ifdef CONFIG_NUMA 2658 zone->node = nid; 2659 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio) 2660 / 100; 2661 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100; 2662#endif 2663 zone->name = zone_names[j]; 2664 spin_lock_init(&zone->lock); 2665 spin_lock_init(&zone->lru_lock); 2666 zone_seqlock_init(zone); 2667 zone->zone_pgdat = pgdat; 2668 2669 zone->prev_priority = DEF_PRIORITY; 2670 2671 zone_pcp_init(zone); 2672 INIT_LIST_HEAD(&zone->active_list); 2673 INIT_LIST_HEAD(&zone->inactive_list); 2674 zone->nr_scan_active = 0; 2675 zone->nr_scan_inactive = 0; 2676 zap_zone_vm_stats(zone); 2677 atomic_set(&zone->reclaim_in_progress, 0); 2678 if (!size) 2679 continue; 2680 2681 ret = init_currently_empty_zone(zone, zone_start_pfn, 2682 size, MEMMAP_EARLY); 2683 BUG_ON(ret); 2684 zone_start_pfn += size; 2685 } 2686} 2687 2688static void __init alloc_node_mem_map(struct pglist_data *pgdat) 2689{ 2690 /* Skip empty nodes */ 2691 if (!pgdat->node_spanned_pages) 2692 return; 2693 2694#ifdef CONFIG_FLAT_NODE_MEM_MAP 2695 /* ia64 gets its own node_mem_map, before this, without bootmem */ 2696 if (!pgdat->node_mem_map) { 2697 unsigned long size, start, end; 2698 struct page *map; 2699 2700 /* 2701 * The zone's endpoints aren't required to be MAX_ORDER 2702 * aligned but the node_mem_map endpoints must be in order 2703 * for the buddy allocator to function correctly. 2704 */ 2705 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 2706 end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 2707 end = ALIGN(end, MAX_ORDER_NR_PAGES); 2708 size = (end - start) * sizeof(struct page); 2709 map = alloc_remap(pgdat->node_id, size); 2710 if (!map) 2711 map = alloc_bootmem_node(pgdat, size); 2712 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 2713 } 2714#ifdef CONFIG_FLATMEM 2715 /* 2716 * With no DISCONTIG, the global mem_map is just set as node 0's 2717 */ 2718 if (pgdat == NODE_DATA(0)) { 2719 mem_map = NODE_DATA(0)->node_mem_map; 2720#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2721 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 2722 mem_map -= pgdat->node_start_pfn; 2723#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 2724 } 2725#endif 2726#endif /* CONFIG_FLAT_NODE_MEM_MAP */ 2727} 2728 2729void __meminit free_area_init_node(int nid, struct pglist_data *pgdat, 2730 unsigned long *zones_size, unsigned long node_start_pfn, 2731 unsigned long *zholes_size) 2732{ 2733 pgdat->node_id = nid; 2734 pgdat->node_start_pfn = node_start_pfn; 2735 calculate_node_totalpages(pgdat, zones_size, zholes_size); 2736 2737 alloc_node_mem_map(pgdat); 2738 2739 free_area_init_core(pgdat, zones_size, zholes_size); 2740} 2741 2742#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2743/** 2744 * add_active_range - Register a range of PFNs backed by physical memory 2745 * @nid: The node ID the range resides on 2746 * @start_pfn: The start PFN of the available physical memory 2747 * @end_pfn: The end PFN of the available physical memory 2748 * 2749 * These ranges are stored in an early_node_map[] and later used by 2750 * free_area_init_nodes() to calculate zone sizes and holes. If the 2751 * range spans a memory hole, it is up to the architecture to ensure 2752 * the memory is not freed by the bootmem allocator. If possible 2753 * the range being registered will be merged with existing ranges. 2754 */ 2755void __init add_active_range(unsigned int nid, unsigned long start_pfn, 2756 unsigned long end_pfn) 2757{ 2758 int i; 2759 2760 printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) " 2761 "%d entries of %d used\n", 2762 nid, start_pfn, end_pfn, 2763 nr_nodemap_entries, MAX_ACTIVE_REGIONS); 2764 2765 /* Merge with existing active regions if possible */ 2766 for (i = 0; i < nr_nodemap_entries; i++) { 2767 if (early_node_map[i].nid != nid) 2768 continue; 2769 2770 /* Skip if an existing region covers this new one */ 2771 if (start_pfn >= early_node_map[i].start_pfn && 2772 end_pfn <= early_node_map[i].end_pfn) 2773 return; 2774 2775 /* Merge forward if suitable */ 2776 if (start_pfn <= early_node_map[i].end_pfn && 2777 end_pfn > early_node_map[i].end_pfn) { 2778 early_node_map[i].end_pfn = end_pfn; 2779 return; 2780 } 2781 2782 /* Merge backward if suitable */ 2783 if (start_pfn < early_node_map[i].end_pfn && 2784 end_pfn >= early_node_map[i].start_pfn) { 2785 early_node_map[i].start_pfn = start_pfn; 2786 return; 2787 } 2788 } 2789 2790 /* Check that early_node_map is large enough */ 2791 if (i >= MAX_ACTIVE_REGIONS) { 2792 printk(KERN_CRIT "More than %d memory regions, truncating\n", 2793 MAX_ACTIVE_REGIONS); 2794 return; 2795 } 2796 2797 early_node_map[i].nid = nid; 2798 early_node_map[i].start_pfn = start_pfn; 2799 early_node_map[i].end_pfn = end_pfn; 2800 nr_nodemap_entries = i + 1; 2801} 2802 2803/** 2804 * shrink_active_range - Shrink an existing registered range of PFNs 2805 * @nid: The node id the range is on that should be shrunk 2806 * @old_end_pfn: The old end PFN of the range 2807 * @new_end_pfn: The new PFN of the range 2808 * 2809 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node. 2810 * The map is kept at the end physical page range that has already been 2811 * registered with add_active_range(). This function allows an arch to shrink 2812 * an existing registered range. 2813 */ 2814void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn, 2815 unsigned long new_end_pfn) 2816{ 2817 int i; 2818 2819 /* Find the old active region end and shrink */ 2820 for_each_active_range_index_in_nid(i, nid) 2821 if (early_node_map[i].end_pfn == old_end_pfn) { 2822 early_node_map[i].end_pfn = new_end_pfn; 2823 break; 2824 } 2825} 2826 2827/** 2828 * remove_all_active_ranges - Remove all currently registered regions 2829 * 2830 * During discovery, it may be found that a table like SRAT is invalid 2831 * and an alternative discovery method must be used. This function removes 2832 * all currently registered regions. 2833 */ 2834void __init remove_all_active_ranges(void) 2835{ 2836 memset(early_node_map, 0, sizeof(early_node_map)); 2837 nr_nodemap_entries = 0; 2838#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 2839 memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn)); 2840 memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn)); 2841#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 2842} 2843 2844/* Compare two active node_active_regions */ 2845static int __init cmp_node_active_region(const void *a, const void *b) 2846{ 2847 struct node_active_region *arange = (struct node_active_region *)a; 2848 struct node_active_region *brange = (struct node_active_region *)b; 2849 2850 /* Done this way to avoid overflows */ 2851 if (arange->start_pfn > brange->start_pfn) 2852 return 1; 2853 if (arange->start_pfn < brange->start_pfn) 2854 return -1; 2855 2856 return 0; 2857} 2858 2859/* sort the node_map by start_pfn */ 2860static void __init sort_node_map(void) 2861{ 2862 sort(early_node_map, (size_t)nr_nodemap_entries, 2863 sizeof(struct node_active_region), 2864 cmp_node_active_region, NULL); 2865} 2866 2867/* Find the lowest pfn for a node */ 2868unsigned long __init find_min_pfn_for_node(unsigned long nid) 2869{ 2870 int i; 2871 unsigned long min_pfn = ULONG_MAX; 2872 2873 /* Assuming a sorted map, the first range found has the starting pfn */ 2874 for_each_active_range_index_in_nid(i, nid) 2875 min_pfn = min(min_pfn, early_node_map[i].start_pfn); 2876 2877 if (min_pfn == ULONG_MAX) { 2878 printk(KERN_WARNING 2879 "Could not find start_pfn for node %lu\n", nid); 2880 return 0; 2881 } 2882 2883 return min_pfn; 2884} 2885 2886/** 2887 * find_min_pfn_with_active_regions - Find the minimum PFN registered 2888 * 2889 * It returns the minimum PFN based on information provided via 2890 * add_active_range(). 2891 */ 2892unsigned long __init find_min_pfn_with_active_regions(void) 2893{ 2894 return find_min_pfn_for_node(MAX_NUMNODES); 2895} 2896 2897/** 2898 * find_max_pfn_with_active_regions - Find the maximum PFN registered 2899 * 2900 * It returns the maximum PFN based on information provided via 2901 * add_active_range(). 2902 */ 2903unsigned long __init find_max_pfn_with_active_regions(void) 2904{ 2905 int i; 2906 unsigned long max_pfn = 0; 2907 2908 for (i = 0; i < nr_nodemap_entries; i++) 2909 max_pfn = max(max_pfn, early_node_map[i].end_pfn); 2910 2911 return max_pfn; 2912} 2913 2914/** 2915 * free_area_init_nodes - Initialise all pg_data_t and zone data 2916 * @max_zone_pfn: an array of max PFNs for each zone 2917 * 2918 * This will call free_area_init_node() for each active node in the system. 2919 * Using the page ranges provided by add_active_range(), the size of each 2920 * zone in each node and their holes is calculated. If the maximum PFN 2921 * between two adjacent zones match, it is assumed that the zone is empty. 2922 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 2923 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 2924 * starts where the previous one ended. For example, ZONE_DMA32 starts 2925 * at arch_max_dma_pfn. 2926 */ 2927void __init free_area_init_nodes(unsigned long *max_zone_pfn) 2928{ 2929 unsigned long nid; 2930 enum zone_type i; 2931 2932 /* Sort early_node_map as initialisation assumes it is sorted */ 2933 sort_node_map(); 2934 2935 /* Record where the zone boundaries are */ 2936 memset(arch_zone_lowest_possible_pfn, 0, 2937 sizeof(arch_zone_lowest_possible_pfn)); 2938 memset(arch_zone_highest_possible_pfn, 0, 2939 sizeof(arch_zone_highest_possible_pfn)); 2940 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 2941 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 2942 for (i = 1; i < MAX_NR_ZONES; i++) { 2943 arch_zone_lowest_possible_pfn[i] = 2944 arch_zone_highest_possible_pfn[i-1]; 2945 arch_zone_highest_possible_pfn[i] = 2946 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 2947 } 2948 2949 /* Print out the zone ranges */ 2950 printk("Zone PFN ranges:\n"); 2951 for (i = 0; i < MAX_NR_ZONES; i++) 2952 printk(" %-8s %8lu -> %8lu\n", 2953 zone_names[i], 2954 arch_zone_lowest_possible_pfn[i], 2955 arch_zone_highest_possible_pfn[i]); 2956 2957 /* Print out the early_node_map[] */ 2958 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); 2959 for (i = 0; i < nr_nodemap_entries; i++) 2960 printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid, 2961 early_node_map[i].start_pfn, 2962 early_node_map[i].end_pfn); 2963 2964 /* Initialise every node */ 2965 for_each_online_node(nid) { 2966 pg_data_t *pgdat = NODE_DATA(nid); 2967 free_area_init_node(nid, pgdat, NULL, 2968 find_min_pfn_for_node(nid), NULL); 2969 } 2970} 2971#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 2972 2973/** 2974 * set_dma_reserve - set the specified number of pages reserved in the first zone 2975 * @new_dma_reserve: The number of pages to mark reserved 2976 * 2977 * The per-cpu batchsize and zone watermarks are determined by present_pages. 2978 * In the DMA zone, a significant percentage may be consumed by kernel image 2979 * and other unfreeable allocations which can skew the watermarks badly. This 2980 * function may optionally be used to account for unfreeable pages in the 2981 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 2982 * smaller per-cpu batchsize. 2983 */ 2984void __init set_dma_reserve(unsigned long new_dma_reserve) 2985{ 2986 dma_reserve = new_dma_reserve; 2987} 2988 2989#ifndef CONFIG_NEED_MULTIPLE_NODES 2990static bootmem_data_t contig_bootmem_data; 2991struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data }; 2992 2993EXPORT_SYMBOL(contig_page_data); 2994#endif 2995 2996void __init free_area_init(unsigned long *zones_size) 2997{ 2998 free_area_init_node(0, NODE_DATA(0), zones_size, 2999 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 3000} 3001 3002static int page_alloc_cpu_notify(struct notifier_block *self, 3003 unsigned long action, void *hcpu) 3004{ 3005 int cpu = (unsigned long)hcpu; 3006 3007 if (action == CPU_DEAD) { 3008 local_irq_disable(); 3009 __drain_pages(cpu); 3010 vm_events_fold_cpu(cpu); 3011 local_irq_enable(); 3012 refresh_cpu_vm_stats(cpu); 3013 } 3014 return NOTIFY_OK; 3015} 3016 3017void __init page_alloc_init(void) 3018{ 3019 hotcpu_notifier(page_alloc_cpu_notify, 0); 3020} 3021 3022/* 3023 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio 3024 * or min_free_kbytes changes. 3025 */ 3026static void calculate_totalreserve_pages(void) 3027{ 3028 struct pglist_data *pgdat; 3029 unsigned long reserve_pages = 0; 3030 enum zone_type i, j; 3031 3032 for_each_online_pgdat(pgdat) { 3033 for (i = 0; i < MAX_NR_ZONES; i++) { 3034 struct zone *zone = pgdat->node_zones + i; 3035 unsigned long max = 0; 3036 3037 /* Find valid and maximum lowmem_reserve in the zone */ 3038 for (j = i; j < MAX_NR_ZONES; j++) { 3039 if (zone->lowmem_reserve[j] > max) 3040 max = zone->lowmem_reserve[j]; 3041 } 3042 3043 /* we treat pages_high as reserved pages. */ 3044 max += zone->pages_high; 3045 3046 if (max > zone->present_pages) 3047 max = zone->present_pages; 3048 reserve_pages += max; 3049 } 3050 } 3051 totalreserve_pages = reserve_pages; 3052} 3053 3054/* 3055 * setup_per_zone_lowmem_reserve - called whenever 3056 * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone 3057 * has a correct pages reserved value, so an adequate number of 3058 * pages are left in the zone after a successful __alloc_pages(). 3059 */ 3060static void setup_per_zone_lowmem_reserve(void) 3061{ 3062 struct pglist_data *pgdat; 3063 enum zone_type j, idx; 3064 3065 for_each_online_pgdat(pgdat) { 3066 for (j = 0; j < MAX_NR_ZONES; j++) { 3067 struct zone *zone = pgdat->node_zones + j; 3068 unsigned long present_pages = zone->present_pages; 3069 3070 zone->lowmem_reserve[j] = 0; 3071 3072 idx = j; 3073 while (idx) { 3074 struct zone *lower_zone; 3075 3076 idx--; 3077 3078 if (sysctl_lowmem_reserve_ratio[idx] < 1) 3079 sysctl_lowmem_reserve_ratio[idx] = 1; 3080 3081 lower_zone = pgdat->node_zones + idx; 3082 lower_zone->lowmem_reserve[j] = present_pages / 3083 sysctl_lowmem_reserve_ratio[idx]; 3084 present_pages += lower_zone->present_pages; 3085 } 3086 } 3087 } 3088 3089 /* update totalreserve_pages */ 3090 calculate_totalreserve_pages(); 3091} 3092 3093/** 3094 * setup_per_zone_pages_min - called when min_free_kbytes changes. 3095 * 3096 * Ensures that the pages_{min,low,high} values for each zone are set correctly 3097 * with respect to min_free_kbytes. 3098 */ 3099void setup_per_zone_pages_min(void) 3100{ 3101 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 3102 unsigned long lowmem_pages = 0; 3103 struct zone *zone; 3104 unsigned long flags; 3105 3106 /* Calculate total number of !ZONE_HIGHMEM pages */ 3107 for_each_zone(zone) { 3108 if (!is_highmem(zone)) 3109 lowmem_pages += zone->present_pages; 3110 } 3111 3112 for_each_zone(zone) { 3113 u64 tmp; 3114 3115 spin_lock_irqsave(&zone->lru_lock, flags); 3116 tmp = (u64)pages_min * zone->present_pages; 3117 do_div(tmp, lowmem_pages); 3118 if (is_highmem(zone)) { 3119 /* 3120 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 3121 * need highmem pages, so cap pages_min to a small 3122 * value here. 3123 * 3124 * The (pages_high-pages_low) and (pages_low-pages_min) 3125 * deltas controls asynch page reclaim, and so should 3126 * not be capped for highmem. 3127 */ 3128 int min_pages; 3129 3130 min_pages = zone->present_pages / 1024; 3131 if (min_pages < SWAP_CLUSTER_MAX) 3132 min_pages = SWAP_CLUSTER_MAX; 3133 if (min_pages > 128) 3134 min_pages = 128; 3135 zone->pages_min = min_pages; 3136 } else { 3137 /* 3138 * If it's a lowmem zone, reserve a number of pages 3139 * proportionate to the zone's size. 3140 */ 3141 zone->pages_min = tmp; 3142 } 3143 3144 zone->pages_low = zone->pages_min + (tmp >> 2); 3145 zone->pages_high = zone->pages_min + (tmp >> 1); 3146 spin_unlock_irqrestore(&zone->lru_lock, flags); 3147 } 3148 3149 /* update totalreserve_pages */ 3150 calculate_totalreserve_pages(); 3151} 3152 3153/* 3154 * Initialise min_free_kbytes. 3155 * 3156 * For small machines we want it small (128k min). For large machines 3157 * we want it large (64MB max). But it is not linear, because network 3158 * bandwidth does not increase linearly with machine size. We use 3159 * 3160 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 3161 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 3162 * 3163 * which yields 3164 * 3165 * 16MB: 512k 3166 * 32MB: 724k 3167 * 64MB: 1024k 3168 * 128MB: 1448k 3169 * 256MB: 2048k 3170 * 512MB: 2896k 3171 * 1024MB: 4096k 3172 * 2048MB: 5792k 3173 * 4096MB: 8192k 3174 * 8192MB: 11584k 3175 * 16384MB: 16384k 3176 */ 3177static int __init init_per_zone_pages_min(void) 3178{ 3179 unsigned long lowmem_kbytes; 3180 3181 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 3182 3183 min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 3184 if (min_free_kbytes < 128) 3185 min_free_kbytes = 128; 3186 if (min_free_kbytes > 65536) 3187 min_free_kbytes = 65536; 3188 setup_per_zone_pages_min(); 3189 setup_per_zone_lowmem_reserve(); 3190 return 0; 3191} 3192module_init(init_per_zone_pages_min) 3193 3194/* 3195 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 3196 * that we can call two helper functions whenever min_free_kbytes 3197 * changes. 3198 */ 3199int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 3200 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3201{ 3202 proc_dointvec(table, write, file, buffer, length, ppos); 3203 setup_per_zone_pages_min(); 3204 return 0; 3205} 3206 3207#ifdef CONFIG_NUMA 3208int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, 3209 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3210{ 3211 struct zone *zone; 3212 int rc; 3213 3214 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3215 if (rc) 3216 return rc; 3217 3218 for_each_zone(zone) 3219 zone->min_unmapped_pages = (zone->present_pages * 3220 sysctl_min_unmapped_ratio) / 100; 3221 return 0; 3222} 3223 3224int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write, 3225 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3226{ 3227 struct zone *zone; 3228 int rc; 3229 3230 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3231 if (rc) 3232 return rc; 3233 3234 for_each_zone(zone) 3235 zone->min_slab_pages = (zone->present_pages * 3236 sysctl_min_slab_ratio) / 100; 3237 return 0; 3238} 3239#endif 3240 3241/* 3242 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 3243 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 3244 * whenever sysctl_lowmem_reserve_ratio changes. 3245 * 3246 * The reserve ratio obviously has absolutely no relation with the 3247 * pages_min watermarks. The lowmem reserve ratio can only make sense 3248 * if in function of the boot time zone sizes. 3249 */ 3250int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 3251 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3252{ 3253 proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3254 setup_per_zone_lowmem_reserve(); 3255 return 0; 3256} 3257 3258/* 3259 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 3260 * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist 3261 * can have before it gets flushed back to buddy allocator. 3262 */ 3263 3264int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 3265 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3266{ 3267 struct zone *zone; 3268 unsigned int cpu; 3269 int ret; 3270 3271 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3272 if (!write || (ret == -EINVAL)) 3273 return ret; 3274 for_each_zone(zone) { 3275 for_each_online_cpu(cpu) { 3276 unsigned long high; 3277 high = zone->present_pages / percpu_pagelist_fraction; 3278 setup_pagelist_highmark(zone_pcp(zone, cpu), high); 3279 } 3280 } 3281 return 0; 3282} 3283 3284int hashdist = HASHDIST_DEFAULT; 3285 3286#ifdef CONFIG_NUMA 3287static int __init set_hashdist(char *str) 3288{ 3289 if (!str) 3290 return 0; 3291 hashdist = simple_strtoul(str, &str, 0); 3292 return 1; 3293} 3294__setup("hashdist=", set_hashdist); 3295#endif 3296 3297/* 3298 * allocate a large system hash table from bootmem 3299 * - it is assumed that the hash table must contain an exact power-of-2 3300 * quantity of entries 3301 * - limit is the number of hash buckets, not the total allocation size 3302 */ 3303void *__init alloc_large_system_hash(const char *tablename, 3304 unsigned long bucketsize, 3305 unsigned long numentries, 3306 int scale, 3307 int flags, 3308 unsigned int *_hash_shift, 3309 unsigned int *_hash_mask, 3310 unsigned long limit) 3311{ 3312 unsigned long long max = limit; 3313 unsigned long log2qty, size; 3314 void *table = NULL; 3315 3316 /* allow the kernel cmdline to have a say */ 3317 if (!numentries) { 3318 /* round applicable memory size up to nearest megabyte */ 3319 numentries = nr_kernel_pages; 3320 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 3321 numentries >>= 20 - PAGE_SHIFT; 3322 numentries <<= 20 - PAGE_SHIFT; 3323 3324 /* limit to 1 bucket per 2^scale bytes of low memory */ 3325 if (scale > PAGE_SHIFT) 3326 numentries >>= (scale - PAGE_SHIFT); 3327 else 3328 numentries <<= (PAGE_SHIFT - scale); 3329 3330 /* Make sure we've got at least a 0-order allocation.. */ 3331 if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 3332 numentries = PAGE_SIZE / bucketsize; 3333 } 3334 numentries = roundup_pow_of_two(numentries); 3335 3336 /* limit allocation size to 1/16 total memory by default */ 3337 if (max == 0) { 3338 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 3339 do_div(max, bucketsize); 3340 } 3341 3342 if (numentries > max) 3343 numentries = max; 3344 3345 log2qty = ilog2(numentries); 3346 3347 do { 3348 size = bucketsize << log2qty; 3349 if (flags & HASH_EARLY) 3350 table = alloc_bootmem(size); 3351 else if (hashdist) 3352 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 3353 else { 3354 unsigned long order; 3355 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++) 3356 ; 3357 table = (void*) __get_free_pages(GFP_ATOMIC, order); 3358 } 3359 } while (!table && size > PAGE_SIZE && --log2qty); 3360 3361 if (!table) 3362 panic("Failed to allocate %s hash table\n", tablename); 3363 3364 printk("%s hash table entries: %d (order: %d, %lu bytes)\n", 3365 tablename, 3366 (1U << log2qty), 3367 ilog2(size) - PAGE_SHIFT, 3368 size); 3369 3370 if (_hash_shift) 3371 *_hash_shift = log2qty; 3372 if (_hash_mask) 3373 *_hash_mask = (1 << log2qty) - 1; 3374 3375 return table; 3376} 3377 3378#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE 3379struct page *pfn_to_page(unsigned long pfn) 3380{ 3381 return __pfn_to_page(pfn); 3382} 3383unsigned long page_to_pfn(struct page *page) 3384{ 3385 return __page_to_pfn(page); 3386} 3387EXPORT_SYMBOL(pfn_to_page); 3388EXPORT_SYMBOL(page_to_pfn); 3389#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ 3390 3391#if MAX_NUMNODES > 1 3392/* 3393 * Find the highest possible node id. 3394 */ 3395int highest_possible_node_id(void) 3396{ 3397 unsigned int node; 3398 unsigned int highest = 0; 3399 3400 for_each_node_mask(node, node_possible_map) 3401 highest = node; 3402 return highest; 3403} 3404EXPORT_SYMBOL(highest_possible_node_id); 3405#endif 3406