page_alloc.c revision 8417bba4b151346ed475fcc923693c9e3be89063
1/* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17#include <linux/stddef.h> 18#include <linux/mm.h> 19#include <linux/swap.h> 20#include <linux/interrupt.h> 21#include <linux/pagemap.h> 22#include <linux/bootmem.h> 23#include <linux/compiler.h> 24#include <linux/kernel.h> 25#include <linux/module.h> 26#include <linux/suspend.h> 27#include <linux/pagevec.h> 28#include <linux/blkdev.h> 29#include <linux/slab.h> 30#include <linux/notifier.h> 31#include <linux/topology.h> 32#include <linux/sysctl.h> 33#include <linux/cpu.h> 34#include <linux/cpuset.h> 35#include <linux/memory_hotplug.h> 36#include <linux/nodemask.h> 37#include <linux/vmalloc.h> 38#include <linux/mempolicy.h> 39#include <linux/stop_machine.h> 40 41#include <asm/tlbflush.h> 42#include <asm/div64.h> 43#include "internal.h" 44 45/* 46 * MCD - HACK: Find somewhere to initialize this EARLY, or make this 47 * initializer cleaner 48 */ 49nodemask_t node_online_map __read_mostly = { { [0] = 1UL } }; 50EXPORT_SYMBOL(node_online_map); 51nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL; 52EXPORT_SYMBOL(node_possible_map); 53unsigned long totalram_pages __read_mostly; 54unsigned long totalreserve_pages __read_mostly; 55long nr_swap_pages; 56int percpu_pagelist_fraction; 57 58static void __free_pages_ok(struct page *page, unsigned int order); 59 60/* 61 * results with 256, 32 in the lowmem_reserve sysctl: 62 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 63 * 1G machine -> (16M dma, 784M normal, 224M high) 64 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 65 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 66 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA 67 * 68 * TBD: should special case ZONE_DMA32 machines here - in those we normally 69 * don't need any ZONE_NORMAL reservation 70 */ 71int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 72 256, 73#ifdef CONFIG_ZONE_DMA32 74 256, 75#endif 76#ifdef CONFIG_HIGHMEM 77 32 78#endif 79}; 80 81EXPORT_SYMBOL(totalram_pages); 82 83/* 84 * Used by page_zone() to look up the address of the struct zone whose 85 * id is encoded in the upper bits of page->flags 86 */ 87struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly; 88EXPORT_SYMBOL(zone_table); 89 90static char *zone_names[MAX_NR_ZONES] = { 91 "DMA", 92#ifdef CONFIG_ZONE_DMA32 93 "DMA32", 94#endif 95 "Normal", 96#ifdef CONFIG_HIGHMEM 97 "HighMem" 98#endif 99}; 100 101int min_free_kbytes = 1024; 102 103unsigned long __meminitdata nr_kernel_pages; 104unsigned long __meminitdata nr_all_pages; 105 106#ifdef CONFIG_DEBUG_VM 107static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 108{ 109 int ret = 0; 110 unsigned seq; 111 unsigned long pfn = page_to_pfn(page); 112 113 do { 114 seq = zone_span_seqbegin(zone); 115 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 116 ret = 1; 117 else if (pfn < zone->zone_start_pfn) 118 ret = 1; 119 } while (zone_span_seqretry(zone, seq)); 120 121 return ret; 122} 123 124static int page_is_consistent(struct zone *zone, struct page *page) 125{ 126#ifdef CONFIG_HOLES_IN_ZONE 127 if (!pfn_valid(page_to_pfn(page))) 128 return 0; 129#endif 130 if (zone != page_zone(page)) 131 return 0; 132 133 return 1; 134} 135/* 136 * Temporary debugging check for pages not lying within a given zone. 137 */ 138static int bad_range(struct zone *zone, struct page *page) 139{ 140 if (page_outside_zone_boundaries(zone, page)) 141 return 1; 142 if (!page_is_consistent(zone, page)) 143 return 1; 144 145 return 0; 146} 147#else 148static inline int bad_range(struct zone *zone, struct page *page) 149{ 150 return 0; 151} 152#endif 153 154static void bad_page(struct page *page) 155{ 156 printk(KERN_EMERG "Bad page state in process '%s'\n" 157 KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" 158 KERN_EMERG "Trying to fix it up, but a reboot is needed\n" 159 KERN_EMERG "Backtrace:\n", 160 current->comm, page, (int)(2*sizeof(unsigned long)), 161 (unsigned long)page->flags, page->mapping, 162 page_mapcount(page), page_count(page)); 163 dump_stack(); 164 page->flags &= ~(1 << PG_lru | 165 1 << PG_private | 166 1 << PG_locked | 167 1 << PG_active | 168 1 << PG_dirty | 169 1 << PG_reclaim | 170 1 << PG_slab | 171 1 << PG_swapcache | 172 1 << PG_writeback | 173 1 << PG_buddy ); 174 set_page_count(page, 0); 175 reset_page_mapcount(page); 176 page->mapping = NULL; 177 add_taint(TAINT_BAD_PAGE); 178} 179 180/* 181 * Higher-order pages are called "compound pages". They are structured thusly: 182 * 183 * The first PAGE_SIZE page is called the "head page". 184 * 185 * The remaining PAGE_SIZE pages are called "tail pages". 186 * 187 * All pages have PG_compound set. All pages have their ->private pointing at 188 * the head page (even the head page has this). 189 * 190 * The first tail page's ->lru.next holds the address of the compound page's 191 * put_page() function. Its ->lru.prev holds the order of allocation. 192 * This usage means that zero-order pages may not be compound. 193 */ 194 195static void free_compound_page(struct page *page) 196{ 197 __free_pages_ok(page, (unsigned long)page[1].lru.prev); 198} 199 200static void prep_compound_page(struct page *page, unsigned long order) 201{ 202 int i; 203 int nr_pages = 1 << order; 204 205 page[1].lru.next = (void *)free_compound_page; /* set dtor */ 206 page[1].lru.prev = (void *)order; 207 for (i = 0; i < nr_pages; i++) { 208 struct page *p = page + i; 209 210 __SetPageCompound(p); 211 set_page_private(p, (unsigned long)page); 212 } 213} 214 215static void destroy_compound_page(struct page *page, unsigned long order) 216{ 217 int i; 218 int nr_pages = 1 << order; 219 220 if (unlikely((unsigned long)page[1].lru.prev != order)) 221 bad_page(page); 222 223 for (i = 0; i < nr_pages; i++) { 224 struct page *p = page + i; 225 226 if (unlikely(!PageCompound(p) | 227 (page_private(p) != (unsigned long)page))) 228 bad_page(page); 229 __ClearPageCompound(p); 230 } 231} 232 233static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 234{ 235 int i; 236 237 VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM); 238 /* 239 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 240 * and __GFP_HIGHMEM from hard or soft interrupt context. 241 */ 242 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt()); 243 for (i = 0; i < (1 << order); i++) 244 clear_highpage(page + i); 245} 246 247/* 248 * function for dealing with page's order in buddy system. 249 * zone->lock is already acquired when we use these. 250 * So, we don't need atomic page->flags operations here. 251 */ 252static inline unsigned long page_order(struct page *page) 253{ 254 return page_private(page); 255} 256 257static inline void set_page_order(struct page *page, int order) 258{ 259 set_page_private(page, order); 260 __SetPageBuddy(page); 261} 262 263static inline void rmv_page_order(struct page *page) 264{ 265 __ClearPageBuddy(page); 266 set_page_private(page, 0); 267} 268 269/* 270 * Locate the struct page for both the matching buddy in our 271 * pair (buddy1) and the combined O(n+1) page they form (page). 272 * 273 * 1) Any buddy B1 will have an order O twin B2 which satisfies 274 * the following equation: 275 * B2 = B1 ^ (1 << O) 276 * For example, if the starting buddy (buddy2) is #8 its order 277 * 1 buddy is #10: 278 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 279 * 280 * 2) Any buddy B will have an order O+1 parent P which 281 * satisfies the following equation: 282 * P = B & ~(1 << O) 283 * 284 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER 285 */ 286static inline struct page * 287__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order) 288{ 289 unsigned long buddy_idx = page_idx ^ (1 << order); 290 291 return page + (buddy_idx - page_idx); 292} 293 294static inline unsigned long 295__find_combined_index(unsigned long page_idx, unsigned int order) 296{ 297 return (page_idx & ~(1 << order)); 298} 299 300/* 301 * This function checks whether a page is free && is the buddy 302 * we can do coalesce a page and its buddy if 303 * (a) the buddy is not in a hole && 304 * (b) the buddy is in the buddy system && 305 * (c) a page and its buddy have the same order && 306 * (d) a page and its buddy are in the same zone. 307 * 308 * For recording whether a page is in the buddy system, we use PG_buddy. 309 * Setting, clearing, and testing PG_buddy is serialized by zone->lock. 310 * 311 * For recording page's order, we use page_private(page). 312 */ 313static inline int page_is_buddy(struct page *page, struct page *buddy, 314 int order) 315{ 316#ifdef CONFIG_HOLES_IN_ZONE 317 if (!pfn_valid(page_to_pfn(buddy))) 318 return 0; 319#endif 320 321 if (page_zone_id(page) != page_zone_id(buddy)) 322 return 0; 323 324 if (PageBuddy(buddy) && page_order(buddy) == order) { 325 BUG_ON(page_count(buddy) != 0); 326 return 1; 327 } 328 return 0; 329} 330 331/* 332 * Freeing function for a buddy system allocator. 333 * 334 * The concept of a buddy system is to maintain direct-mapped table 335 * (containing bit values) for memory blocks of various "orders". 336 * The bottom level table contains the map for the smallest allocatable 337 * units of memory (here, pages), and each level above it describes 338 * pairs of units from the levels below, hence, "buddies". 339 * At a high level, all that happens here is marking the table entry 340 * at the bottom level available, and propagating the changes upward 341 * as necessary, plus some accounting needed to play nicely with other 342 * parts of the VM system. 343 * At each level, we keep a list of pages, which are heads of continuous 344 * free pages of length of (1 << order) and marked with PG_buddy. Page's 345 * order is recorded in page_private(page) field. 346 * So when we are allocating or freeing one, we can derive the state of the 347 * other. That is, if we allocate a small block, and both were 348 * free, the remainder of the region must be split into blocks. 349 * If a block is freed, and its buddy is also free, then this 350 * triggers coalescing into a block of larger size. 351 * 352 * -- wli 353 */ 354 355static inline void __free_one_page(struct page *page, 356 struct zone *zone, unsigned int order) 357{ 358 unsigned long page_idx; 359 int order_size = 1 << order; 360 361 if (unlikely(PageCompound(page))) 362 destroy_compound_page(page, order); 363 364 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 365 366 VM_BUG_ON(page_idx & (order_size - 1)); 367 VM_BUG_ON(bad_range(zone, page)); 368 369 zone->free_pages += order_size; 370 while (order < MAX_ORDER-1) { 371 unsigned long combined_idx; 372 struct free_area *area; 373 struct page *buddy; 374 375 buddy = __page_find_buddy(page, page_idx, order); 376 if (!page_is_buddy(page, buddy, order)) 377 break; /* Move the buddy up one level. */ 378 379 list_del(&buddy->lru); 380 area = zone->free_area + order; 381 area->nr_free--; 382 rmv_page_order(buddy); 383 combined_idx = __find_combined_index(page_idx, order); 384 page = page + (combined_idx - page_idx); 385 page_idx = combined_idx; 386 order++; 387 } 388 set_page_order(page, order); 389 list_add(&page->lru, &zone->free_area[order].free_list); 390 zone->free_area[order].nr_free++; 391} 392 393static inline int free_pages_check(struct page *page) 394{ 395 if (unlikely(page_mapcount(page) | 396 (page->mapping != NULL) | 397 (page_count(page) != 0) | 398 (page->flags & ( 399 1 << PG_lru | 400 1 << PG_private | 401 1 << PG_locked | 402 1 << PG_active | 403 1 << PG_reclaim | 404 1 << PG_slab | 405 1 << PG_swapcache | 406 1 << PG_writeback | 407 1 << PG_reserved | 408 1 << PG_buddy )))) 409 bad_page(page); 410 if (PageDirty(page)) 411 __ClearPageDirty(page); 412 /* 413 * For now, we report if PG_reserved was found set, but do not 414 * clear it, and do not free the page. But we shall soon need 415 * to do more, for when the ZERO_PAGE count wraps negative. 416 */ 417 return PageReserved(page); 418} 419 420/* 421 * Frees a list of pages. 422 * Assumes all pages on list are in same zone, and of same order. 423 * count is the number of pages to free. 424 * 425 * If the zone was previously in an "all pages pinned" state then look to 426 * see if this freeing clears that state. 427 * 428 * And clear the zone's pages_scanned counter, to hold off the "all pages are 429 * pinned" detection logic. 430 */ 431static void free_pages_bulk(struct zone *zone, int count, 432 struct list_head *list, int order) 433{ 434 spin_lock(&zone->lock); 435 zone->all_unreclaimable = 0; 436 zone->pages_scanned = 0; 437 while (count--) { 438 struct page *page; 439 440 VM_BUG_ON(list_empty(list)); 441 page = list_entry(list->prev, struct page, lru); 442 /* have to delete it as __free_one_page list manipulates */ 443 list_del(&page->lru); 444 __free_one_page(page, zone, order); 445 } 446 spin_unlock(&zone->lock); 447} 448 449static void free_one_page(struct zone *zone, struct page *page, int order) 450{ 451 spin_lock(&zone->lock); 452 zone->all_unreclaimable = 0; 453 zone->pages_scanned = 0; 454 __free_one_page(page, zone ,order); 455 spin_unlock(&zone->lock); 456} 457 458static void __free_pages_ok(struct page *page, unsigned int order) 459{ 460 unsigned long flags; 461 int i; 462 int reserved = 0; 463 464 arch_free_page(page, order); 465 if (!PageHighMem(page)) 466 debug_check_no_locks_freed(page_address(page), 467 PAGE_SIZE<<order); 468 469 for (i = 0 ; i < (1 << order) ; ++i) 470 reserved += free_pages_check(page + i); 471 if (reserved) 472 return; 473 474 kernel_map_pages(page, 1 << order, 0); 475 local_irq_save(flags); 476 __count_vm_events(PGFREE, 1 << order); 477 free_one_page(page_zone(page), page, order); 478 local_irq_restore(flags); 479} 480 481/* 482 * permit the bootmem allocator to evade page validation on high-order frees 483 */ 484void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order) 485{ 486 if (order == 0) { 487 __ClearPageReserved(page); 488 set_page_count(page, 0); 489 set_page_refcounted(page); 490 __free_page(page); 491 } else { 492 int loop; 493 494 prefetchw(page); 495 for (loop = 0; loop < BITS_PER_LONG; loop++) { 496 struct page *p = &page[loop]; 497 498 if (loop + 1 < BITS_PER_LONG) 499 prefetchw(p + 1); 500 __ClearPageReserved(p); 501 set_page_count(p, 0); 502 } 503 504 set_page_refcounted(page); 505 __free_pages(page, order); 506 } 507} 508 509 510/* 511 * The order of subdivision here is critical for the IO subsystem. 512 * Please do not alter this order without good reasons and regression 513 * testing. Specifically, as large blocks of memory are subdivided, 514 * the order in which smaller blocks are delivered depends on the order 515 * they're subdivided in this function. This is the primary factor 516 * influencing the order in which pages are delivered to the IO 517 * subsystem according to empirical testing, and this is also justified 518 * by considering the behavior of a buddy system containing a single 519 * large block of memory acted on by a series of small allocations. 520 * This behavior is a critical factor in sglist merging's success. 521 * 522 * -- wli 523 */ 524static inline void expand(struct zone *zone, struct page *page, 525 int low, int high, struct free_area *area) 526{ 527 unsigned long size = 1 << high; 528 529 while (high > low) { 530 area--; 531 high--; 532 size >>= 1; 533 VM_BUG_ON(bad_range(zone, &page[size])); 534 list_add(&page[size].lru, &area->free_list); 535 area->nr_free++; 536 set_page_order(&page[size], high); 537 } 538} 539 540/* 541 * This page is about to be returned from the page allocator 542 */ 543static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 544{ 545 if (unlikely(page_mapcount(page) | 546 (page->mapping != NULL) | 547 (page_count(page) != 0) | 548 (page->flags & ( 549 1 << PG_lru | 550 1 << PG_private | 551 1 << PG_locked | 552 1 << PG_active | 553 1 << PG_dirty | 554 1 << PG_reclaim | 555 1 << PG_slab | 556 1 << PG_swapcache | 557 1 << PG_writeback | 558 1 << PG_reserved | 559 1 << PG_buddy )))) 560 bad_page(page); 561 562 /* 563 * For now, we report if PG_reserved was found set, but do not 564 * clear it, and do not allocate the page: as a safety net. 565 */ 566 if (PageReserved(page)) 567 return 1; 568 569 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 570 1 << PG_referenced | 1 << PG_arch_1 | 571 1 << PG_checked | 1 << PG_mappedtodisk); 572 set_page_private(page, 0); 573 set_page_refcounted(page); 574 kernel_map_pages(page, 1 << order, 1); 575 576 if (gfp_flags & __GFP_ZERO) 577 prep_zero_page(page, order, gfp_flags); 578 579 if (order && (gfp_flags & __GFP_COMP)) 580 prep_compound_page(page, order); 581 582 return 0; 583} 584 585/* 586 * Do the hard work of removing an element from the buddy allocator. 587 * Call me with the zone->lock already held. 588 */ 589static struct page *__rmqueue(struct zone *zone, unsigned int order) 590{ 591 struct free_area * area; 592 unsigned int current_order; 593 struct page *page; 594 595 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 596 area = zone->free_area + current_order; 597 if (list_empty(&area->free_list)) 598 continue; 599 600 page = list_entry(area->free_list.next, struct page, lru); 601 list_del(&page->lru); 602 rmv_page_order(page); 603 area->nr_free--; 604 zone->free_pages -= 1UL << order; 605 expand(zone, page, order, current_order, area); 606 return page; 607 } 608 609 return NULL; 610} 611 612/* 613 * Obtain a specified number of elements from the buddy allocator, all under 614 * a single hold of the lock, for efficiency. Add them to the supplied list. 615 * Returns the number of new pages which were placed at *list. 616 */ 617static int rmqueue_bulk(struct zone *zone, unsigned int order, 618 unsigned long count, struct list_head *list) 619{ 620 int i; 621 622 spin_lock(&zone->lock); 623 for (i = 0; i < count; ++i) { 624 struct page *page = __rmqueue(zone, order); 625 if (unlikely(page == NULL)) 626 break; 627 list_add_tail(&page->lru, list); 628 } 629 spin_unlock(&zone->lock); 630 return i; 631} 632 633#ifdef CONFIG_NUMA 634/* 635 * Called from the slab reaper to drain pagesets on a particular node that 636 * belongs to the currently executing processor. 637 * Note that this function must be called with the thread pinned to 638 * a single processor. 639 */ 640void drain_node_pages(int nodeid) 641{ 642 int i; 643 enum zone_type z; 644 unsigned long flags; 645 646 for (z = 0; z < MAX_NR_ZONES; z++) { 647 struct zone *zone = NODE_DATA(nodeid)->node_zones + z; 648 struct per_cpu_pageset *pset; 649 650 if (!populated_zone(zone)) 651 continue; 652 653 pset = zone_pcp(zone, smp_processor_id()); 654 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 655 struct per_cpu_pages *pcp; 656 657 pcp = &pset->pcp[i]; 658 if (pcp->count) { 659 local_irq_save(flags); 660 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 661 pcp->count = 0; 662 local_irq_restore(flags); 663 } 664 } 665 } 666} 667#endif 668 669#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU) 670static void __drain_pages(unsigned int cpu) 671{ 672 unsigned long flags; 673 struct zone *zone; 674 int i; 675 676 for_each_zone(zone) { 677 struct per_cpu_pageset *pset; 678 679 pset = zone_pcp(zone, cpu); 680 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 681 struct per_cpu_pages *pcp; 682 683 pcp = &pset->pcp[i]; 684 local_irq_save(flags); 685 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 686 pcp->count = 0; 687 local_irq_restore(flags); 688 } 689 } 690} 691#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */ 692 693#ifdef CONFIG_PM 694 695void mark_free_pages(struct zone *zone) 696{ 697 unsigned long zone_pfn, flags; 698 int order; 699 struct list_head *curr; 700 701 if (!zone->spanned_pages) 702 return; 703 704 spin_lock_irqsave(&zone->lock, flags); 705 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) 706 ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn)); 707 708 for (order = MAX_ORDER - 1; order >= 0; --order) 709 list_for_each(curr, &zone->free_area[order].free_list) { 710 unsigned long start_pfn, i; 711 712 start_pfn = page_to_pfn(list_entry(curr, struct page, lru)); 713 714 for (i=0; i < (1<<order); i++) 715 SetPageNosaveFree(pfn_to_page(start_pfn+i)); 716 } 717 spin_unlock_irqrestore(&zone->lock, flags); 718} 719 720/* 721 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 722 */ 723void drain_local_pages(void) 724{ 725 unsigned long flags; 726 727 local_irq_save(flags); 728 __drain_pages(smp_processor_id()); 729 local_irq_restore(flags); 730} 731#endif /* CONFIG_PM */ 732 733/* 734 * Free a 0-order page 735 */ 736static void fastcall free_hot_cold_page(struct page *page, int cold) 737{ 738 struct zone *zone = page_zone(page); 739 struct per_cpu_pages *pcp; 740 unsigned long flags; 741 742 arch_free_page(page, 0); 743 744 if (PageAnon(page)) 745 page->mapping = NULL; 746 if (free_pages_check(page)) 747 return; 748 749 kernel_map_pages(page, 1, 0); 750 751 pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; 752 local_irq_save(flags); 753 __count_vm_event(PGFREE); 754 list_add(&page->lru, &pcp->list); 755 pcp->count++; 756 if (pcp->count >= pcp->high) { 757 free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 758 pcp->count -= pcp->batch; 759 } 760 local_irq_restore(flags); 761 put_cpu(); 762} 763 764void fastcall free_hot_page(struct page *page) 765{ 766 free_hot_cold_page(page, 0); 767} 768 769void fastcall free_cold_page(struct page *page) 770{ 771 free_hot_cold_page(page, 1); 772} 773 774/* 775 * split_page takes a non-compound higher-order page, and splits it into 776 * n (1<<order) sub-pages: page[0..n] 777 * Each sub-page must be freed individually. 778 * 779 * Note: this is probably too low level an operation for use in drivers. 780 * Please consult with lkml before using this in your driver. 781 */ 782void split_page(struct page *page, unsigned int order) 783{ 784 int i; 785 786 VM_BUG_ON(PageCompound(page)); 787 VM_BUG_ON(!page_count(page)); 788 for (i = 1; i < (1 << order); i++) 789 set_page_refcounted(page + i); 790} 791 792/* 793 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 794 * we cheat by calling it from here, in the order > 0 path. Saves a branch 795 * or two. 796 */ 797static struct page *buffered_rmqueue(struct zonelist *zonelist, 798 struct zone *zone, int order, gfp_t gfp_flags) 799{ 800 unsigned long flags; 801 struct page *page; 802 int cold = !!(gfp_flags & __GFP_COLD); 803 int cpu; 804 805again: 806 cpu = get_cpu(); 807 if (likely(order == 0)) { 808 struct per_cpu_pages *pcp; 809 810 pcp = &zone_pcp(zone, cpu)->pcp[cold]; 811 local_irq_save(flags); 812 if (!pcp->count) { 813 pcp->count += rmqueue_bulk(zone, 0, 814 pcp->batch, &pcp->list); 815 if (unlikely(!pcp->count)) 816 goto failed; 817 } 818 page = list_entry(pcp->list.next, struct page, lru); 819 list_del(&page->lru); 820 pcp->count--; 821 } else { 822 spin_lock_irqsave(&zone->lock, flags); 823 page = __rmqueue(zone, order); 824 spin_unlock(&zone->lock); 825 if (!page) 826 goto failed; 827 } 828 829 __count_zone_vm_events(PGALLOC, zone, 1 << order); 830 zone_statistics(zonelist, zone); 831 local_irq_restore(flags); 832 put_cpu(); 833 834 VM_BUG_ON(bad_range(zone, page)); 835 if (prep_new_page(page, order, gfp_flags)) 836 goto again; 837 return page; 838 839failed: 840 local_irq_restore(flags); 841 put_cpu(); 842 return NULL; 843} 844 845#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 846#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ 847#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ 848#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ 849#define ALLOC_HARDER 0x10 /* try to alloc harder */ 850#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 851#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 852 853/* 854 * Return 1 if free pages are above 'mark'. This takes into account the order 855 * of the allocation. 856 */ 857int zone_watermark_ok(struct zone *z, int order, unsigned long mark, 858 int classzone_idx, int alloc_flags) 859{ 860 /* free_pages my go negative - that's OK */ 861 long min = mark, free_pages = z->free_pages - (1 << order) + 1; 862 int o; 863 864 if (alloc_flags & ALLOC_HIGH) 865 min -= min / 2; 866 if (alloc_flags & ALLOC_HARDER) 867 min -= min / 4; 868 869 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 870 return 0; 871 for (o = 0; o < order; o++) { 872 /* At the next order, this order's pages become unavailable */ 873 free_pages -= z->free_area[o].nr_free << o; 874 875 /* Require fewer higher order pages to be free */ 876 min >>= 1; 877 878 if (free_pages <= min) 879 return 0; 880 } 881 return 1; 882} 883 884/* 885 * get_page_from_freeliest goes through the zonelist trying to allocate 886 * a page. 887 */ 888static struct page * 889get_page_from_freelist(gfp_t gfp_mask, unsigned int order, 890 struct zonelist *zonelist, int alloc_flags) 891{ 892 struct zone **z = zonelist->zones; 893 struct page *page = NULL; 894 int classzone_idx = zone_idx(*z); 895 struct zone *zone; 896 897 /* 898 * Go through the zonelist once, looking for a zone with enough free. 899 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 900 */ 901 do { 902 zone = *z; 903 if (unlikely((gfp_mask & __GFP_THISNODE) && 904 zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) 905 break; 906 if ((alloc_flags & ALLOC_CPUSET) && 907 !cpuset_zone_allowed(zone, gfp_mask)) 908 continue; 909 910 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 911 unsigned long mark; 912 if (alloc_flags & ALLOC_WMARK_MIN) 913 mark = zone->pages_min; 914 else if (alloc_flags & ALLOC_WMARK_LOW) 915 mark = zone->pages_low; 916 else 917 mark = zone->pages_high; 918 if (!zone_watermark_ok(zone , order, mark, 919 classzone_idx, alloc_flags)) 920 if (!zone_reclaim_mode || 921 !zone_reclaim(zone, gfp_mask, order)) 922 continue; 923 } 924 925 page = buffered_rmqueue(zonelist, zone, order, gfp_mask); 926 if (page) { 927 break; 928 } 929 } while (*(++z) != NULL); 930 return page; 931} 932 933/* 934 * This is the 'heart' of the zoned buddy allocator. 935 */ 936struct page * fastcall 937__alloc_pages(gfp_t gfp_mask, unsigned int order, 938 struct zonelist *zonelist) 939{ 940 const gfp_t wait = gfp_mask & __GFP_WAIT; 941 struct zone **z; 942 struct page *page; 943 struct reclaim_state reclaim_state; 944 struct task_struct *p = current; 945 int do_retry; 946 int alloc_flags; 947 int did_some_progress; 948 949 might_sleep_if(wait); 950 951restart: 952 z = zonelist->zones; /* the list of zones suitable for gfp_mask */ 953 954 if (unlikely(*z == NULL)) { 955 /* Should this ever happen?? */ 956 return NULL; 957 } 958 959 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 960 zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); 961 if (page) 962 goto got_pg; 963 964 do { 965 wakeup_kswapd(*z, order); 966 } while (*(++z)); 967 968 /* 969 * OK, we're below the kswapd watermark and have kicked background 970 * reclaim. Now things get more complex, so set up alloc_flags according 971 * to how we want to proceed. 972 * 973 * The caller may dip into page reserves a bit more if the caller 974 * cannot run direct reclaim, or if the caller has realtime scheduling 975 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 976 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 977 */ 978 alloc_flags = ALLOC_WMARK_MIN; 979 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) 980 alloc_flags |= ALLOC_HARDER; 981 if (gfp_mask & __GFP_HIGH) 982 alloc_flags |= ALLOC_HIGH; 983 if (wait) 984 alloc_flags |= ALLOC_CPUSET; 985 986 /* 987 * Go through the zonelist again. Let __GFP_HIGH and allocations 988 * coming from realtime tasks go deeper into reserves. 989 * 990 * This is the last chance, in general, before the goto nopage. 991 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 992 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 993 */ 994 page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); 995 if (page) 996 goto got_pg; 997 998 /* This allocation should allow future memory freeing. */ 999 1000 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) 1001 && !in_interrupt()) { 1002 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 1003nofail_alloc: 1004 /* go through the zonelist yet again, ignoring mins */ 1005 page = get_page_from_freelist(gfp_mask, order, 1006 zonelist, ALLOC_NO_WATERMARKS); 1007 if (page) 1008 goto got_pg; 1009 if (gfp_mask & __GFP_NOFAIL) { 1010 blk_congestion_wait(WRITE, HZ/50); 1011 goto nofail_alloc; 1012 } 1013 } 1014 goto nopage; 1015 } 1016 1017 /* Atomic allocations - we can't balance anything */ 1018 if (!wait) 1019 goto nopage; 1020 1021rebalance: 1022 cond_resched(); 1023 1024 /* We now go into synchronous reclaim */ 1025 cpuset_memory_pressure_bump(); 1026 p->flags |= PF_MEMALLOC; 1027 reclaim_state.reclaimed_slab = 0; 1028 p->reclaim_state = &reclaim_state; 1029 1030 did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); 1031 1032 p->reclaim_state = NULL; 1033 p->flags &= ~PF_MEMALLOC; 1034 1035 cond_resched(); 1036 1037 if (likely(did_some_progress)) { 1038 page = get_page_from_freelist(gfp_mask, order, 1039 zonelist, alloc_flags); 1040 if (page) 1041 goto got_pg; 1042 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 1043 /* 1044 * Go through the zonelist yet one more time, keep 1045 * very high watermark here, this is only to catch 1046 * a parallel oom killing, we must fail if we're still 1047 * under heavy pressure. 1048 */ 1049 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1050 zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); 1051 if (page) 1052 goto got_pg; 1053 1054 out_of_memory(zonelist, gfp_mask, order); 1055 goto restart; 1056 } 1057 1058 /* 1059 * Don't let big-order allocations loop unless the caller explicitly 1060 * requests that. Wait for some write requests to complete then retry. 1061 * 1062 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order 1063 * <= 3, but that may not be true in other implementations. 1064 */ 1065 do_retry = 0; 1066 if (!(gfp_mask & __GFP_NORETRY)) { 1067 if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) 1068 do_retry = 1; 1069 if (gfp_mask & __GFP_NOFAIL) 1070 do_retry = 1; 1071 } 1072 if (do_retry) { 1073 blk_congestion_wait(WRITE, HZ/50); 1074 goto rebalance; 1075 } 1076 1077nopage: 1078 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 1079 printk(KERN_WARNING "%s: page allocation failure." 1080 " order:%d, mode:0x%x\n", 1081 p->comm, order, gfp_mask); 1082 dump_stack(); 1083 show_mem(); 1084 } 1085got_pg: 1086 return page; 1087} 1088 1089EXPORT_SYMBOL(__alloc_pages); 1090 1091/* 1092 * Common helper functions. 1093 */ 1094fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 1095{ 1096 struct page * page; 1097 page = alloc_pages(gfp_mask, order); 1098 if (!page) 1099 return 0; 1100 return (unsigned long) page_address(page); 1101} 1102 1103EXPORT_SYMBOL(__get_free_pages); 1104 1105fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) 1106{ 1107 struct page * page; 1108 1109 /* 1110 * get_zeroed_page() returns a 32-bit address, which cannot represent 1111 * a highmem page 1112 */ 1113 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1114 1115 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1116 if (page) 1117 return (unsigned long) page_address(page); 1118 return 0; 1119} 1120 1121EXPORT_SYMBOL(get_zeroed_page); 1122 1123void __pagevec_free(struct pagevec *pvec) 1124{ 1125 int i = pagevec_count(pvec); 1126 1127 while (--i >= 0) 1128 free_hot_cold_page(pvec->pages[i], pvec->cold); 1129} 1130 1131fastcall void __free_pages(struct page *page, unsigned int order) 1132{ 1133 if (put_page_testzero(page)) { 1134 if (order == 0) 1135 free_hot_page(page); 1136 else 1137 __free_pages_ok(page, order); 1138 } 1139} 1140 1141EXPORT_SYMBOL(__free_pages); 1142 1143fastcall void free_pages(unsigned long addr, unsigned int order) 1144{ 1145 if (addr != 0) { 1146 VM_BUG_ON(!virt_addr_valid((void *)addr)); 1147 __free_pages(virt_to_page((void *)addr), order); 1148 } 1149} 1150 1151EXPORT_SYMBOL(free_pages); 1152 1153/* 1154 * Total amount of free (allocatable) RAM: 1155 */ 1156unsigned int nr_free_pages(void) 1157{ 1158 unsigned int sum = 0; 1159 struct zone *zone; 1160 1161 for_each_zone(zone) 1162 sum += zone->free_pages; 1163 1164 return sum; 1165} 1166 1167EXPORT_SYMBOL(nr_free_pages); 1168 1169#ifdef CONFIG_NUMA 1170unsigned int nr_free_pages_pgdat(pg_data_t *pgdat) 1171{ 1172 unsigned int sum = 0; 1173 enum zone_type i; 1174 1175 for (i = 0; i < MAX_NR_ZONES; i++) 1176 sum += pgdat->node_zones[i].free_pages; 1177 1178 return sum; 1179} 1180#endif 1181 1182static unsigned int nr_free_zone_pages(int offset) 1183{ 1184 /* Just pick one node, since fallback list is circular */ 1185 pg_data_t *pgdat = NODE_DATA(numa_node_id()); 1186 unsigned int sum = 0; 1187 1188 struct zonelist *zonelist = pgdat->node_zonelists + offset; 1189 struct zone **zonep = zonelist->zones; 1190 struct zone *zone; 1191 1192 for (zone = *zonep++; zone; zone = *zonep++) { 1193 unsigned long size = zone->present_pages; 1194 unsigned long high = zone->pages_high; 1195 if (size > high) 1196 sum += size - high; 1197 } 1198 1199 return sum; 1200} 1201 1202/* 1203 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 1204 */ 1205unsigned int nr_free_buffer_pages(void) 1206{ 1207 return nr_free_zone_pages(gfp_zone(GFP_USER)); 1208} 1209 1210/* 1211 * Amount of free RAM allocatable within all zones 1212 */ 1213unsigned int nr_free_pagecache_pages(void) 1214{ 1215 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER)); 1216} 1217#ifdef CONFIG_NUMA 1218static void show_node(struct zone *zone) 1219{ 1220 printk("Node %d ", zone->zone_pgdat->node_id); 1221} 1222#else 1223#define show_node(zone) do { } while (0) 1224#endif 1225 1226void si_meminfo(struct sysinfo *val) 1227{ 1228 val->totalram = totalram_pages; 1229 val->sharedram = 0; 1230 val->freeram = nr_free_pages(); 1231 val->bufferram = nr_blockdev_pages(); 1232 val->totalhigh = totalhigh_pages; 1233 val->freehigh = nr_free_highpages(); 1234 val->mem_unit = PAGE_SIZE; 1235} 1236 1237EXPORT_SYMBOL(si_meminfo); 1238 1239#ifdef CONFIG_NUMA 1240void si_meminfo_node(struct sysinfo *val, int nid) 1241{ 1242 pg_data_t *pgdat = NODE_DATA(nid); 1243 1244 val->totalram = pgdat->node_present_pages; 1245 val->freeram = nr_free_pages_pgdat(pgdat); 1246#ifdef CONFIG_HIGHMEM 1247 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 1248 val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages; 1249#else 1250 val->totalhigh = 0; 1251 val->freehigh = 0; 1252#endif 1253 val->mem_unit = PAGE_SIZE; 1254} 1255#endif 1256 1257#define K(x) ((x) << (PAGE_SHIFT-10)) 1258 1259/* 1260 * Show free area list (used inside shift_scroll-lock stuff) 1261 * We also calculate the percentage fragmentation. We do this by counting the 1262 * memory on each free list with the exception of the first item on the list. 1263 */ 1264void show_free_areas(void) 1265{ 1266 int cpu, temperature; 1267 unsigned long active; 1268 unsigned long inactive; 1269 unsigned long free; 1270 struct zone *zone; 1271 1272 for_each_zone(zone) { 1273 show_node(zone); 1274 printk("%s per-cpu:", zone->name); 1275 1276 if (!populated_zone(zone)) { 1277 printk(" empty\n"); 1278 continue; 1279 } else 1280 printk("\n"); 1281 1282 for_each_online_cpu(cpu) { 1283 struct per_cpu_pageset *pageset; 1284 1285 pageset = zone_pcp(zone, cpu); 1286 1287 for (temperature = 0; temperature < 2; temperature++) 1288 printk("cpu %d %s: high %d, batch %d used:%d\n", 1289 cpu, 1290 temperature ? "cold" : "hot", 1291 pageset->pcp[temperature].high, 1292 pageset->pcp[temperature].batch, 1293 pageset->pcp[temperature].count); 1294 } 1295 } 1296 1297 get_zone_counts(&active, &inactive, &free); 1298 1299 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu " 1300 "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n", 1301 active, 1302 inactive, 1303 global_page_state(NR_FILE_DIRTY), 1304 global_page_state(NR_WRITEBACK), 1305 global_page_state(NR_UNSTABLE_NFS), 1306 nr_free_pages(), 1307 global_page_state(NR_SLAB), 1308 global_page_state(NR_FILE_MAPPED), 1309 global_page_state(NR_PAGETABLE)); 1310 1311 for_each_zone(zone) { 1312 int i; 1313 1314 show_node(zone); 1315 printk("%s" 1316 " free:%lukB" 1317 " min:%lukB" 1318 " low:%lukB" 1319 " high:%lukB" 1320 " active:%lukB" 1321 " inactive:%lukB" 1322 " present:%lukB" 1323 " pages_scanned:%lu" 1324 " all_unreclaimable? %s" 1325 "\n", 1326 zone->name, 1327 K(zone->free_pages), 1328 K(zone->pages_min), 1329 K(zone->pages_low), 1330 K(zone->pages_high), 1331 K(zone->nr_active), 1332 K(zone->nr_inactive), 1333 K(zone->present_pages), 1334 zone->pages_scanned, 1335 (zone->all_unreclaimable ? "yes" : "no") 1336 ); 1337 printk("lowmem_reserve[]:"); 1338 for (i = 0; i < MAX_NR_ZONES; i++) 1339 printk(" %lu", zone->lowmem_reserve[i]); 1340 printk("\n"); 1341 } 1342 1343 for_each_zone(zone) { 1344 unsigned long nr[MAX_ORDER], flags, order, total = 0; 1345 1346 show_node(zone); 1347 printk("%s: ", zone->name); 1348 if (!populated_zone(zone)) { 1349 printk("empty\n"); 1350 continue; 1351 } 1352 1353 spin_lock_irqsave(&zone->lock, flags); 1354 for (order = 0; order < MAX_ORDER; order++) { 1355 nr[order] = zone->free_area[order].nr_free; 1356 total += nr[order] << order; 1357 } 1358 spin_unlock_irqrestore(&zone->lock, flags); 1359 for (order = 0; order < MAX_ORDER; order++) 1360 printk("%lu*%lukB ", nr[order], K(1UL) << order); 1361 printk("= %lukB\n", K(total)); 1362 } 1363 1364 show_swap_cache_info(); 1365} 1366 1367/* 1368 * Builds allocation fallback zone lists. 1369 * 1370 * Add all populated zones of a node to the zonelist. 1371 */ 1372static int __meminit build_zonelists_node(pg_data_t *pgdat, 1373 struct zonelist *zonelist, int nr_zones, enum zone_type zone_type) 1374{ 1375 struct zone *zone; 1376 1377 BUG_ON(zone_type >= MAX_NR_ZONES); 1378 zone_type++; 1379 1380 do { 1381 zone_type--; 1382 zone = pgdat->node_zones + zone_type; 1383 if (populated_zone(zone)) { 1384 zonelist->zones[nr_zones++] = zone; 1385 check_highest_zone(zone_type); 1386 } 1387 1388 } while (zone_type); 1389 return nr_zones; 1390} 1391 1392#ifdef CONFIG_NUMA 1393#define MAX_NODE_LOAD (num_online_nodes()) 1394static int __meminitdata node_load[MAX_NUMNODES]; 1395/** 1396 * find_next_best_node - find the next node that should appear in a given node's fallback list 1397 * @node: node whose fallback list we're appending 1398 * @used_node_mask: nodemask_t of already used nodes 1399 * 1400 * We use a number of factors to determine which is the next node that should 1401 * appear on a given node's fallback list. The node should not have appeared 1402 * already in @node's fallback list, and it should be the next closest node 1403 * according to the distance array (which contains arbitrary distance values 1404 * from each node to each node in the system), and should also prefer nodes 1405 * with no CPUs, since presumably they'll have very little allocation pressure 1406 * on them otherwise. 1407 * It returns -1 if no node is found. 1408 */ 1409static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask) 1410{ 1411 int n, val; 1412 int min_val = INT_MAX; 1413 int best_node = -1; 1414 1415 /* Use the local node if we haven't already */ 1416 if (!node_isset(node, *used_node_mask)) { 1417 node_set(node, *used_node_mask); 1418 return node; 1419 } 1420 1421 for_each_online_node(n) { 1422 cpumask_t tmp; 1423 1424 /* Don't want a node to appear more than once */ 1425 if (node_isset(n, *used_node_mask)) 1426 continue; 1427 1428 /* Use the distance array to find the distance */ 1429 val = node_distance(node, n); 1430 1431 /* Penalize nodes under us ("prefer the next node") */ 1432 val += (n < node); 1433 1434 /* Give preference to headless and unused nodes */ 1435 tmp = node_to_cpumask(n); 1436 if (!cpus_empty(tmp)) 1437 val += PENALTY_FOR_NODE_WITH_CPUS; 1438 1439 /* Slight preference for less loaded node */ 1440 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 1441 val += node_load[n]; 1442 1443 if (val < min_val) { 1444 min_val = val; 1445 best_node = n; 1446 } 1447 } 1448 1449 if (best_node >= 0) 1450 node_set(best_node, *used_node_mask); 1451 1452 return best_node; 1453} 1454 1455static void __meminit build_zonelists(pg_data_t *pgdat) 1456{ 1457 int j, node, local_node; 1458 enum zone_type i; 1459 int prev_node, load; 1460 struct zonelist *zonelist; 1461 nodemask_t used_mask; 1462 1463 /* initialize zonelists */ 1464 for (i = 0; i < MAX_NR_ZONES; i++) { 1465 zonelist = pgdat->node_zonelists + i; 1466 zonelist->zones[0] = NULL; 1467 } 1468 1469 /* NUMA-aware ordering of nodes */ 1470 local_node = pgdat->node_id; 1471 load = num_online_nodes(); 1472 prev_node = local_node; 1473 nodes_clear(used_mask); 1474 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 1475 int distance = node_distance(local_node, node); 1476 1477 /* 1478 * If another node is sufficiently far away then it is better 1479 * to reclaim pages in a zone before going off node. 1480 */ 1481 if (distance > RECLAIM_DISTANCE) 1482 zone_reclaim_mode = 1; 1483 1484 /* 1485 * We don't want to pressure a particular node. 1486 * So adding penalty to the first node in same 1487 * distance group to make it round-robin. 1488 */ 1489 1490 if (distance != node_distance(local_node, prev_node)) 1491 node_load[node] += load; 1492 prev_node = node; 1493 load--; 1494 for (i = 0; i < MAX_NR_ZONES; i++) { 1495 zonelist = pgdat->node_zonelists + i; 1496 for (j = 0; zonelist->zones[j] != NULL; j++); 1497 1498 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1499 zonelist->zones[j] = NULL; 1500 } 1501 } 1502} 1503 1504#else /* CONFIG_NUMA */ 1505 1506static void __meminit build_zonelists(pg_data_t *pgdat) 1507{ 1508 int node, local_node; 1509 enum zone_type i,j; 1510 1511 local_node = pgdat->node_id; 1512 for (i = 0; i < MAX_NR_ZONES; i++) { 1513 struct zonelist *zonelist; 1514 1515 zonelist = pgdat->node_zonelists + i; 1516 1517 j = build_zonelists_node(pgdat, zonelist, 0, i); 1518 /* 1519 * Now we build the zonelist so that it contains the zones 1520 * of all the other nodes. 1521 * We don't want to pressure a particular node, so when 1522 * building the zones for node N, we make sure that the 1523 * zones coming right after the local ones are those from 1524 * node N+1 (modulo N) 1525 */ 1526 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 1527 if (!node_online(node)) 1528 continue; 1529 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1530 } 1531 for (node = 0; node < local_node; node++) { 1532 if (!node_online(node)) 1533 continue; 1534 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1535 } 1536 1537 zonelist->zones[j] = NULL; 1538 } 1539} 1540 1541#endif /* CONFIG_NUMA */ 1542 1543/* return values int ....just for stop_machine_run() */ 1544static int __meminit __build_all_zonelists(void *dummy) 1545{ 1546 int nid; 1547 for_each_online_node(nid) 1548 build_zonelists(NODE_DATA(nid)); 1549 return 0; 1550} 1551 1552void __meminit build_all_zonelists(void) 1553{ 1554 if (system_state == SYSTEM_BOOTING) { 1555 __build_all_zonelists(0); 1556 cpuset_init_current_mems_allowed(); 1557 } else { 1558 /* we have to stop all cpus to guaranntee there is no user 1559 of zonelist */ 1560 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); 1561 /* cpuset refresh routine should be here */ 1562 } 1563 vm_total_pages = nr_free_pagecache_pages(); 1564 printk("Built %i zonelists. Total pages: %ld\n", 1565 num_online_nodes(), vm_total_pages); 1566} 1567 1568/* 1569 * Helper functions to size the waitqueue hash table. 1570 * Essentially these want to choose hash table sizes sufficiently 1571 * large so that collisions trying to wait on pages are rare. 1572 * But in fact, the number of active page waitqueues on typical 1573 * systems is ridiculously low, less than 200. So this is even 1574 * conservative, even though it seems large. 1575 * 1576 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 1577 * waitqueues, i.e. the size of the waitq table given the number of pages. 1578 */ 1579#define PAGES_PER_WAITQUEUE 256 1580 1581#ifndef CONFIG_MEMORY_HOTPLUG 1582static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 1583{ 1584 unsigned long size = 1; 1585 1586 pages /= PAGES_PER_WAITQUEUE; 1587 1588 while (size < pages) 1589 size <<= 1; 1590 1591 /* 1592 * Once we have dozens or even hundreds of threads sleeping 1593 * on IO we've got bigger problems than wait queue collision. 1594 * Limit the size of the wait table to a reasonable size. 1595 */ 1596 size = min(size, 4096UL); 1597 1598 return max(size, 4UL); 1599} 1600#else 1601/* 1602 * A zone's size might be changed by hot-add, so it is not possible to determine 1603 * a suitable size for its wait_table. So we use the maximum size now. 1604 * 1605 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 1606 * 1607 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 1608 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 1609 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 1610 * 1611 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 1612 * or more by the traditional way. (See above). It equals: 1613 * 1614 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 1615 * ia64(16K page size) : = ( 8G + 4M)byte. 1616 * powerpc (64K page size) : = (32G +16M)byte. 1617 */ 1618static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 1619{ 1620 return 4096UL; 1621} 1622#endif 1623 1624/* 1625 * This is an integer logarithm so that shifts can be used later 1626 * to extract the more random high bits from the multiplicative 1627 * hash function before the remainder is taken. 1628 */ 1629static inline unsigned long wait_table_bits(unsigned long size) 1630{ 1631 return ffz(~size); 1632} 1633 1634#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 1635 1636static void __init calculate_zone_totalpages(struct pglist_data *pgdat, 1637 unsigned long *zones_size, unsigned long *zholes_size) 1638{ 1639 unsigned long realtotalpages, totalpages = 0; 1640 enum zone_type i; 1641 1642 for (i = 0; i < MAX_NR_ZONES; i++) 1643 totalpages += zones_size[i]; 1644 pgdat->node_spanned_pages = totalpages; 1645 1646 realtotalpages = totalpages; 1647 if (zholes_size) 1648 for (i = 0; i < MAX_NR_ZONES; i++) 1649 realtotalpages -= zholes_size[i]; 1650 pgdat->node_present_pages = realtotalpages; 1651 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); 1652} 1653 1654 1655/* 1656 * Initially all pages are reserved - free ones are freed 1657 * up by free_all_bootmem() once the early boot process is 1658 * done. Non-atomic initialization, single-pass. 1659 */ 1660void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 1661 unsigned long start_pfn) 1662{ 1663 struct page *page; 1664 unsigned long end_pfn = start_pfn + size; 1665 unsigned long pfn; 1666 1667 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 1668 if (!early_pfn_valid(pfn)) 1669 continue; 1670 page = pfn_to_page(pfn); 1671 set_page_links(page, zone, nid, pfn); 1672 init_page_count(page); 1673 reset_page_mapcount(page); 1674 SetPageReserved(page); 1675 INIT_LIST_HEAD(&page->lru); 1676#ifdef WANT_PAGE_VIRTUAL 1677 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 1678 if (!is_highmem_idx(zone)) 1679 set_page_address(page, __va(pfn << PAGE_SHIFT)); 1680#endif 1681 } 1682} 1683 1684void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, 1685 unsigned long size) 1686{ 1687 int order; 1688 for (order = 0; order < MAX_ORDER ; order++) { 1689 INIT_LIST_HEAD(&zone->free_area[order].free_list); 1690 zone->free_area[order].nr_free = 0; 1691 } 1692} 1693 1694#define ZONETABLE_INDEX(x, zone_nr) ((x << ZONES_SHIFT) | zone_nr) 1695void zonetable_add(struct zone *zone, int nid, enum zone_type zid, 1696 unsigned long pfn, unsigned long size) 1697{ 1698 unsigned long snum = pfn_to_section_nr(pfn); 1699 unsigned long end = pfn_to_section_nr(pfn + size); 1700 1701 if (FLAGS_HAS_NODE) 1702 zone_table[ZONETABLE_INDEX(nid, zid)] = zone; 1703 else 1704 for (; snum <= end; snum++) 1705 zone_table[ZONETABLE_INDEX(snum, zid)] = zone; 1706} 1707 1708#ifndef __HAVE_ARCH_MEMMAP_INIT 1709#define memmap_init(size, nid, zone, start_pfn) \ 1710 memmap_init_zone((size), (nid), (zone), (start_pfn)) 1711#endif 1712 1713static int __cpuinit zone_batchsize(struct zone *zone) 1714{ 1715 int batch; 1716 1717 /* 1718 * The per-cpu-pages pools are set to around 1000th of the 1719 * size of the zone. But no more than 1/2 of a meg. 1720 * 1721 * OK, so we don't know how big the cache is. So guess. 1722 */ 1723 batch = zone->present_pages / 1024; 1724 if (batch * PAGE_SIZE > 512 * 1024) 1725 batch = (512 * 1024) / PAGE_SIZE; 1726 batch /= 4; /* We effectively *= 4 below */ 1727 if (batch < 1) 1728 batch = 1; 1729 1730 /* 1731 * Clamp the batch to a 2^n - 1 value. Having a power 1732 * of 2 value was found to be more likely to have 1733 * suboptimal cache aliasing properties in some cases. 1734 * 1735 * For example if 2 tasks are alternately allocating 1736 * batches of pages, one task can end up with a lot 1737 * of pages of one half of the possible page colors 1738 * and the other with pages of the other colors. 1739 */ 1740 batch = (1 << (fls(batch + batch/2)-1)) - 1; 1741 1742 return batch; 1743} 1744 1745inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 1746{ 1747 struct per_cpu_pages *pcp; 1748 1749 memset(p, 0, sizeof(*p)); 1750 1751 pcp = &p->pcp[0]; /* hot */ 1752 pcp->count = 0; 1753 pcp->high = 6 * batch; 1754 pcp->batch = max(1UL, 1 * batch); 1755 INIT_LIST_HEAD(&pcp->list); 1756 1757 pcp = &p->pcp[1]; /* cold*/ 1758 pcp->count = 0; 1759 pcp->high = 2 * batch; 1760 pcp->batch = max(1UL, batch/2); 1761 INIT_LIST_HEAD(&pcp->list); 1762} 1763 1764/* 1765 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist 1766 * to the value high for the pageset p. 1767 */ 1768 1769static void setup_pagelist_highmark(struct per_cpu_pageset *p, 1770 unsigned long high) 1771{ 1772 struct per_cpu_pages *pcp; 1773 1774 pcp = &p->pcp[0]; /* hot list */ 1775 pcp->high = high; 1776 pcp->batch = max(1UL, high/4); 1777 if ((high/4) > (PAGE_SHIFT * 8)) 1778 pcp->batch = PAGE_SHIFT * 8; 1779} 1780 1781 1782#ifdef CONFIG_NUMA 1783/* 1784 * Boot pageset table. One per cpu which is going to be used for all 1785 * zones and all nodes. The parameters will be set in such a way 1786 * that an item put on a list will immediately be handed over to 1787 * the buddy list. This is safe since pageset manipulation is done 1788 * with interrupts disabled. 1789 * 1790 * Some NUMA counter updates may also be caught by the boot pagesets. 1791 * 1792 * The boot_pagesets must be kept even after bootup is complete for 1793 * unused processors and/or zones. They do play a role for bootstrapping 1794 * hotplugged processors. 1795 * 1796 * zoneinfo_show() and maybe other functions do 1797 * not check if the processor is online before following the pageset pointer. 1798 * Other parts of the kernel may not check if the zone is available. 1799 */ 1800static struct per_cpu_pageset boot_pageset[NR_CPUS]; 1801 1802/* 1803 * Dynamically allocate memory for the 1804 * per cpu pageset array in struct zone. 1805 */ 1806static int __cpuinit process_zones(int cpu) 1807{ 1808 struct zone *zone, *dzone; 1809 1810 for_each_zone(zone) { 1811 1812 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 1813 GFP_KERNEL, cpu_to_node(cpu)); 1814 if (!zone_pcp(zone, cpu)) 1815 goto bad; 1816 1817 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); 1818 1819 if (percpu_pagelist_fraction) 1820 setup_pagelist_highmark(zone_pcp(zone, cpu), 1821 (zone->present_pages / percpu_pagelist_fraction)); 1822 } 1823 1824 return 0; 1825bad: 1826 for_each_zone(dzone) { 1827 if (dzone == zone) 1828 break; 1829 kfree(zone_pcp(dzone, cpu)); 1830 zone_pcp(dzone, cpu) = NULL; 1831 } 1832 return -ENOMEM; 1833} 1834 1835static inline void free_zone_pagesets(int cpu) 1836{ 1837 struct zone *zone; 1838 1839 for_each_zone(zone) { 1840 struct per_cpu_pageset *pset = zone_pcp(zone, cpu); 1841 1842 /* Free per_cpu_pageset if it is slab allocated */ 1843 if (pset != &boot_pageset[cpu]) 1844 kfree(pset); 1845 zone_pcp(zone, cpu) = NULL; 1846 } 1847} 1848 1849static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, 1850 unsigned long action, 1851 void *hcpu) 1852{ 1853 int cpu = (long)hcpu; 1854 int ret = NOTIFY_OK; 1855 1856 switch (action) { 1857 case CPU_UP_PREPARE: 1858 if (process_zones(cpu)) 1859 ret = NOTIFY_BAD; 1860 break; 1861 case CPU_UP_CANCELED: 1862 case CPU_DEAD: 1863 free_zone_pagesets(cpu); 1864 break; 1865 default: 1866 break; 1867 } 1868 return ret; 1869} 1870 1871static struct notifier_block __cpuinitdata pageset_notifier = 1872 { &pageset_cpuup_callback, NULL, 0 }; 1873 1874void __init setup_per_cpu_pageset(void) 1875{ 1876 int err; 1877 1878 /* Initialize per_cpu_pageset for cpu 0. 1879 * A cpuup callback will do this for every cpu 1880 * as it comes online 1881 */ 1882 err = process_zones(smp_processor_id()); 1883 BUG_ON(err); 1884 register_cpu_notifier(&pageset_notifier); 1885} 1886 1887#endif 1888 1889static __meminit 1890int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 1891{ 1892 int i; 1893 struct pglist_data *pgdat = zone->zone_pgdat; 1894 size_t alloc_size; 1895 1896 /* 1897 * The per-page waitqueue mechanism uses hashed waitqueues 1898 * per zone. 1899 */ 1900 zone->wait_table_hash_nr_entries = 1901 wait_table_hash_nr_entries(zone_size_pages); 1902 zone->wait_table_bits = 1903 wait_table_bits(zone->wait_table_hash_nr_entries); 1904 alloc_size = zone->wait_table_hash_nr_entries 1905 * sizeof(wait_queue_head_t); 1906 1907 if (system_state == SYSTEM_BOOTING) { 1908 zone->wait_table = (wait_queue_head_t *) 1909 alloc_bootmem_node(pgdat, alloc_size); 1910 } else { 1911 /* 1912 * This case means that a zone whose size was 0 gets new memory 1913 * via memory hot-add. 1914 * But it may be the case that a new node was hot-added. In 1915 * this case vmalloc() will not be able to use this new node's 1916 * memory - this wait_table must be initialized to use this new 1917 * node itself as well. 1918 * To use this new node's memory, further consideration will be 1919 * necessary. 1920 */ 1921 zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size); 1922 } 1923 if (!zone->wait_table) 1924 return -ENOMEM; 1925 1926 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) 1927 init_waitqueue_head(zone->wait_table + i); 1928 1929 return 0; 1930} 1931 1932static __meminit void zone_pcp_init(struct zone *zone) 1933{ 1934 int cpu; 1935 unsigned long batch = zone_batchsize(zone); 1936 1937 for (cpu = 0; cpu < NR_CPUS; cpu++) { 1938#ifdef CONFIG_NUMA 1939 /* Early boot. Slab allocator not functional yet */ 1940 zone_pcp(zone, cpu) = &boot_pageset[cpu]; 1941 setup_pageset(&boot_pageset[cpu],0); 1942#else 1943 setup_pageset(zone_pcp(zone,cpu), batch); 1944#endif 1945 } 1946 if (zone->present_pages) 1947 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 1948 zone->name, zone->present_pages, batch); 1949} 1950 1951__meminit int init_currently_empty_zone(struct zone *zone, 1952 unsigned long zone_start_pfn, 1953 unsigned long size) 1954{ 1955 struct pglist_data *pgdat = zone->zone_pgdat; 1956 int ret; 1957 ret = zone_wait_table_init(zone, size); 1958 if (ret) 1959 return ret; 1960 pgdat->nr_zones = zone_idx(zone) + 1; 1961 1962 zone->zone_start_pfn = zone_start_pfn; 1963 1964 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); 1965 1966 zone_init_free_lists(pgdat, zone, zone->spanned_pages); 1967 1968 return 0; 1969} 1970 1971/* 1972 * Set up the zone data structures: 1973 * - mark all pages reserved 1974 * - mark all memory queues empty 1975 * - clear the memory bitmaps 1976 */ 1977static void __meminit free_area_init_core(struct pglist_data *pgdat, 1978 unsigned long *zones_size, unsigned long *zholes_size) 1979{ 1980 enum zone_type j; 1981 int nid = pgdat->node_id; 1982 unsigned long zone_start_pfn = pgdat->node_start_pfn; 1983 int ret; 1984 1985 pgdat_resize_init(pgdat); 1986 pgdat->nr_zones = 0; 1987 init_waitqueue_head(&pgdat->kswapd_wait); 1988 pgdat->kswapd_max_order = 0; 1989 1990 for (j = 0; j < MAX_NR_ZONES; j++) { 1991 struct zone *zone = pgdat->node_zones + j; 1992 unsigned long size, realsize; 1993 1994 realsize = size = zones_size[j]; 1995 if (zholes_size) 1996 realsize -= zholes_size[j]; 1997 1998 if (!is_highmem_idx(j)) 1999 nr_kernel_pages += realsize; 2000 nr_all_pages += realsize; 2001 2002 zone->spanned_pages = size; 2003 zone->present_pages = realsize; 2004#ifdef CONFIG_NUMA 2005 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio) 2006 / 100; 2007#endif 2008 zone->name = zone_names[j]; 2009 spin_lock_init(&zone->lock); 2010 spin_lock_init(&zone->lru_lock); 2011 zone_seqlock_init(zone); 2012 zone->zone_pgdat = pgdat; 2013 zone->free_pages = 0; 2014 2015 zone->temp_priority = zone->prev_priority = DEF_PRIORITY; 2016 2017 zone_pcp_init(zone); 2018 INIT_LIST_HEAD(&zone->active_list); 2019 INIT_LIST_HEAD(&zone->inactive_list); 2020 zone->nr_scan_active = 0; 2021 zone->nr_scan_inactive = 0; 2022 zone->nr_active = 0; 2023 zone->nr_inactive = 0; 2024 zap_zone_vm_stats(zone); 2025 atomic_set(&zone->reclaim_in_progress, 0); 2026 if (!size) 2027 continue; 2028 2029 zonetable_add(zone, nid, j, zone_start_pfn, size); 2030 ret = init_currently_empty_zone(zone, zone_start_pfn, size); 2031 BUG_ON(ret); 2032 zone_start_pfn += size; 2033 } 2034} 2035 2036static void __init alloc_node_mem_map(struct pglist_data *pgdat) 2037{ 2038 /* Skip empty nodes */ 2039 if (!pgdat->node_spanned_pages) 2040 return; 2041 2042#ifdef CONFIG_FLAT_NODE_MEM_MAP 2043 /* ia64 gets its own node_mem_map, before this, without bootmem */ 2044 if (!pgdat->node_mem_map) { 2045 unsigned long size, start, end; 2046 struct page *map; 2047 2048 /* 2049 * The zone's endpoints aren't required to be MAX_ORDER 2050 * aligned but the node_mem_map endpoints must be in order 2051 * for the buddy allocator to function correctly. 2052 */ 2053 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 2054 end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 2055 end = ALIGN(end, MAX_ORDER_NR_PAGES); 2056 size = (end - start) * sizeof(struct page); 2057 map = alloc_remap(pgdat->node_id, size); 2058 if (!map) 2059 map = alloc_bootmem_node(pgdat, size); 2060 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 2061 } 2062#ifdef CONFIG_FLATMEM 2063 /* 2064 * With no DISCONTIG, the global mem_map is just set as node 0's 2065 */ 2066 if (pgdat == NODE_DATA(0)) 2067 mem_map = NODE_DATA(0)->node_mem_map; 2068#endif 2069#endif /* CONFIG_FLAT_NODE_MEM_MAP */ 2070} 2071 2072void __meminit free_area_init_node(int nid, struct pglist_data *pgdat, 2073 unsigned long *zones_size, unsigned long node_start_pfn, 2074 unsigned long *zholes_size) 2075{ 2076 pgdat->node_id = nid; 2077 pgdat->node_start_pfn = node_start_pfn; 2078 calculate_zone_totalpages(pgdat, zones_size, zholes_size); 2079 2080 alloc_node_mem_map(pgdat); 2081 2082 free_area_init_core(pgdat, zones_size, zholes_size); 2083} 2084 2085#ifndef CONFIG_NEED_MULTIPLE_NODES 2086static bootmem_data_t contig_bootmem_data; 2087struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data }; 2088 2089EXPORT_SYMBOL(contig_page_data); 2090#endif 2091 2092void __init free_area_init(unsigned long *zones_size) 2093{ 2094 free_area_init_node(0, NODE_DATA(0), zones_size, 2095 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 2096} 2097 2098#ifdef CONFIG_HOTPLUG_CPU 2099static int page_alloc_cpu_notify(struct notifier_block *self, 2100 unsigned long action, void *hcpu) 2101{ 2102 int cpu = (unsigned long)hcpu; 2103 2104 if (action == CPU_DEAD) { 2105 local_irq_disable(); 2106 __drain_pages(cpu); 2107 vm_events_fold_cpu(cpu); 2108 local_irq_enable(); 2109 refresh_cpu_vm_stats(cpu); 2110 } 2111 return NOTIFY_OK; 2112} 2113#endif /* CONFIG_HOTPLUG_CPU */ 2114 2115void __init page_alloc_init(void) 2116{ 2117 hotcpu_notifier(page_alloc_cpu_notify, 0); 2118} 2119 2120/* 2121 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio 2122 * or min_free_kbytes changes. 2123 */ 2124static void calculate_totalreserve_pages(void) 2125{ 2126 struct pglist_data *pgdat; 2127 unsigned long reserve_pages = 0; 2128 enum zone_type i, j; 2129 2130 for_each_online_pgdat(pgdat) { 2131 for (i = 0; i < MAX_NR_ZONES; i++) { 2132 struct zone *zone = pgdat->node_zones + i; 2133 unsigned long max = 0; 2134 2135 /* Find valid and maximum lowmem_reserve in the zone */ 2136 for (j = i; j < MAX_NR_ZONES; j++) { 2137 if (zone->lowmem_reserve[j] > max) 2138 max = zone->lowmem_reserve[j]; 2139 } 2140 2141 /* we treat pages_high as reserved pages. */ 2142 max += zone->pages_high; 2143 2144 if (max > zone->present_pages) 2145 max = zone->present_pages; 2146 reserve_pages += max; 2147 } 2148 } 2149 totalreserve_pages = reserve_pages; 2150} 2151 2152/* 2153 * setup_per_zone_lowmem_reserve - called whenever 2154 * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone 2155 * has a correct pages reserved value, so an adequate number of 2156 * pages are left in the zone after a successful __alloc_pages(). 2157 */ 2158static void setup_per_zone_lowmem_reserve(void) 2159{ 2160 struct pglist_data *pgdat; 2161 enum zone_type j, idx; 2162 2163 for_each_online_pgdat(pgdat) { 2164 for (j = 0; j < MAX_NR_ZONES; j++) { 2165 struct zone *zone = pgdat->node_zones + j; 2166 unsigned long present_pages = zone->present_pages; 2167 2168 zone->lowmem_reserve[j] = 0; 2169 2170 idx = j; 2171 while (idx) { 2172 struct zone *lower_zone; 2173 2174 idx--; 2175 2176 if (sysctl_lowmem_reserve_ratio[idx] < 1) 2177 sysctl_lowmem_reserve_ratio[idx] = 1; 2178 2179 lower_zone = pgdat->node_zones + idx; 2180 lower_zone->lowmem_reserve[j] = present_pages / 2181 sysctl_lowmem_reserve_ratio[idx]; 2182 present_pages += lower_zone->present_pages; 2183 } 2184 } 2185 } 2186 2187 /* update totalreserve_pages */ 2188 calculate_totalreserve_pages(); 2189} 2190 2191/* 2192 * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures 2193 * that the pages_{min,low,high} values for each zone are set correctly 2194 * with respect to min_free_kbytes. 2195 */ 2196void setup_per_zone_pages_min(void) 2197{ 2198 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 2199 unsigned long lowmem_pages = 0; 2200 struct zone *zone; 2201 unsigned long flags; 2202 2203 /* Calculate total number of !ZONE_HIGHMEM pages */ 2204 for_each_zone(zone) { 2205 if (!is_highmem(zone)) 2206 lowmem_pages += zone->present_pages; 2207 } 2208 2209 for_each_zone(zone) { 2210 u64 tmp; 2211 2212 spin_lock_irqsave(&zone->lru_lock, flags); 2213 tmp = (u64)pages_min * zone->present_pages; 2214 do_div(tmp, lowmem_pages); 2215 if (is_highmem(zone)) { 2216 /* 2217 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 2218 * need highmem pages, so cap pages_min to a small 2219 * value here. 2220 * 2221 * The (pages_high-pages_low) and (pages_low-pages_min) 2222 * deltas controls asynch page reclaim, and so should 2223 * not be capped for highmem. 2224 */ 2225 int min_pages; 2226 2227 min_pages = zone->present_pages / 1024; 2228 if (min_pages < SWAP_CLUSTER_MAX) 2229 min_pages = SWAP_CLUSTER_MAX; 2230 if (min_pages > 128) 2231 min_pages = 128; 2232 zone->pages_min = min_pages; 2233 } else { 2234 /* 2235 * If it's a lowmem zone, reserve a number of pages 2236 * proportionate to the zone's size. 2237 */ 2238 zone->pages_min = tmp; 2239 } 2240 2241 zone->pages_low = zone->pages_min + (tmp >> 2); 2242 zone->pages_high = zone->pages_min + (tmp >> 1); 2243 spin_unlock_irqrestore(&zone->lru_lock, flags); 2244 } 2245 2246 /* update totalreserve_pages */ 2247 calculate_totalreserve_pages(); 2248} 2249 2250/* 2251 * Initialise min_free_kbytes. 2252 * 2253 * For small machines we want it small (128k min). For large machines 2254 * we want it large (64MB max). But it is not linear, because network 2255 * bandwidth does not increase linearly with machine size. We use 2256 * 2257 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 2258 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 2259 * 2260 * which yields 2261 * 2262 * 16MB: 512k 2263 * 32MB: 724k 2264 * 64MB: 1024k 2265 * 128MB: 1448k 2266 * 256MB: 2048k 2267 * 512MB: 2896k 2268 * 1024MB: 4096k 2269 * 2048MB: 5792k 2270 * 4096MB: 8192k 2271 * 8192MB: 11584k 2272 * 16384MB: 16384k 2273 */ 2274static int __init init_per_zone_pages_min(void) 2275{ 2276 unsigned long lowmem_kbytes; 2277 2278 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 2279 2280 min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 2281 if (min_free_kbytes < 128) 2282 min_free_kbytes = 128; 2283 if (min_free_kbytes > 65536) 2284 min_free_kbytes = 65536; 2285 setup_per_zone_pages_min(); 2286 setup_per_zone_lowmem_reserve(); 2287 return 0; 2288} 2289module_init(init_per_zone_pages_min) 2290 2291/* 2292 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 2293 * that we can call two helper functions whenever min_free_kbytes 2294 * changes. 2295 */ 2296int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 2297 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2298{ 2299 proc_dointvec(table, write, file, buffer, length, ppos); 2300 setup_per_zone_pages_min(); 2301 return 0; 2302} 2303 2304#ifdef CONFIG_NUMA 2305int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, 2306 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2307{ 2308 struct zone *zone; 2309 int rc; 2310 2311 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 2312 if (rc) 2313 return rc; 2314 2315 for_each_zone(zone) 2316 zone->min_unmapped_pages = (zone->present_pages * 2317 sysctl_min_unmapped_ratio) / 100; 2318 return 0; 2319} 2320#endif 2321 2322/* 2323 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 2324 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 2325 * whenever sysctl_lowmem_reserve_ratio changes. 2326 * 2327 * The reserve ratio obviously has absolutely no relation with the 2328 * pages_min watermarks. The lowmem reserve ratio can only make sense 2329 * if in function of the boot time zone sizes. 2330 */ 2331int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 2332 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2333{ 2334 proc_dointvec_minmax(table, write, file, buffer, length, ppos); 2335 setup_per_zone_lowmem_reserve(); 2336 return 0; 2337} 2338 2339/* 2340 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 2341 * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist 2342 * can have before it gets flushed back to buddy allocator. 2343 */ 2344 2345int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 2346 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2347{ 2348 struct zone *zone; 2349 unsigned int cpu; 2350 int ret; 2351 2352 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 2353 if (!write || (ret == -EINVAL)) 2354 return ret; 2355 for_each_zone(zone) { 2356 for_each_online_cpu(cpu) { 2357 unsigned long high; 2358 high = zone->present_pages / percpu_pagelist_fraction; 2359 setup_pagelist_highmark(zone_pcp(zone, cpu), high); 2360 } 2361 } 2362 return 0; 2363} 2364 2365int hashdist = HASHDIST_DEFAULT; 2366 2367#ifdef CONFIG_NUMA 2368static int __init set_hashdist(char *str) 2369{ 2370 if (!str) 2371 return 0; 2372 hashdist = simple_strtoul(str, &str, 0); 2373 return 1; 2374} 2375__setup("hashdist=", set_hashdist); 2376#endif 2377 2378/* 2379 * allocate a large system hash table from bootmem 2380 * - it is assumed that the hash table must contain an exact power-of-2 2381 * quantity of entries 2382 * - limit is the number of hash buckets, not the total allocation size 2383 */ 2384void *__init alloc_large_system_hash(const char *tablename, 2385 unsigned long bucketsize, 2386 unsigned long numentries, 2387 int scale, 2388 int flags, 2389 unsigned int *_hash_shift, 2390 unsigned int *_hash_mask, 2391 unsigned long limit) 2392{ 2393 unsigned long long max = limit; 2394 unsigned long log2qty, size; 2395 void *table = NULL; 2396 2397 /* allow the kernel cmdline to have a say */ 2398 if (!numentries) { 2399 /* round applicable memory size up to nearest megabyte */ 2400 numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages; 2401 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 2402 numentries >>= 20 - PAGE_SHIFT; 2403 numentries <<= 20 - PAGE_SHIFT; 2404 2405 /* limit to 1 bucket per 2^scale bytes of low memory */ 2406 if (scale > PAGE_SHIFT) 2407 numentries >>= (scale - PAGE_SHIFT); 2408 else 2409 numentries <<= (PAGE_SHIFT - scale); 2410 } 2411 numentries = roundup_pow_of_two(numentries); 2412 2413 /* limit allocation size to 1/16 total memory by default */ 2414 if (max == 0) { 2415 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 2416 do_div(max, bucketsize); 2417 } 2418 2419 if (numentries > max) 2420 numentries = max; 2421 2422 log2qty = long_log2(numentries); 2423 2424 do { 2425 size = bucketsize << log2qty; 2426 if (flags & HASH_EARLY) 2427 table = alloc_bootmem(size); 2428 else if (hashdist) 2429 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 2430 else { 2431 unsigned long order; 2432 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++) 2433 ; 2434 table = (void*) __get_free_pages(GFP_ATOMIC, order); 2435 } 2436 } while (!table && size > PAGE_SIZE && --log2qty); 2437 2438 if (!table) 2439 panic("Failed to allocate %s hash table\n", tablename); 2440 2441 printk("%s hash table entries: %d (order: %d, %lu bytes)\n", 2442 tablename, 2443 (1U << log2qty), 2444 long_log2(size) - PAGE_SHIFT, 2445 size); 2446 2447 if (_hash_shift) 2448 *_hash_shift = log2qty; 2449 if (_hash_mask) 2450 *_hash_mask = (1 << log2qty) - 1; 2451 2452 return table; 2453} 2454 2455#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE 2456struct page *pfn_to_page(unsigned long pfn) 2457{ 2458 return __pfn_to_page(pfn); 2459} 2460unsigned long page_to_pfn(struct page *page) 2461{ 2462 return __page_to_pfn(page); 2463} 2464EXPORT_SYMBOL(pfn_to_page); 2465EXPORT_SYMBOL(page_to_pfn); 2466#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ 2467