page_alloc.c revision 1192d526412b1b8ccb1493064cea06efc12c772b
1/* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17#include <linux/stddef.h> 18#include <linux/mm.h> 19#include <linux/swap.h> 20#include <linux/interrupt.h> 21#include <linux/pagemap.h> 22#include <linux/bootmem.h> 23#include <linux/compiler.h> 24#include <linux/kernel.h> 25#include <linux/module.h> 26#include <linux/suspend.h> 27#include <linux/pagevec.h> 28#include <linux/blkdev.h> 29#include <linux/slab.h> 30#include <linux/notifier.h> 31#include <linux/topology.h> 32#include <linux/sysctl.h> 33#include <linux/cpu.h> 34#include <linux/cpuset.h> 35#include <linux/memory_hotplug.h> 36#include <linux/nodemask.h> 37#include <linux/vmalloc.h> 38#include <linux/mempolicy.h> 39#include <linux/stop_machine.h> 40 41#include <asm/tlbflush.h> 42#include <asm/div64.h> 43#include "internal.h" 44 45/* 46 * MCD - HACK: Find somewhere to initialize this EARLY, or make this 47 * initializer cleaner 48 */ 49nodemask_t node_online_map __read_mostly = { { [0] = 1UL } }; 50EXPORT_SYMBOL(node_online_map); 51nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL; 52EXPORT_SYMBOL(node_possible_map); 53unsigned long totalram_pages __read_mostly; 54unsigned long totalreserve_pages __read_mostly; 55long nr_swap_pages; 56int percpu_pagelist_fraction; 57 58static void __free_pages_ok(struct page *page, unsigned int order); 59 60/* 61 * results with 256, 32 in the lowmem_reserve sysctl: 62 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 63 * 1G machine -> (16M dma, 784M normal, 224M high) 64 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 65 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 66 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA 67 * 68 * TBD: should special case ZONE_DMA32 machines here - in those we normally 69 * don't need any ZONE_NORMAL reservation 70 */ 71int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 72 256, 73#ifdef CONFIG_ZONE_DMA32 74 256, 75#endif 76#ifdef CONFIG_HIGHMEM 77 32 78#endif 79}; 80 81EXPORT_SYMBOL(totalram_pages); 82 83/* 84 * Used by page_zone() to look up the address of the struct zone whose 85 * id is encoded in the upper bits of page->flags 86 */ 87struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly; 88EXPORT_SYMBOL(zone_table); 89 90static char *zone_names[MAX_NR_ZONES] = { 91 "DMA", 92#ifdef CONFIG_ZONE_DMA32 93 "DMA32", 94#endif 95 "Normal", 96#ifdef CONFIG_HIGHMEM 97 "HighMem" 98#endif 99}; 100 101int min_free_kbytes = 1024; 102 103unsigned long __meminitdata nr_kernel_pages; 104unsigned long __meminitdata nr_all_pages; 105 106#ifdef CONFIG_DEBUG_VM 107static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 108{ 109 int ret = 0; 110 unsigned seq; 111 unsigned long pfn = page_to_pfn(page); 112 113 do { 114 seq = zone_span_seqbegin(zone); 115 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 116 ret = 1; 117 else if (pfn < zone->zone_start_pfn) 118 ret = 1; 119 } while (zone_span_seqretry(zone, seq)); 120 121 return ret; 122} 123 124static int page_is_consistent(struct zone *zone, struct page *page) 125{ 126#ifdef CONFIG_HOLES_IN_ZONE 127 if (!pfn_valid(page_to_pfn(page))) 128 return 0; 129#endif 130 if (zone != page_zone(page)) 131 return 0; 132 133 return 1; 134} 135/* 136 * Temporary debugging check for pages not lying within a given zone. 137 */ 138static int bad_range(struct zone *zone, struct page *page) 139{ 140 if (page_outside_zone_boundaries(zone, page)) 141 return 1; 142 if (!page_is_consistent(zone, page)) 143 return 1; 144 145 return 0; 146} 147#else 148static inline int bad_range(struct zone *zone, struct page *page) 149{ 150 return 0; 151} 152#endif 153 154static void bad_page(struct page *page) 155{ 156 printk(KERN_EMERG "Bad page state in process '%s'\n" 157 KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" 158 KERN_EMERG "Trying to fix it up, but a reboot is needed\n" 159 KERN_EMERG "Backtrace:\n", 160 current->comm, page, (int)(2*sizeof(unsigned long)), 161 (unsigned long)page->flags, page->mapping, 162 page_mapcount(page), page_count(page)); 163 dump_stack(); 164 page->flags &= ~(1 << PG_lru | 165 1 << PG_private | 166 1 << PG_locked | 167 1 << PG_active | 168 1 << PG_dirty | 169 1 << PG_reclaim | 170 1 << PG_slab | 171 1 << PG_swapcache | 172 1 << PG_writeback | 173 1 << PG_buddy ); 174 set_page_count(page, 0); 175 reset_page_mapcount(page); 176 page->mapping = NULL; 177 add_taint(TAINT_BAD_PAGE); 178} 179 180/* 181 * Higher-order pages are called "compound pages". They are structured thusly: 182 * 183 * The first PAGE_SIZE page is called the "head page". 184 * 185 * The remaining PAGE_SIZE pages are called "tail pages". 186 * 187 * All pages have PG_compound set. All pages have their ->private pointing at 188 * the head page (even the head page has this). 189 * 190 * The first tail page's ->lru.next holds the address of the compound page's 191 * put_page() function. Its ->lru.prev holds the order of allocation. 192 * This usage means that zero-order pages may not be compound. 193 */ 194 195static void free_compound_page(struct page *page) 196{ 197 __free_pages_ok(page, (unsigned long)page[1].lru.prev); 198} 199 200static void prep_compound_page(struct page *page, unsigned long order) 201{ 202 int i; 203 int nr_pages = 1 << order; 204 205 page[1].lru.next = (void *)free_compound_page; /* set dtor */ 206 page[1].lru.prev = (void *)order; 207 for (i = 0; i < nr_pages; i++) { 208 struct page *p = page + i; 209 210 __SetPageCompound(p); 211 set_page_private(p, (unsigned long)page); 212 } 213} 214 215static void destroy_compound_page(struct page *page, unsigned long order) 216{ 217 int i; 218 int nr_pages = 1 << order; 219 220 if (unlikely((unsigned long)page[1].lru.prev != order)) 221 bad_page(page); 222 223 for (i = 0; i < nr_pages; i++) { 224 struct page *p = page + i; 225 226 if (unlikely(!PageCompound(p) | 227 (page_private(p) != (unsigned long)page))) 228 bad_page(page); 229 __ClearPageCompound(p); 230 } 231} 232 233static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 234{ 235 int i; 236 237 VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM); 238 /* 239 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 240 * and __GFP_HIGHMEM from hard or soft interrupt context. 241 */ 242 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt()); 243 for (i = 0; i < (1 << order); i++) 244 clear_highpage(page + i); 245} 246 247/* 248 * function for dealing with page's order in buddy system. 249 * zone->lock is already acquired when we use these. 250 * So, we don't need atomic page->flags operations here. 251 */ 252static inline unsigned long page_order(struct page *page) 253{ 254 return page_private(page); 255} 256 257static inline void set_page_order(struct page *page, int order) 258{ 259 set_page_private(page, order); 260 __SetPageBuddy(page); 261} 262 263static inline void rmv_page_order(struct page *page) 264{ 265 __ClearPageBuddy(page); 266 set_page_private(page, 0); 267} 268 269/* 270 * Locate the struct page for both the matching buddy in our 271 * pair (buddy1) and the combined O(n+1) page they form (page). 272 * 273 * 1) Any buddy B1 will have an order O twin B2 which satisfies 274 * the following equation: 275 * B2 = B1 ^ (1 << O) 276 * For example, if the starting buddy (buddy2) is #8 its order 277 * 1 buddy is #10: 278 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 279 * 280 * 2) Any buddy B will have an order O+1 parent P which 281 * satisfies the following equation: 282 * P = B & ~(1 << O) 283 * 284 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER 285 */ 286static inline struct page * 287__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order) 288{ 289 unsigned long buddy_idx = page_idx ^ (1 << order); 290 291 return page + (buddy_idx - page_idx); 292} 293 294static inline unsigned long 295__find_combined_index(unsigned long page_idx, unsigned int order) 296{ 297 return (page_idx & ~(1 << order)); 298} 299 300/* 301 * This function checks whether a page is free && is the buddy 302 * we can do coalesce a page and its buddy if 303 * (a) the buddy is not in a hole && 304 * (b) the buddy is in the buddy system && 305 * (c) a page and its buddy have the same order && 306 * (d) a page and its buddy are in the same zone. 307 * 308 * For recording whether a page is in the buddy system, we use PG_buddy. 309 * Setting, clearing, and testing PG_buddy is serialized by zone->lock. 310 * 311 * For recording page's order, we use page_private(page). 312 */ 313static inline int page_is_buddy(struct page *page, struct page *buddy, 314 int order) 315{ 316#ifdef CONFIG_HOLES_IN_ZONE 317 if (!pfn_valid(page_to_pfn(buddy))) 318 return 0; 319#endif 320 321 if (page_zone_id(page) != page_zone_id(buddy)) 322 return 0; 323 324 if (PageBuddy(buddy) && page_order(buddy) == order) { 325 BUG_ON(page_count(buddy) != 0); 326 return 1; 327 } 328 return 0; 329} 330 331/* 332 * Freeing function for a buddy system allocator. 333 * 334 * The concept of a buddy system is to maintain direct-mapped table 335 * (containing bit values) for memory blocks of various "orders". 336 * The bottom level table contains the map for the smallest allocatable 337 * units of memory (here, pages), and each level above it describes 338 * pairs of units from the levels below, hence, "buddies". 339 * At a high level, all that happens here is marking the table entry 340 * at the bottom level available, and propagating the changes upward 341 * as necessary, plus some accounting needed to play nicely with other 342 * parts of the VM system. 343 * At each level, we keep a list of pages, which are heads of continuous 344 * free pages of length of (1 << order) and marked with PG_buddy. Page's 345 * order is recorded in page_private(page) field. 346 * So when we are allocating or freeing one, we can derive the state of the 347 * other. That is, if we allocate a small block, and both were 348 * free, the remainder of the region must be split into blocks. 349 * If a block is freed, and its buddy is also free, then this 350 * triggers coalescing into a block of larger size. 351 * 352 * -- wli 353 */ 354 355static inline void __free_one_page(struct page *page, 356 struct zone *zone, unsigned int order) 357{ 358 unsigned long page_idx; 359 int order_size = 1 << order; 360 361 if (unlikely(PageCompound(page))) 362 destroy_compound_page(page, order); 363 364 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 365 366 VM_BUG_ON(page_idx & (order_size - 1)); 367 VM_BUG_ON(bad_range(zone, page)); 368 369 zone->free_pages += order_size; 370 while (order < MAX_ORDER-1) { 371 unsigned long combined_idx; 372 struct free_area *area; 373 struct page *buddy; 374 375 buddy = __page_find_buddy(page, page_idx, order); 376 if (!page_is_buddy(page, buddy, order)) 377 break; /* Move the buddy up one level. */ 378 379 list_del(&buddy->lru); 380 area = zone->free_area + order; 381 area->nr_free--; 382 rmv_page_order(buddy); 383 combined_idx = __find_combined_index(page_idx, order); 384 page = page + (combined_idx - page_idx); 385 page_idx = combined_idx; 386 order++; 387 } 388 set_page_order(page, order); 389 list_add(&page->lru, &zone->free_area[order].free_list); 390 zone->free_area[order].nr_free++; 391} 392 393static inline int free_pages_check(struct page *page) 394{ 395 if (unlikely(page_mapcount(page) | 396 (page->mapping != NULL) | 397 (page_count(page) != 0) | 398 (page->flags & ( 399 1 << PG_lru | 400 1 << PG_private | 401 1 << PG_locked | 402 1 << PG_active | 403 1 << PG_reclaim | 404 1 << PG_slab | 405 1 << PG_swapcache | 406 1 << PG_writeback | 407 1 << PG_reserved | 408 1 << PG_buddy )))) 409 bad_page(page); 410 if (PageDirty(page)) 411 __ClearPageDirty(page); 412 /* 413 * For now, we report if PG_reserved was found set, but do not 414 * clear it, and do not free the page. But we shall soon need 415 * to do more, for when the ZERO_PAGE count wraps negative. 416 */ 417 return PageReserved(page); 418} 419 420/* 421 * Frees a list of pages. 422 * Assumes all pages on list are in same zone, and of same order. 423 * count is the number of pages to free. 424 * 425 * If the zone was previously in an "all pages pinned" state then look to 426 * see if this freeing clears that state. 427 * 428 * And clear the zone's pages_scanned counter, to hold off the "all pages are 429 * pinned" detection logic. 430 */ 431static void free_pages_bulk(struct zone *zone, int count, 432 struct list_head *list, int order) 433{ 434 spin_lock(&zone->lock); 435 zone->all_unreclaimable = 0; 436 zone->pages_scanned = 0; 437 while (count--) { 438 struct page *page; 439 440 VM_BUG_ON(list_empty(list)); 441 page = list_entry(list->prev, struct page, lru); 442 /* have to delete it as __free_one_page list manipulates */ 443 list_del(&page->lru); 444 __free_one_page(page, zone, order); 445 } 446 spin_unlock(&zone->lock); 447} 448 449static void free_one_page(struct zone *zone, struct page *page, int order) 450{ 451 LIST_HEAD(list); 452 list_add(&page->lru, &list); 453 free_pages_bulk(zone, 1, &list, order); 454} 455 456static void __free_pages_ok(struct page *page, unsigned int order) 457{ 458 unsigned long flags; 459 int i; 460 int reserved = 0; 461 462 arch_free_page(page, order); 463 if (!PageHighMem(page)) 464 debug_check_no_locks_freed(page_address(page), 465 PAGE_SIZE<<order); 466 467 for (i = 0 ; i < (1 << order) ; ++i) 468 reserved += free_pages_check(page + i); 469 if (reserved) 470 return; 471 472 kernel_map_pages(page, 1 << order, 0); 473 local_irq_save(flags); 474 __count_vm_events(PGFREE, 1 << order); 475 free_one_page(page_zone(page), page, order); 476 local_irq_restore(flags); 477} 478 479/* 480 * permit the bootmem allocator to evade page validation on high-order frees 481 */ 482void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order) 483{ 484 if (order == 0) { 485 __ClearPageReserved(page); 486 set_page_count(page, 0); 487 set_page_refcounted(page); 488 __free_page(page); 489 } else { 490 int loop; 491 492 prefetchw(page); 493 for (loop = 0; loop < BITS_PER_LONG; loop++) { 494 struct page *p = &page[loop]; 495 496 if (loop + 1 < BITS_PER_LONG) 497 prefetchw(p + 1); 498 __ClearPageReserved(p); 499 set_page_count(p, 0); 500 } 501 502 set_page_refcounted(page); 503 __free_pages(page, order); 504 } 505} 506 507 508/* 509 * The order of subdivision here is critical for the IO subsystem. 510 * Please do not alter this order without good reasons and regression 511 * testing. Specifically, as large blocks of memory are subdivided, 512 * the order in which smaller blocks are delivered depends on the order 513 * they're subdivided in this function. This is the primary factor 514 * influencing the order in which pages are delivered to the IO 515 * subsystem according to empirical testing, and this is also justified 516 * by considering the behavior of a buddy system containing a single 517 * large block of memory acted on by a series of small allocations. 518 * This behavior is a critical factor in sglist merging's success. 519 * 520 * -- wli 521 */ 522static inline void expand(struct zone *zone, struct page *page, 523 int low, int high, struct free_area *area) 524{ 525 unsigned long size = 1 << high; 526 527 while (high > low) { 528 area--; 529 high--; 530 size >>= 1; 531 VM_BUG_ON(bad_range(zone, &page[size])); 532 list_add(&page[size].lru, &area->free_list); 533 area->nr_free++; 534 set_page_order(&page[size], high); 535 } 536} 537 538/* 539 * This page is about to be returned from the page allocator 540 */ 541static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 542{ 543 if (unlikely(page_mapcount(page) | 544 (page->mapping != NULL) | 545 (page_count(page) != 0) | 546 (page->flags & ( 547 1 << PG_lru | 548 1 << PG_private | 549 1 << PG_locked | 550 1 << PG_active | 551 1 << PG_dirty | 552 1 << PG_reclaim | 553 1 << PG_slab | 554 1 << PG_swapcache | 555 1 << PG_writeback | 556 1 << PG_reserved | 557 1 << PG_buddy )))) 558 bad_page(page); 559 560 /* 561 * For now, we report if PG_reserved was found set, but do not 562 * clear it, and do not allocate the page: as a safety net. 563 */ 564 if (PageReserved(page)) 565 return 1; 566 567 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 568 1 << PG_referenced | 1 << PG_arch_1 | 569 1 << PG_checked | 1 << PG_mappedtodisk); 570 set_page_private(page, 0); 571 set_page_refcounted(page); 572 kernel_map_pages(page, 1 << order, 1); 573 574 if (gfp_flags & __GFP_ZERO) 575 prep_zero_page(page, order, gfp_flags); 576 577 if (order && (gfp_flags & __GFP_COMP)) 578 prep_compound_page(page, order); 579 580 return 0; 581} 582 583/* 584 * Do the hard work of removing an element from the buddy allocator. 585 * Call me with the zone->lock already held. 586 */ 587static struct page *__rmqueue(struct zone *zone, unsigned int order) 588{ 589 struct free_area * area; 590 unsigned int current_order; 591 struct page *page; 592 593 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 594 area = zone->free_area + current_order; 595 if (list_empty(&area->free_list)) 596 continue; 597 598 page = list_entry(area->free_list.next, struct page, lru); 599 list_del(&page->lru); 600 rmv_page_order(page); 601 area->nr_free--; 602 zone->free_pages -= 1UL << order; 603 expand(zone, page, order, current_order, area); 604 return page; 605 } 606 607 return NULL; 608} 609 610/* 611 * Obtain a specified number of elements from the buddy allocator, all under 612 * a single hold of the lock, for efficiency. Add them to the supplied list. 613 * Returns the number of new pages which were placed at *list. 614 */ 615static int rmqueue_bulk(struct zone *zone, unsigned int order, 616 unsigned long count, struct list_head *list) 617{ 618 int i; 619 620 spin_lock(&zone->lock); 621 for (i = 0; i < count; ++i) { 622 struct page *page = __rmqueue(zone, order); 623 if (unlikely(page == NULL)) 624 break; 625 list_add_tail(&page->lru, list); 626 } 627 spin_unlock(&zone->lock); 628 return i; 629} 630 631#ifdef CONFIG_NUMA 632/* 633 * Called from the slab reaper to drain pagesets on a particular node that 634 * belong to the currently executing processor. 635 * Note that this function must be called with the thread pinned to 636 * a single processor. 637 */ 638void drain_node_pages(int nodeid) 639{ 640 int i; 641 enum zone_type z; 642 unsigned long flags; 643 644 for (z = 0; z < MAX_NR_ZONES; z++) { 645 struct zone *zone = NODE_DATA(nodeid)->node_zones + z; 646 struct per_cpu_pageset *pset; 647 648 pset = zone_pcp(zone, smp_processor_id()); 649 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 650 struct per_cpu_pages *pcp; 651 652 pcp = &pset->pcp[i]; 653 if (pcp->count) { 654 local_irq_save(flags); 655 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 656 pcp->count = 0; 657 local_irq_restore(flags); 658 } 659 } 660 } 661} 662#endif 663 664#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU) 665static void __drain_pages(unsigned int cpu) 666{ 667 unsigned long flags; 668 struct zone *zone; 669 int i; 670 671 for_each_zone(zone) { 672 struct per_cpu_pageset *pset; 673 674 pset = zone_pcp(zone, cpu); 675 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 676 struct per_cpu_pages *pcp; 677 678 pcp = &pset->pcp[i]; 679 local_irq_save(flags); 680 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 681 pcp->count = 0; 682 local_irq_restore(flags); 683 } 684 } 685} 686#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */ 687 688#ifdef CONFIG_PM 689 690void mark_free_pages(struct zone *zone) 691{ 692 unsigned long zone_pfn, flags; 693 int order; 694 struct list_head *curr; 695 696 if (!zone->spanned_pages) 697 return; 698 699 spin_lock_irqsave(&zone->lock, flags); 700 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) 701 ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn)); 702 703 for (order = MAX_ORDER - 1; order >= 0; --order) 704 list_for_each(curr, &zone->free_area[order].free_list) { 705 unsigned long start_pfn, i; 706 707 start_pfn = page_to_pfn(list_entry(curr, struct page, lru)); 708 709 for (i=0; i < (1<<order); i++) 710 SetPageNosaveFree(pfn_to_page(start_pfn+i)); 711 } 712 spin_unlock_irqrestore(&zone->lock, flags); 713} 714 715/* 716 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 717 */ 718void drain_local_pages(void) 719{ 720 unsigned long flags; 721 722 local_irq_save(flags); 723 __drain_pages(smp_processor_id()); 724 local_irq_restore(flags); 725} 726#endif /* CONFIG_PM */ 727 728/* 729 * Free a 0-order page 730 */ 731static void fastcall free_hot_cold_page(struct page *page, int cold) 732{ 733 struct zone *zone = page_zone(page); 734 struct per_cpu_pages *pcp; 735 unsigned long flags; 736 737 arch_free_page(page, 0); 738 739 if (PageAnon(page)) 740 page->mapping = NULL; 741 if (free_pages_check(page)) 742 return; 743 744 kernel_map_pages(page, 1, 0); 745 746 pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; 747 local_irq_save(flags); 748 __count_vm_event(PGFREE); 749 list_add(&page->lru, &pcp->list); 750 pcp->count++; 751 if (pcp->count >= pcp->high) { 752 free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 753 pcp->count -= pcp->batch; 754 } 755 local_irq_restore(flags); 756 put_cpu(); 757} 758 759void fastcall free_hot_page(struct page *page) 760{ 761 free_hot_cold_page(page, 0); 762} 763 764void fastcall free_cold_page(struct page *page) 765{ 766 free_hot_cold_page(page, 1); 767} 768 769/* 770 * split_page takes a non-compound higher-order page, and splits it into 771 * n (1<<order) sub-pages: page[0..n] 772 * Each sub-page must be freed individually. 773 * 774 * Note: this is probably too low level an operation for use in drivers. 775 * Please consult with lkml before using this in your driver. 776 */ 777void split_page(struct page *page, unsigned int order) 778{ 779 int i; 780 781 VM_BUG_ON(PageCompound(page)); 782 VM_BUG_ON(!page_count(page)); 783 for (i = 1; i < (1 << order); i++) 784 set_page_refcounted(page + i); 785} 786 787/* 788 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 789 * we cheat by calling it from here, in the order > 0 path. Saves a branch 790 * or two. 791 */ 792static struct page *buffered_rmqueue(struct zonelist *zonelist, 793 struct zone *zone, int order, gfp_t gfp_flags) 794{ 795 unsigned long flags; 796 struct page *page; 797 int cold = !!(gfp_flags & __GFP_COLD); 798 int cpu; 799 800again: 801 cpu = get_cpu(); 802 if (likely(order == 0)) { 803 struct per_cpu_pages *pcp; 804 805 pcp = &zone_pcp(zone, cpu)->pcp[cold]; 806 local_irq_save(flags); 807 if (!pcp->count) { 808 pcp->count += rmqueue_bulk(zone, 0, 809 pcp->batch, &pcp->list); 810 if (unlikely(!pcp->count)) 811 goto failed; 812 } 813 page = list_entry(pcp->list.next, struct page, lru); 814 list_del(&page->lru); 815 pcp->count--; 816 } else { 817 spin_lock_irqsave(&zone->lock, flags); 818 page = __rmqueue(zone, order); 819 spin_unlock(&zone->lock); 820 if (!page) 821 goto failed; 822 } 823 824 __count_zone_vm_events(PGALLOC, zone, 1 << order); 825 zone_statistics(zonelist, zone); 826 local_irq_restore(flags); 827 put_cpu(); 828 829 VM_BUG_ON(bad_range(zone, page)); 830 if (prep_new_page(page, order, gfp_flags)) 831 goto again; 832 return page; 833 834failed: 835 local_irq_restore(flags); 836 put_cpu(); 837 return NULL; 838} 839 840#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 841#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ 842#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ 843#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ 844#define ALLOC_HARDER 0x10 /* try to alloc harder */ 845#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 846#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 847 848/* 849 * Return 1 if free pages are above 'mark'. This takes into account the order 850 * of the allocation. 851 */ 852int zone_watermark_ok(struct zone *z, int order, unsigned long mark, 853 int classzone_idx, int alloc_flags) 854{ 855 /* free_pages my go negative - that's OK */ 856 long min = mark, free_pages = z->free_pages - (1 << order) + 1; 857 int o; 858 859 if (alloc_flags & ALLOC_HIGH) 860 min -= min / 2; 861 if (alloc_flags & ALLOC_HARDER) 862 min -= min / 4; 863 864 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 865 return 0; 866 for (o = 0; o < order; o++) { 867 /* At the next order, this order's pages become unavailable */ 868 free_pages -= z->free_area[o].nr_free << o; 869 870 /* Require fewer higher order pages to be free */ 871 min >>= 1; 872 873 if (free_pages <= min) 874 return 0; 875 } 876 return 1; 877} 878 879/* 880 * get_page_from_freeliest goes through the zonelist trying to allocate 881 * a page. 882 */ 883static struct page * 884get_page_from_freelist(gfp_t gfp_mask, unsigned int order, 885 struct zonelist *zonelist, int alloc_flags) 886{ 887 struct zone **z = zonelist->zones; 888 struct page *page = NULL; 889 int classzone_idx = zone_idx(*z); 890 struct zone *zone; 891 892 /* 893 * Go through the zonelist once, looking for a zone with enough free. 894 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 895 */ 896 do { 897 zone = *z; 898 if (unlikely((gfp_mask & __GFP_THISNODE) && 899 zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) 900 break; 901 if ((alloc_flags & ALLOC_CPUSET) && 902 !cpuset_zone_allowed(zone, gfp_mask)) 903 continue; 904 905 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 906 unsigned long mark; 907 if (alloc_flags & ALLOC_WMARK_MIN) 908 mark = zone->pages_min; 909 else if (alloc_flags & ALLOC_WMARK_LOW) 910 mark = zone->pages_low; 911 else 912 mark = zone->pages_high; 913 if (!zone_watermark_ok(zone , order, mark, 914 classzone_idx, alloc_flags)) 915 if (!zone_reclaim_mode || 916 !zone_reclaim(zone, gfp_mask, order)) 917 continue; 918 } 919 920 page = buffered_rmqueue(zonelist, zone, order, gfp_mask); 921 if (page) { 922 break; 923 } 924 } while (*(++z) != NULL); 925 return page; 926} 927 928/* 929 * This is the 'heart' of the zoned buddy allocator. 930 */ 931struct page * fastcall 932__alloc_pages(gfp_t gfp_mask, unsigned int order, 933 struct zonelist *zonelist) 934{ 935 const gfp_t wait = gfp_mask & __GFP_WAIT; 936 struct zone **z; 937 struct page *page; 938 struct reclaim_state reclaim_state; 939 struct task_struct *p = current; 940 int do_retry; 941 int alloc_flags; 942 int did_some_progress; 943 944 might_sleep_if(wait); 945 946restart: 947 z = zonelist->zones; /* the list of zones suitable for gfp_mask */ 948 949 if (unlikely(*z == NULL)) { 950 /* Should this ever happen?? */ 951 return NULL; 952 } 953 954 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 955 zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); 956 if (page) 957 goto got_pg; 958 959 do { 960 wakeup_kswapd(*z, order); 961 } while (*(++z)); 962 963 /* 964 * OK, we're below the kswapd watermark and have kicked background 965 * reclaim. Now things get more complex, so set up alloc_flags according 966 * to how we want to proceed. 967 * 968 * The caller may dip into page reserves a bit more if the caller 969 * cannot run direct reclaim, or if the caller has realtime scheduling 970 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 971 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 972 */ 973 alloc_flags = ALLOC_WMARK_MIN; 974 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) 975 alloc_flags |= ALLOC_HARDER; 976 if (gfp_mask & __GFP_HIGH) 977 alloc_flags |= ALLOC_HIGH; 978 if (wait) 979 alloc_flags |= ALLOC_CPUSET; 980 981 /* 982 * Go through the zonelist again. Let __GFP_HIGH and allocations 983 * coming from realtime tasks go deeper into reserves. 984 * 985 * This is the last chance, in general, before the goto nopage. 986 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 987 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 988 */ 989 page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); 990 if (page) 991 goto got_pg; 992 993 /* This allocation should allow future memory freeing. */ 994 995 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) 996 && !in_interrupt()) { 997 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 998nofail_alloc: 999 /* go through the zonelist yet again, ignoring mins */ 1000 page = get_page_from_freelist(gfp_mask, order, 1001 zonelist, ALLOC_NO_WATERMARKS); 1002 if (page) 1003 goto got_pg; 1004 if (gfp_mask & __GFP_NOFAIL) { 1005 blk_congestion_wait(WRITE, HZ/50); 1006 goto nofail_alloc; 1007 } 1008 } 1009 goto nopage; 1010 } 1011 1012 /* Atomic allocations - we can't balance anything */ 1013 if (!wait) 1014 goto nopage; 1015 1016rebalance: 1017 cond_resched(); 1018 1019 /* We now go into synchronous reclaim */ 1020 cpuset_memory_pressure_bump(); 1021 p->flags |= PF_MEMALLOC; 1022 reclaim_state.reclaimed_slab = 0; 1023 p->reclaim_state = &reclaim_state; 1024 1025 did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); 1026 1027 p->reclaim_state = NULL; 1028 p->flags &= ~PF_MEMALLOC; 1029 1030 cond_resched(); 1031 1032 if (likely(did_some_progress)) { 1033 page = get_page_from_freelist(gfp_mask, order, 1034 zonelist, alloc_flags); 1035 if (page) 1036 goto got_pg; 1037 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 1038 /* 1039 * Go through the zonelist yet one more time, keep 1040 * very high watermark here, this is only to catch 1041 * a parallel oom killing, we must fail if we're still 1042 * under heavy pressure. 1043 */ 1044 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1045 zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); 1046 if (page) 1047 goto got_pg; 1048 1049 out_of_memory(zonelist, gfp_mask, order); 1050 goto restart; 1051 } 1052 1053 /* 1054 * Don't let big-order allocations loop unless the caller explicitly 1055 * requests that. Wait for some write requests to complete then retry. 1056 * 1057 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order 1058 * <= 3, but that may not be true in other implementations. 1059 */ 1060 do_retry = 0; 1061 if (!(gfp_mask & __GFP_NORETRY)) { 1062 if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) 1063 do_retry = 1; 1064 if (gfp_mask & __GFP_NOFAIL) 1065 do_retry = 1; 1066 } 1067 if (do_retry) { 1068 blk_congestion_wait(WRITE, HZ/50); 1069 goto rebalance; 1070 } 1071 1072nopage: 1073 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 1074 printk(KERN_WARNING "%s: page allocation failure." 1075 " order:%d, mode:0x%x\n", 1076 p->comm, order, gfp_mask); 1077 dump_stack(); 1078 show_mem(); 1079 } 1080got_pg: 1081 return page; 1082} 1083 1084EXPORT_SYMBOL(__alloc_pages); 1085 1086/* 1087 * Common helper functions. 1088 */ 1089fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 1090{ 1091 struct page * page; 1092 page = alloc_pages(gfp_mask, order); 1093 if (!page) 1094 return 0; 1095 return (unsigned long) page_address(page); 1096} 1097 1098EXPORT_SYMBOL(__get_free_pages); 1099 1100fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) 1101{ 1102 struct page * page; 1103 1104 /* 1105 * get_zeroed_page() returns a 32-bit address, which cannot represent 1106 * a highmem page 1107 */ 1108 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1109 1110 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1111 if (page) 1112 return (unsigned long) page_address(page); 1113 return 0; 1114} 1115 1116EXPORT_SYMBOL(get_zeroed_page); 1117 1118void __pagevec_free(struct pagevec *pvec) 1119{ 1120 int i = pagevec_count(pvec); 1121 1122 while (--i >= 0) 1123 free_hot_cold_page(pvec->pages[i], pvec->cold); 1124} 1125 1126fastcall void __free_pages(struct page *page, unsigned int order) 1127{ 1128 if (put_page_testzero(page)) { 1129 if (order == 0) 1130 free_hot_page(page); 1131 else 1132 __free_pages_ok(page, order); 1133 } 1134} 1135 1136EXPORT_SYMBOL(__free_pages); 1137 1138fastcall void free_pages(unsigned long addr, unsigned int order) 1139{ 1140 if (addr != 0) { 1141 VM_BUG_ON(!virt_addr_valid((void *)addr)); 1142 __free_pages(virt_to_page((void *)addr), order); 1143 } 1144} 1145 1146EXPORT_SYMBOL(free_pages); 1147 1148/* 1149 * Total amount of free (allocatable) RAM: 1150 */ 1151unsigned int nr_free_pages(void) 1152{ 1153 unsigned int sum = 0; 1154 struct zone *zone; 1155 1156 for_each_zone(zone) 1157 sum += zone->free_pages; 1158 1159 return sum; 1160} 1161 1162EXPORT_SYMBOL(nr_free_pages); 1163 1164#ifdef CONFIG_NUMA 1165unsigned int nr_free_pages_pgdat(pg_data_t *pgdat) 1166{ 1167 unsigned int sum = 0; 1168 enum zone_type i; 1169 1170 for (i = 0; i < MAX_NR_ZONES; i++) 1171 sum += pgdat->node_zones[i].free_pages; 1172 1173 return sum; 1174} 1175#endif 1176 1177static unsigned int nr_free_zone_pages(int offset) 1178{ 1179 /* Just pick one node, since fallback list is circular */ 1180 pg_data_t *pgdat = NODE_DATA(numa_node_id()); 1181 unsigned int sum = 0; 1182 1183 struct zonelist *zonelist = pgdat->node_zonelists + offset; 1184 struct zone **zonep = zonelist->zones; 1185 struct zone *zone; 1186 1187 for (zone = *zonep++; zone; zone = *zonep++) { 1188 unsigned long size = zone->present_pages; 1189 unsigned long high = zone->pages_high; 1190 if (size > high) 1191 sum += size - high; 1192 } 1193 1194 return sum; 1195} 1196 1197/* 1198 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 1199 */ 1200unsigned int nr_free_buffer_pages(void) 1201{ 1202 return nr_free_zone_pages(gfp_zone(GFP_USER)); 1203} 1204 1205/* 1206 * Amount of free RAM allocatable within all zones 1207 */ 1208unsigned int nr_free_pagecache_pages(void) 1209{ 1210 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER)); 1211} 1212#ifdef CONFIG_NUMA 1213static void show_node(struct zone *zone) 1214{ 1215 printk("Node %d ", zone->zone_pgdat->node_id); 1216} 1217#else 1218#define show_node(zone) do { } while (0) 1219#endif 1220 1221void si_meminfo(struct sysinfo *val) 1222{ 1223 val->totalram = totalram_pages; 1224 val->sharedram = 0; 1225 val->freeram = nr_free_pages(); 1226 val->bufferram = nr_blockdev_pages(); 1227 val->totalhigh = totalhigh_pages; 1228 val->freehigh = nr_free_highpages(); 1229 val->mem_unit = PAGE_SIZE; 1230} 1231 1232EXPORT_SYMBOL(si_meminfo); 1233 1234#ifdef CONFIG_NUMA 1235void si_meminfo_node(struct sysinfo *val, int nid) 1236{ 1237 pg_data_t *pgdat = NODE_DATA(nid); 1238 1239 val->totalram = pgdat->node_present_pages; 1240 val->freeram = nr_free_pages_pgdat(pgdat); 1241#ifdef CONFIG_HIGHMEM 1242 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 1243 val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages; 1244#else 1245 val->totalhigh = 0; 1246 val->freehigh = 0; 1247#endif 1248 val->mem_unit = PAGE_SIZE; 1249} 1250#endif 1251 1252#define K(x) ((x) << (PAGE_SHIFT-10)) 1253 1254/* 1255 * Show free area list (used inside shift_scroll-lock stuff) 1256 * We also calculate the percentage fragmentation. We do this by counting the 1257 * memory on each free list with the exception of the first item on the list. 1258 */ 1259void show_free_areas(void) 1260{ 1261 int cpu, temperature; 1262 unsigned long active; 1263 unsigned long inactive; 1264 unsigned long free; 1265 struct zone *zone; 1266 1267 for_each_zone(zone) { 1268 show_node(zone); 1269 printk("%s per-cpu:", zone->name); 1270 1271 if (!populated_zone(zone)) { 1272 printk(" empty\n"); 1273 continue; 1274 } else 1275 printk("\n"); 1276 1277 for_each_online_cpu(cpu) { 1278 struct per_cpu_pageset *pageset; 1279 1280 pageset = zone_pcp(zone, cpu); 1281 1282 for (temperature = 0; temperature < 2; temperature++) 1283 printk("cpu %d %s: high %d, batch %d used:%d\n", 1284 cpu, 1285 temperature ? "cold" : "hot", 1286 pageset->pcp[temperature].high, 1287 pageset->pcp[temperature].batch, 1288 pageset->pcp[temperature].count); 1289 } 1290 } 1291 1292 get_zone_counts(&active, &inactive, &free); 1293 1294 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu " 1295 "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n", 1296 active, 1297 inactive, 1298 global_page_state(NR_FILE_DIRTY), 1299 global_page_state(NR_WRITEBACK), 1300 global_page_state(NR_UNSTABLE_NFS), 1301 nr_free_pages(), 1302 global_page_state(NR_SLAB), 1303 global_page_state(NR_FILE_MAPPED), 1304 global_page_state(NR_PAGETABLE)); 1305 1306 for_each_zone(zone) { 1307 int i; 1308 1309 show_node(zone); 1310 printk("%s" 1311 " free:%lukB" 1312 " min:%lukB" 1313 " low:%lukB" 1314 " high:%lukB" 1315 " active:%lukB" 1316 " inactive:%lukB" 1317 " present:%lukB" 1318 " pages_scanned:%lu" 1319 " all_unreclaimable? %s" 1320 "\n", 1321 zone->name, 1322 K(zone->free_pages), 1323 K(zone->pages_min), 1324 K(zone->pages_low), 1325 K(zone->pages_high), 1326 K(zone->nr_active), 1327 K(zone->nr_inactive), 1328 K(zone->present_pages), 1329 zone->pages_scanned, 1330 (zone->all_unreclaimable ? "yes" : "no") 1331 ); 1332 printk("lowmem_reserve[]:"); 1333 for (i = 0; i < MAX_NR_ZONES; i++) 1334 printk(" %lu", zone->lowmem_reserve[i]); 1335 printk("\n"); 1336 } 1337 1338 for_each_zone(zone) { 1339 unsigned long nr[MAX_ORDER], flags, order, total = 0; 1340 1341 show_node(zone); 1342 printk("%s: ", zone->name); 1343 if (!populated_zone(zone)) { 1344 printk("empty\n"); 1345 continue; 1346 } 1347 1348 spin_lock_irqsave(&zone->lock, flags); 1349 for (order = 0; order < MAX_ORDER; order++) { 1350 nr[order] = zone->free_area[order].nr_free; 1351 total += nr[order] << order; 1352 } 1353 spin_unlock_irqrestore(&zone->lock, flags); 1354 for (order = 0; order < MAX_ORDER; order++) 1355 printk("%lu*%lukB ", nr[order], K(1UL) << order); 1356 printk("= %lukB\n", K(total)); 1357 } 1358 1359 show_swap_cache_info(); 1360} 1361 1362/* 1363 * Builds allocation fallback zone lists. 1364 * 1365 * Add all populated zones of a node to the zonelist. 1366 */ 1367static int __meminit build_zonelists_node(pg_data_t *pgdat, 1368 struct zonelist *zonelist, int nr_zones, enum zone_type zone_type) 1369{ 1370 struct zone *zone; 1371 1372 BUG_ON(zone_type >= MAX_NR_ZONES); 1373 zone_type++; 1374 1375 do { 1376 zone_type--; 1377 zone = pgdat->node_zones + zone_type; 1378 if (populated_zone(zone)) { 1379 zonelist->zones[nr_zones++] = zone; 1380 check_highest_zone(zone_type); 1381 } 1382 1383 } while (zone_type); 1384 return nr_zones; 1385} 1386 1387#ifdef CONFIG_NUMA 1388#define MAX_NODE_LOAD (num_online_nodes()) 1389static int __meminitdata node_load[MAX_NUMNODES]; 1390/** 1391 * find_next_best_node - find the next node that should appear in a given node's fallback list 1392 * @node: node whose fallback list we're appending 1393 * @used_node_mask: nodemask_t of already used nodes 1394 * 1395 * We use a number of factors to determine which is the next node that should 1396 * appear on a given node's fallback list. The node should not have appeared 1397 * already in @node's fallback list, and it should be the next closest node 1398 * according to the distance array (which contains arbitrary distance values 1399 * from each node to each node in the system), and should also prefer nodes 1400 * with no CPUs, since presumably they'll have very little allocation pressure 1401 * on them otherwise. 1402 * It returns -1 if no node is found. 1403 */ 1404static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask) 1405{ 1406 int n, val; 1407 int min_val = INT_MAX; 1408 int best_node = -1; 1409 1410 /* Use the local node if we haven't already */ 1411 if (!node_isset(node, *used_node_mask)) { 1412 node_set(node, *used_node_mask); 1413 return node; 1414 } 1415 1416 for_each_online_node(n) { 1417 cpumask_t tmp; 1418 1419 /* Don't want a node to appear more than once */ 1420 if (node_isset(n, *used_node_mask)) 1421 continue; 1422 1423 /* Use the distance array to find the distance */ 1424 val = node_distance(node, n); 1425 1426 /* Penalize nodes under us ("prefer the next node") */ 1427 val += (n < node); 1428 1429 /* Give preference to headless and unused nodes */ 1430 tmp = node_to_cpumask(n); 1431 if (!cpus_empty(tmp)) 1432 val += PENALTY_FOR_NODE_WITH_CPUS; 1433 1434 /* Slight preference for less loaded node */ 1435 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 1436 val += node_load[n]; 1437 1438 if (val < min_val) { 1439 min_val = val; 1440 best_node = n; 1441 } 1442 } 1443 1444 if (best_node >= 0) 1445 node_set(best_node, *used_node_mask); 1446 1447 return best_node; 1448} 1449 1450static void __meminit build_zonelists(pg_data_t *pgdat) 1451{ 1452 int j, node, local_node; 1453 enum zone_type i; 1454 int prev_node, load; 1455 struct zonelist *zonelist; 1456 nodemask_t used_mask; 1457 1458 /* initialize zonelists */ 1459 for (i = 0; i < MAX_NR_ZONES; i++) { 1460 zonelist = pgdat->node_zonelists + i; 1461 zonelist->zones[0] = NULL; 1462 } 1463 1464 /* NUMA-aware ordering of nodes */ 1465 local_node = pgdat->node_id; 1466 load = num_online_nodes(); 1467 prev_node = local_node; 1468 nodes_clear(used_mask); 1469 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 1470 int distance = node_distance(local_node, node); 1471 1472 /* 1473 * If another node is sufficiently far away then it is better 1474 * to reclaim pages in a zone before going off node. 1475 */ 1476 if (distance > RECLAIM_DISTANCE) 1477 zone_reclaim_mode = 1; 1478 1479 /* 1480 * We don't want to pressure a particular node. 1481 * So adding penalty to the first node in same 1482 * distance group to make it round-robin. 1483 */ 1484 1485 if (distance != node_distance(local_node, prev_node)) 1486 node_load[node] += load; 1487 prev_node = node; 1488 load--; 1489 for (i = 0; i < MAX_NR_ZONES; i++) { 1490 zonelist = pgdat->node_zonelists + i; 1491 for (j = 0; zonelist->zones[j] != NULL; j++); 1492 1493 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1494 zonelist->zones[j] = NULL; 1495 } 1496 } 1497} 1498 1499#else /* CONFIG_NUMA */ 1500 1501static void __meminit build_zonelists(pg_data_t *pgdat) 1502{ 1503 int node, local_node; 1504 enum zone_type i,j; 1505 1506 local_node = pgdat->node_id; 1507 for (i = 0; i < MAX_NR_ZONES; i++) { 1508 struct zonelist *zonelist; 1509 1510 zonelist = pgdat->node_zonelists + i; 1511 1512 j = build_zonelists_node(pgdat, zonelist, 0, i); 1513 /* 1514 * Now we build the zonelist so that it contains the zones 1515 * of all the other nodes. 1516 * We don't want to pressure a particular node, so when 1517 * building the zones for node N, we make sure that the 1518 * zones coming right after the local ones are those from 1519 * node N+1 (modulo N) 1520 */ 1521 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 1522 if (!node_online(node)) 1523 continue; 1524 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1525 } 1526 for (node = 0; node < local_node; node++) { 1527 if (!node_online(node)) 1528 continue; 1529 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1530 } 1531 1532 zonelist->zones[j] = NULL; 1533 } 1534} 1535 1536#endif /* CONFIG_NUMA */ 1537 1538/* return values int ....just for stop_machine_run() */ 1539static int __meminit __build_all_zonelists(void *dummy) 1540{ 1541 int nid; 1542 for_each_online_node(nid) 1543 build_zonelists(NODE_DATA(nid)); 1544 return 0; 1545} 1546 1547void __meminit build_all_zonelists(void) 1548{ 1549 if (system_state == SYSTEM_BOOTING) { 1550 __build_all_zonelists(0); 1551 cpuset_init_current_mems_allowed(); 1552 } else { 1553 /* we have to stop all cpus to guaranntee there is no user 1554 of zonelist */ 1555 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); 1556 /* cpuset refresh routine should be here */ 1557 } 1558 vm_total_pages = nr_free_pagecache_pages(); 1559 printk("Built %i zonelists. Total pages: %ld\n", 1560 num_online_nodes(), vm_total_pages); 1561} 1562 1563/* 1564 * Helper functions to size the waitqueue hash table. 1565 * Essentially these want to choose hash table sizes sufficiently 1566 * large so that collisions trying to wait on pages are rare. 1567 * But in fact, the number of active page waitqueues on typical 1568 * systems is ridiculously low, less than 200. So this is even 1569 * conservative, even though it seems large. 1570 * 1571 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 1572 * waitqueues, i.e. the size of the waitq table given the number of pages. 1573 */ 1574#define PAGES_PER_WAITQUEUE 256 1575 1576#ifndef CONFIG_MEMORY_HOTPLUG 1577static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 1578{ 1579 unsigned long size = 1; 1580 1581 pages /= PAGES_PER_WAITQUEUE; 1582 1583 while (size < pages) 1584 size <<= 1; 1585 1586 /* 1587 * Once we have dozens or even hundreds of threads sleeping 1588 * on IO we've got bigger problems than wait queue collision. 1589 * Limit the size of the wait table to a reasonable size. 1590 */ 1591 size = min(size, 4096UL); 1592 1593 return max(size, 4UL); 1594} 1595#else 1596/* 1597 * A zone's size might be changed by hot-add, so it is not possible to determine 1598 * a suitable size for its wait_table. So we use the maximum size now. 1599 * 1600 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 1601 * 1602 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 1603 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 1604 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 1605 * 1606 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 1607 * or more by the traditional way. (See above). It equals: 1608 * 1609 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 1610 * ia64(16K page size) : = ( 8G + 4M)byte. 1611 * powerpc (64K page size) : = (32G +16M)byte. 1612 */ 1613static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 1614{ 1615 return 4096UL; 1616} 1617#endif 1618 1619/* 1620 * This is an integer logarithm so that shifts can be used later 1621 * to extract the more random high bits from the multiplicative 1622 * hash function before the remainder is taken. 1623 */ 1624static inline unsigned long wait_table_bits(unsigned long size) 1625{ 1626 return ffz(~size); 1627} 1628 1629#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 1630 1631static void __init calculate_zone_totalpages(struct pglist_data *pgdat, 1632 unsigned long *zones_size, unsigned long *zholes_size) 1633{ 1634 unsigned long realtotalpages, totalpages = 0; 1635 enum zone_type i; 1636 1637 for (i = 0; i < MAX_NR_ZONES; i++) 1638 totalpages += zones_size[i]; 1639 pgdat->node_spanned_pages = totalpages; 1640 1641 realtotalpages = totalpages; 1642 if (zholes_size) 1643 for (i = 0; i < MAX_NR_ZONES; i++) 1644 realtotalpages -= zholes_size[i]; 1645 pgdat->node_present_pages = realtotalpages; 1646 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); 1647} 1648 1649 1650/* 1651 * Initially all pages are reserved - free ones are freed 1652 * up by free_all_bootmem() once the early boot process is 1653 * done. Non-atomic initialization, single-pass. 1654 */ 1655void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 1656 unsigned long start_pfn) 1657{ 1658 struct page *page; 1659 unsigned long end_pfn = start_pfn + size; 1660 unsigned long pfn; 1661 1662 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 1663 if (!early_pfn_valid(pfn)) 1664 continue; 1665 page = pfn_to_page(pfn); 1666 set_page_links(page, zone, nid, pfn); 1667 init_page_count(page); 1668 reset_page_mapcount(page); 1669 SetPageReserved(page); 1670 INIT_LIST_HEAD(&page->lru); 1671#ifdef WANT_PAGE_VIRTUAL 1672 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 1673 if (!is_highmem_idx(zone)) 1674 set_page_address(page, __va(pfn << PAGE_SHIFT)); 1675#endif 1676 } 1677} 1678 1679void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, 1680 unsigned long size) 1681{ 1682 int order; 1683 for (order = 0; order < MAX_ORDER ; order++) { 1684 INIT_LIST_HEAD(&zone->free_area[order].free_list); 1685 zone->free_area[order].nr_free = 0; 1686 } 1687} 1688 1689#define ZONETABLE_INDEX(x, zone_nr) ((x << ZONES_SHIFT) | zone_nr) 1690void zonetable_add(struct zone *zone, int nid, enum zone_type zid, 1691 unsigned long pfn, unsigned long size) 1692{ 1693 unsigned long snum = pfn_to_section_nr(pfn); 1694 unsigned long end = pfn_to_section_nr(pfn + size); 1695 1696 if (FLAGS_HAS_NODE) 1697 zone_table[ZONETABLE_INDEX(nid, zid)] = zone; 1698 else 1699 for (; snum <= end; snum++) 1700 zone_table[ZONETABLE_INDEX(snum, zid)] = zone; 1701} 1702 1703#ifndef __HAVE_ARCH_MEMMAP_INIT 1704#define memmap_init(size, nid, zone, start_pfn) \ 1705 memmap_init_zone((size), (nid), (zone), (start_pfn)) 1706#endif 1707 1708static int __cpuinit zone_batchsize(struct zone *zone) 1709{ 1710 int batch; 1711 1712 /* 1713 * The per-cpu-pages pools are set to around 1000th of the 1714 * size of the zone. But no more than 1/2 of a meg. 1715 * 1716 * OK, so we don't know how big the cache is. So guess. 1717 */ 1718 batch = zone->present_pages / 1024; 1719 if (batch * PAGE_SIZE > 512 * 1024) 1720 batch = (512 * 1024) / PAGE_SIZE; 1721 batch /= 4; /* We effectively *= 4 below */ 1722 if (batch < 1) 1723 batch = 1; 1724 1725 /* 1726 * Clamp the batch to a 2^n - 1 value. Having a power 1727 * of 2 value was found to be more likely to have 1728 * suboptimal cache aliasing properties in some cases. 1729 * 1730 * For example if 2 tasks are alternately allocating 1731 * batches of pages, one task can end up with a lot 1732 * of pages of one half of the possible page colors 1733 * and the other with pages of the other colors. 1734 */ 1735 batch = (1 << (fls(batch + batch/2)-1)) - 1; 1736 1737 return batch; 1738} 1739 1740inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 1741{ 1742 struct per_cpu_pages *pcp; 1743 1744 memset(p, 0, sizeof(*p)); 1745 1746 pcp = &p->pcp[0]; /* hot */ 1747 pcp->count = 0; 1748 pcp->high = 6 * batch; 1749 pcp->batch = max(1UL, 1 * batch); 1750 INIT_LIST_HEAD(&pcp->list); 1751 1752 pcp = &p->pcp[1]; /* cold*/ 1753 pcp->count = 0; 1754 pcp->high = 2 * batch; 1755 pcp->batch = max(1UL, batch/2); 1756 INIT_LIST_HEAD(&pcp->list); 1757} 1758 1759/* 1760 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist 1761 * to the value high for the pageset p. 1762 */ 1763 1764static void setup_pagelist_highmark(struct per_cpu_pageset *p, 1765 unsigned long high) 1766{ 1767 struct per_cpu_pages *pcp; 1768 1769 pcp = &p->pcp[0]; /* hot list */ 1770 pcp->high = high; 1771 pcp->batch = max(1UL, high/4); 1772 if ((high/4) > (PAGE_SHIFT * 8)) 1773 pcp->batch = PAGE_SHIFT * 8; 1774} 1775 1776 1777#ifdef CONFIG_NUMA 1778/* 1779 * Boot pageset table. One per cpu which is going to be used for all 1780 * zones and all nodes. The parameters will be set in such a way 1781 * that an item put on a list will immediately be handed over to 1782 * the buddy list. This is safe since pageset manipulation is done 1783 * with interrupts disabled. 1784 * 1785 * Some NUMA counter updates may also be caught by the boot pagesets. 1786 * 1787 * The boot_pagesets must be kept even after bootup is complete for 1788 * unused processors and/or zones. They do play a role for bootstrapping 1789 * hotplugged processors. 1790 * 1791 * zoneinfo_show() and maybe other functions do 1792 * not check if the processor is online before following the pageset pointer. 1793 * Other parts of the kernel may not check if the zone is available. 1794 */ 1795static struct per_cpu_pageset boot_pageset[NR_CPUS]; 1796 1797/* 1798 * Dynamically allocate memory for the 1799 * per cpu pageset array in struct zone. 1800 */ 1801static int __cpuinit process_zones(int cpu) 1802{ 1803 struct zone *zone, *dzone; 1804 1805 for_each_zone(zone) { 1806 1807 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 1808 GFP_KERNEL, cpu_to_node(cpu)); 1809 if (!zone_pcp(zone, cpu)) 1810 goto bad; 1811 1812 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); 1813 1814 if (percpu_pagelist_fraction) 1815 setup_pagelist_highmark(zone_pcp(zone, cpu), 1816 (zone->present_pages / percpu_pagelist_fraction)); 1817 } 1818 1819 return 0; 1820bad: 1821 for_each_zone(dzone) { 1822 if (dzone == zone) 1823 break; 1824 kfree(zone_pcp(dzone, cpu)); 1825 zone_pcp(dzone, cpu) = NULL; 1826 } 1827 return -ENOMEM; 1828} 1829 1830static inline void free_zone_pagesets(int cpu) 1831{ 1832 struct zone *zone; 1833 1834 for_each_zone(zone) { 1835 struct per_cpu_pageset *pset = zone_pcp(zone, cpu); 1836 1837 /* Free per_cpu_pageset if it is slab allocated */ 1838 if (pset != &boot_pageset[cpu]) 1839 kfree(pset); 1840 zone_pcp(zone, cpu) = NULL; 1841 } 1842} 1843 1844static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, 1845 unsigned long action, 1846 void *hcpu) 1847{ 1848 int cpu = (long)hcpu; 1849 int ret = NOTIFY_OK; 1850 1851 switch (action) { 1852 case CPU_UP_PREPARE: 1853 if (process_zones(cpu)) 1854 ret = NOTIFY_BAD; 1855 break; 1856 case CPU_UP_CANCELED: 1857 case CPU_DEAD: 1858 free_zone_pagesets(cpu); 1859 break; 1860 default: 1861 break; 1862 } 1863 return ret; 1864} 1865 1866static struct notifier_block __cpuinitdata pageset_notifier = 1867 { &pageset_cpuup_callback, NULL, 0 }; 1868 1869void __init setup_per_cpu_pageset(void) 1870{ 1871 int err; 1872 1873 /* Initialize per_cpu_pageset for cpu 0. 1874 * A cpuup callback will do this for every cpu 1875 * as it comes online 1876 */ 1877 err = process_zones(smp_processor_id()); 1878 BUG_ON(err); 1879 register_cpu_notifier(&pageset_notifier); 1880} 1881 1882#endif 1883 1884static __meminit 1885int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 1886{ 1887 int i; 1888 struct pglist_data *pgdat = zone->zone_pgdat; 1889 size_t alloc_size; 1890 1891 /* 1892 * The per-page waitqueue mechanism uses hashed waitqueues 1893 * per zone. 1894 */ 1895 zone->wait_table_hash_nr_entries = 1896 wait_table_hash_nr_entries(zone_size_pages); 1897 zone->wait_table_bits = 1898 wait_table_bits(zone->wait_table_hash_nr_entries); 1899 alloc_size = zone->wait_table_hash_nr_entries 1900 * sizeof(wait_queue_head_t); 1901 1902 if (system_state == SYSTEM_BOOTING) { 1903 zone->wait_table = (wait_queue_head_t *) 1904 alloc_bootmem_node(pgdat, alloc_size); 1905 } else { 1906 /* 1907 * This case means that a zone whose size was 0 gets new memory 1908 * via memory hot-add. 1909 * But it may be the case that a new node was hot-added. In 1910 * this case vmalloc() will not be able to use this new node's 1911 * memory - this wait_table must be initialized to use this new 1912 * node itself as well. 1913 * To use this new node's memory, further consideration will be 1914 * necessary. 1915 */ 1916 zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size); 1917 } 1918 if (!zone->wait_table) 1919 return -ENOMEM; 1920 1921 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) 1922 init_waitqueue_head(zone->wait_table + i); 1923 1924 return 0; 1925} 1926 1927static __meminit void zone_pcp_init(struct zone *zone) 1928{ 1929 int cpu; 1930 unsigned long batch = zone_batchsize(zone); 1931 1932 for (cpu = 0; cpu < NR_CPUS; cpu++) { 1933#ifdef CONFIG_NUMA 1934 /* Early boot. Slab allocator not functional yet */ 1935 zone_pcp(zone, cpu) = &boot_pageset[cpu]; 1936 setup_pageset(&boot_pageset[cpu],0); 1937#else 1938 setup_pageset(zone_pcp(zone,cpu), batch); 1939#endif 1940 } 1941 if (zone->present_pages) 1942 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 1943 zone->name, zone->present_pages, batch); 1944} 1945 1946__meminit int init_currently_empty_zone(struct zone *zone, 1947 unsigned long zone_start_pfn, 1948 unsigned long size) 1949{ 1950 struct pglist_data *pgdat = zone->zone_pgdat; 1951 int ret; 1952 ret = zone_wait_table_init(zone, size); 1953 if (ret) 1954 return ret; 1955 pgdat->nr_zones = zone_idx(zone) + 1; 1956 1957 zone->zone_start_pfn = zone_start_pfn; 1958 1959 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); 1960 1961 zone_init_free_lists(pgdat, zone, zone->spanned_pages); 1962 1963 return 0; 1964} 1965 1966/* 1967 * Set up the zone data structures: 1968 * - mark all pages reserved 1969 * - mark all memory queues empty 1970 * - clear the memory bitmaps 1971 */ 1972static void __meminit free_area_init_core(struct pglist_data *pgdat, 1973 unsigned long *zones_size, unsigned long *zholes_size) 1974{ 1975 enum zone_type j; 1976 int nid = pgdat->node_id; 1977 unsigned long zone_start_pfn = pgdat->node_start_pfn; 1978 int ret; 1979 1980 pgdat_resize_init(pgdat); 1981 pgdat->nr_zones = 0; 1982 init_waitqueue_head(&pgdat->kswapd_wait); 1983 pgdat->kswapd_max_order = 0; 1984 1985 for (j = 0; j < MAX_NR_ZONES; j++) { 1986 struct zone *zone = pgdat->node_zones + j; 1987 unsigned long size, realsize; 1988 1989 realsize = size = zones_size[j]; 1990 if (zholes_size) 1991 realsize -= zholes_size[j]; 1992 1993 if (!is_highmem_idx(j)) 1994 nr_kernel_pages += realsize; 1995 nr_all_pages += realsize; 1996 1997 zone->spanned_pages = size; 1998 zone->present_pages = realsize; 1999#ifdef CONFIG_NUMA 2000 zone->min_unmapped_ratio = (realsize*sysctl_min_unmapped_ratio) 2001 / 100; 2002#endif 2003 zone->name = zone_names[j]; 2004 spin_lock_init(&zone->lock); 2005 spin_lock_init(&zone->lru_lock); 2006 zone_seqlock_init(zone); 2007 zone->zone_pgdat = pgdat; 2008 zone->free_pages = 0; 2009 2010 zone->temp_priority = zone->prev_priority = DEF_PRIORITY; 2011 2012 zone_pcp_init(zone); 2013 INIT_LIST_HEAD(&zone->active_list); 2014 INIT_LIST_HEAD(&zone->inactive_list); 2015 zone->nr_scan_active = 0; 2016 zone->nr_scan_inactive = 0; 2017 zone->nr_active = 0; 2018 zone->nr_inactive = 0; 2019 zap_zone_vm_stats(zone); 2020 atomic_set(&zone->reclaim_in_progress, 0); 2021 if (!size) 2022 continue; 2023 2024 zonetable_add(zone, nid, j, zone_start_pfn, size); 2025 ret = init_currently_empty_zone(zone, zone_start_pfn, size); 2026 BUG_ON(ret); 2027 zone_start_pfn += size; 2028 } 2029} 2030 2031static void __init alloc_node_mem_map(struct pglist_data *pgdat) 2032{ 2033 /* Skip empty nodes */ 2034 if (!pgdat->node_spanned_pages) 2035 return; 2036 2037#ifdef CONFIG_FLAT_NODE_MEM_MAP 2038 /* ia64 gets its own node_mem_map, before this, without bootmem */ 2039 if (!pgdat->node_mem_map) { 2040 unsigned long size, start, end; 2041 struct page *map; 2042 2043 /* 2044 * The zone's endpoints aren't required to be MAX_ORDER 2045 * aligned but the node_mem_map endpoints must be in order 2046 * for the buddy allocator to function correctly. 2047 */ 2048 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 2049 end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 2050 end = ALIGN(end, MAX_ORDER_NR_PAGES); 2051 size = (end - start) * sizeof(struct page); 2052 map = alloc_remap(pgdat->node_id, size); 2053 if (!map) 2054 map = alloc_bootmem_node(pgdat, size); 2055 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 2056 } 2057#ifdef CONFIG_FLATMEM 2058 /* 2059 * With no DISCONTIG, the global mem_map is just set as node 0's 2060 */ 2061 if (pgdat == NODE_DATA(0)) 2062 mem_map = NODE_DATA(0)->node_mem_map; 2063#endif 2064#endif /* CONFIG_FLAT_NODE_MEM_MAP */ 2065} 2066 2067void __meminit free_area_init_node(int nid, struct pglist_data *pgdat, 2068 unsigned long *zones_size, unsigned long node_start_pfn, 2069 unsigned long *zholes_size) 2070{ 2071 pgdat->node_id = nid; 2072 pgdat->node_start_pfn = node_start_pfn; 2073 calculate_zone_totalpages(pgdat, zones_size, zholes_size); 2074 2075 alloc_node_mem_map(pgdat); 2076 2077 free_area_init_core(pgdat, zones_size, zholes_size); 2078} 2079 2080#ifndef CONFIG_NEED_MULTIPLE_NODES 2081static bootmem_data_t contig_bootmem_data; 2082struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data }; 2083 2084EXPORT_SYMBOL(contig_page_data); 2085#endif 2086 2087void __init free_area_init(unsigned long *zones_size) 2088{ 2089 free_area_init_node(0, NODE_DATA(0), zones_size, 2090 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 2091} 2092 2093#ifdef CONFIG_HOTPLUG_CPU 2094static int page_alloc_cpu_notify(struct notifier_block *self, 2095 unsigned long action, void *hcpu) 2096{ 2097 int cpu = (unsigned long)hcpu; 2098 2099 if (action == CPU_DEAD) { 2100 local_irq_disable(); 2101 __drain_pages(cpu); 2102 vm_events_fold_cpu(cpu); 2103 local_irq_enable(); 2104 refresh_cpu_vm_stats(cpu); 2105 } 2106 return NOTIFY_OK; 2107} 2108#endif /* CONFIG_HOTPLUG_CPU */ 2109 2110void __init page_alloc_init(void) 2111{ 2112 hotcpu_notifier(page_alloc_cpu_notify, 0); 2113} 2114 2115/* 2116 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio 2117 * or min_free_kbytes changes. 2118 */ 2119static void calculate_totalreserve_pages(void) 2120{ 2121 struct pglist_data *pgdat; 2122 unsigned long reserve_pages = 0; 2123 enum zone_type i, j; 2124 2125 for_each_online_pgdat(pgdat) { 2126 for (i = 0; i < MAX_NR_ZONES; i++) { 2127 struct zone *zone = pgdat->node_zones + i; 2128 unsigned long max = 0; 2129 2130 /* Find valid and maximum lowmem_reserve in the zone */ 2131 for (j = i; j < MAX_NR_ZONES; j++) { 2132 if (zone->lowmem_reserve[j] > max) 2133 max = zone->lowmem_reserve[j]; 2134 } 2135 2136 /* we treat pages_high as reserved pages. */ 2137 max += zone->pages_high; 2138 2139 if (max > zone->present_pages) 2140 max = zone->present_pages; 2141 reserve_pages += max; 2142 } 2143 } 2144 totalreserve_pages = reserve_pages; 2145} 2146 2147/* 2148 * setup_per_zone_lowmem_reserve - called whenever 2149 * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone 2150 * has a correct pages reserved value, so an adequate number of 2151 * pages are left in the zone after a successful __alloc_pages(). 2152 */ 2153static void setup_per_zone_lowmem_reserve(void) 2154{ 2155 struct pglist_data *pgdat; 2156 enum zone_type j, idx; 2157 2158 for_each_online_pgdat(pgdat) { 2159 for (j = 0; j < MAX_NR_ZONES; j++) { 2160 struct zone *zone = pgdat->node_zones + j; 2161 unsigned long present_pages = zone->present_pages; 2162 2163 zone->lowmem_reserve[j] = 0; 2164 2165 idx = j; 2166 while (idx) { 2167 struct zone *lower_zone; 2168 2169 idx--; 2170 2171 if (sysctl_lowmem_reserve_ratio[idx] < 1) 2172 sysctl_lowmem_reserve_ratio[idx] = 1; 2173 2174 lower_zone = pgdat->node_zones + idx; 2175 lower_zone->lowmem_reserve[j] = present_pages / 2176 sysctl_lowmem_reserve_ratio[idx]; 2177 present_pages += lower_zone->present_pages; 2178 } 2179 } 2180 } 2181 2182 /* update totalreserve_pages */ 2183 calculate_totalreserve_pages(); 2184} 2185 2186/* 2187 * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures 2188 * that the pages_{min,low,high} values for each zone are set correctly 2189 * with respect to min_free_kbytes. 2190 */ 2191void setup_per_zone_pages_min(void) 2192{ 2193 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 2194 unsigned long lowmem_pages = 0; 2195 struct zone *zone; 2196 unsigned long flags; 2197 2198 /* Calculate total number of !ZONE_HIGHMEM pages */ 2199 for_each_zone(zone) { 2200 if (!is_highmem(zone)) 2201 lowmem_pages += zone->present_pages; 2202 } 2203 2204 for_each_zone(zone) { 2205 u64 tmp; 2206 2207 spin_lock_irqsave(&zone->lru_lock, flags); 2208 tmp = (u64)pages_min * zone->present_pages; 2209 do_div(tmp, lowmem_pages); 2210 if (is_highmem(zone)) { 2211 /* 2212 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 2213 * need highmem pages, so cap pages_min to a small 2214 * value here. 2215 * 2216 * The (pages_high-pages_low) and (pages_low-pages_min) 2217 * deltas controls asynch page reclaim, and so should 2218 * not be capped for highmem. 2219 */ 2220 int min_pages; 2221 2222 min_pages = zone->present_pages / 1024; 2223 if (min_pages < SWAP_CLUSTER_MAX) 2224 min_pages = SWAP_CLUSTER_MAX; 2225 if (min_pages > 128) 2226 min_pages = 128; 2227 zone->pages_min = min_pages; 2228 } else { 2229 /* 2230 * If it's a lowmem zone, reserve a number of pages 2231 * proportionate to the zone's size. 2232 */ 2233 zone->pages_min = tmp; 2234 } 2235 2236 zone->pages_low = zone->pages_min + (tmp >> 2); 2237 zone->pages_high = zone->pages_min + (tmp >> 1); 2238 spin_unlock_irqrestore(&zone->lru_lock, flags); 2239 } 2240 2241 /* update totalreserve_pages */ 2242 calculate_totalreserve_pages(); 2243} 2244 2245/* 2246 * Initialise min_free_kbytes. 2247 * 2248 * For small machines we want it small (128k min). For large machines 2249 * we want it large (64MB max). But it is not linear, because network 2250 * bandwidth does not increase linearly with machine size. We use 2251 * 2252 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 2253 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 2254 * 2255 * which yields 2256 * 2257 * 16MB: 512k 2258 * 32MB: 724k 2259 * 64MB: 1024k 2260 * 128MB: 1448k 2261 * 256MB: 2048k 2262 * 512MB: 2896k 2263 * 1024MB: 4096k 2264 * 2048MB: 5792k 2265 * 4096MB: 8192k 2266 * 8192MB: 11584k 2267 * 16384MB: 16384k 2268 */ 2269static int __init init_per_zone_pages_min(void) 2270{ 2271 unsigned long lowmem_kbytes; 2272 2273 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 2274 2275 min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 2276 if (min_free_kbytes < 128) 2277 min_free_kbytes = 128; 2278 if (min_free_kbytes > 65536) 2279 min_free_kbytes = 65536; 2280 setup_per_zone_pages_min(); 2281 setup_per_zone_lowmem_reserve(); 2282 return 0; 2283} 2284module_init(init_per_zone_pages_min) 2285 2286/* 2287 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 2288 * that we can call two helper functions whenever min_free_kbytes 2289 * changes. 2290 */ 2291int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 2292 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2293{ 2294 proc_dointvec(table, write, file, buffer, length, ppos); 2295 setup_per_zone_pages_min(); 2296 return 0; 2297} 2298 2299#ifdef CONFIG_NUMA 2300int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, 2301 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2302{ 2303 struct zone *zone; 2304 int rc; 2305 2306 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 2307 if (rc) 2308 return rc; 2309 2310 for_each_zone(zone) 2311 zone->min_unmapped_ratio = (zone->present_pages * 2312 sysctl_min_unmapped_ratio) / 100; 2313 return 0; 2314} 2315#endif 2316 2317/* 2318 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 2319 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 2320 * whenever sysctl_lowmem_reserve_ratio changes. 2321 * 2322 * The reserve ratio obviously has absolutely no relation with the 2323 * pages_min watermarks. The lowmem reserve ratio can only make sense 2324 * if in function of the boot time zone sizes. 2325 */ 2326int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 2327 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2328{ 2329 proc_dointvec_minmax(table, write, file, buffer, length, ppos); 2330 setup_per_zone_lowmem_reserve(); 2331 return 0; 2332} 2333 2334/* 2335 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 2336 * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist 2337 * can have before it gets flushed back to buddy allocator. 2338 */ 2339 2340int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 2341 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2342{ 2343 struct zone *zone; 2344 unsigned int cpu; 2345 int ret; 2346 2347 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 2348 if (!write || (ret == -EINVAL)) 2349 return ret; 2350 for_each_zone(zone) { 2351 for_each_online_cpu(cpu) { 2352 unsigned long high; 2353 high = zone->present_pages / percpu_pagelist_fraction; 2354 setup_pagelist_highmark(zone_pcp(zone, cpu), high); 2355 } 2356 } 2357 return 0; 2358} 2359 2360int hashdist = HASHDIST_DEFAULT; 2361 2362#ifdef CONFIG_NUMA 2363static int __init set_hashdist(char *str) 2364{ 2365 if (!str) 2366 return 0; 2367 hashdist = simple_strtoul(str, &str, 0); 2368 return 1; 2369} 2370__setup("hashdist=", set_hashdist); 2371#endif 2372 2373/* 2374 * allocate a large system hash table from bootmem 2375 * - it is assumed that the hash table must contain an exact power-of-2 2376 * quantity of entries 2377 * - limit is the number of hash buckets, not the total allocation size 2378 */ 2379void *__init alloc_large_system_hash(const char *tablename, 2380 unsigned long bucketsize, 2381 unsigned long numentries, 2382 int scale, 2383 int flags, 2384 unsigned int *_hash_shift, 2385 unsigned int *_hash_mask, 2386 unsigned long limit) 2387{ 2388 unsigned long long max = limit; 2389 unsigned long log2qty, size; 2390 void *table = NULL; 2391 2392 /* allow the kernel cmdline to have a say */ 2393 if (!numentries) { 2394 /* round applicable memory size up to nearest megabyte */ 2395 numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages; 2396 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 2397 numentries >>= 20 - PAGE_SHIFT; 2398 numentries <<= 20 - PAGE_SHIFT; 2399 2400 /* limit to 1 bucket per 2^scale bytes of low memory */ 2401 if (scale > PAGE_SHIFT) 2402 numentries >>= (scale - PAGE_SHIFT); 2403 else 2404 numentries <<= (PAGE_SHIFT - scale); 2405 } 2406 numentries = roundup_pow_of_two(numentries); 2407 2408 /* limit allocation size to 1/16 total memory by default */ 2409 if (max == 0) { 2410 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 2411 do_div(max, bucketsize); 2412 } 2413 2414 if (numentries > max) 2415 numentries = max; 2416 2417 log2qty = long_log2(numentries); 2418 2419 do { 2420 size = bucketsize << log2qty; 2421 if (flags & HASH_EARLY) 2422 table = alloc_bootmem(size); 2423 else if (hashdist) 2424 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 2425 else { 2426 unsigned long order; 2427 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++) 2428 ; 2429 table = (void*) __get_free_pages(GFP_ATOMIC, order); 2430 } 2431 } while (!table && size > PAGE_SIZE && --log2qty); 2432 2433 if (!table) 2434 panic("Failed to allocate %s hash table\n", tablename); 2435 2436 printk("%s hash table entries: %d (order: %d, %lu bytes)\n", 2437 tablename, 2438 (1U << log2qty), 2439 long_log2(size) - PAGE_SHIFT, 2440 size); 2441 2442 if (_hash_shift) 2443 *_hash_shift = log2qty; 2444 if (_hash_mask) 2445 *_hash_mask = (1 << log2qty) - 1; 2446 2447 return table; 2448} 2449 2450#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE 2451struct page *pfn_to_page(unsigned long pfn) 2452{ 2453 return __pfn_to_page(pfn); 2454} 2455unsigned long page_to_pfn(struct page *page) 2456{ 2457 return __page_to_pfn(page); 2458} 2459EXPORT_SYMBOL(pfn_to_page); 2460EXPORT_SYMBOL(page_to_pfn); 2461#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ 2462