linux/mm/page_alloc.c at revision a94b3ab7eab4edcc9b2cb474b188f774c331adf7
1/* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17#include <linux/config.h> 18#include <linux/stddef.h> 19#include <linux/mm.h> 20#include <linux/swap.h> 21#include <linux/interrupt.h> 22#include <linux/pagemap.h> 23#include <linux/bootmem.h> 24#include <linux/compiler.h> 25#include <linux/kernel.h> 26#include <linux/module.h> 27#include <linux/suspend.h> 28#include <linux/pagevec.h> 29#include <linux/blkdev.h> 30#include <linux/slab.h> 31#include <linux/notifier.h> 32#include <linux/topology.h> 33#include <linux/sysctl.h> 34#include <linux/cpu.h> 35#include <linux/cpuset.h> 36#include <linux/memory_hotplug.h> 37#include <linux/nodemask.h> 38#include <linux/vmalloc.h> 39 40#include <asm/tlbflush.h> 41#include "internal.h" 42 43/* 44 * MCD - HACK: Find somewhere to initialize this EARLY, or make this 45 * initializer cleaner 46 */ 47nodemask_t node_online_map __read_mostly = { { [0] = 1UL } }; 48EXPORT_SYMBOL(node_online_map); 49nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL; 50EXPORT_SYMBOL(node_possible_map); 51struct pglist_data *pgdat_list __read_mostly; 52unsigned long totalram_pages __read_mostly; 53unsigned long totalhigh_pages __read_mostly; 54long nr_swap_pages; 55 56/* 57 * results with 256, 32 in the lowmem_reserve sysctl: 58 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 59 * 1G machine -> (16M dma, 784M normal, 224M high) 60 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 61 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 62 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA 63 * 64 * TBD: should special case ZONE_DMA32 machines here - in those we normally 65 * don't need any ZONE_NORMAL reservation 66 */ 67int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 }; 68 69EXPORT_SYMBOL(totalram_pages); 70 71/* 72 * Used by page_zone() to look up the address of the struct zone whose 73 * id is encoded in the upper bits of page->flags 74 */ 75struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly; 76EXPORT_SYMBOL(zone_table); 77 78static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" }; 79int min_free_kbytes = 1024; 80 81unsigned long __initdata nr_kernel_pages; 82unsigned long __initdata nr_all_pages; 83 84static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 85{ 86 int ret = 0; 87 unsigned seq; 88 unsigned long pfn = page_to_pfn(page); 89 90 do { 91 seq = zone_span_seqbegin(zone); 92 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 93 ret = 1; 94 else if (pfn < zone->zone_start_pfn) 95 ret = 1; 96 } while (zone_span_seqretry(zone, seq)); 97 98 return ret; 99} 100 101static int page_is_consistent(struct zone *zone, struct page *page) 102{ 103#ifdef CONFIG_HOLES_IN_ZONE 104 if (!pfn_valid(page_to_pfn(page))) 105 return 0; 106#endif 107 if (zone != page_zone(page)) 108 return 0; 109 110 return 1; 111} 112/* 113 * Temporary debugging check for 
pages not lying within a given zone. 114 */ 115static int bad_range(struct zone *zone, struct page *page) 116{ 117 if (page_outside_zone_boundaries(zone, page)) 118 return 1; 119 if (!page_is_consistent(zone, page)) 120 return 1; 121 122 return 0; 123} 124 125static void bad_page(const char *function, struct page *page) 126{ 127 printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n", 128 function, current->comm, page); 129 printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n", 130 (int)(2*sizeof(unsigned long)), (unsigned long)page->flags, 131 page->mapping, page_mapcount(page), page_count(page)); 132 printk(KERN_EMERG "Backtrace:\n"); 133 dump_stack(); 134 printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"); 135 page->flags &= ~(1 << PG_lru | 136 1 << PG_private | 137 1 << PG_locked | 138 1 << PG_active | 139 1 << PG_dirty | 140 1 << PG_reclaim | 141 1 << PG_slab | 142 1 << PG_swapcache | 143 1 << PG_writeback ); 144 set_page_count(page, 0); 145 reset_page_mapcount(page); 146 page->mapping = NULL; 147 add_taint(TAINT_BAD_PAGE); 148} 149 150/* 151 * Higher-order pages are called "compound pages". They are structured thusly: 152 * 153 * The first PAGE_SIZE page is called the "head page". 154 * 155 * The remaining PAGE_SIZE pages are called "tail pages". 156 * 157 * All pages have PG_compound set. All pages have their ->private pointing at 158 * the head page (even the head page has this). 159 * 160 * The first tail page's ->mapping, if non-zero, holds the address of the 161 * compound page's put_page() function. 162 * 163 * The order of the allocation is stored in the first tail page's ->index 164 * This is only for debug at present. This usage means that zero-order pages 165 * may not be compound. 166 */ 167static void prep_compound_page(struct page *page, unsigned long order) 168{ 169 int i; 170 int nr_pages = 1 << order; 171 172 page[1].mapping = NULL; 173 page[1].index = order; 174 for (i = 0; i < nr_pages; i++) { 175 struct page *p = page + i; 176 177 SetPageCompound(p); 178 set_page_private(p, (unsigned long)page); 179 } 180} 181 182static void destroy_compound_page(struct page *page, unsigned long order) 183{ 184 int i; 185 int nr_pages = 1 << order; 186 187 if (!PageCompound(page)) 188 return; 189 190 if (page[1].index != order) 191 bad_page(__FUNCTION__, page); 192 193 for (i = 0; i < nr_pages; i++) { 194 struct page *p = page + i; 195 196 if (!PageCompound(p)) 197 bad_page(__FUNCTION__, page); 198 if (page_private(p) != (unsigned long)page) 199 bad_page(__FUNCTION__, page); 200 ClearPageCompound(p); 201 } 202} 203 204/* 205 * function for dealing with page's order in buddy system. 206 * zone->lock is already acquired when we use these. 207 * So, we don't need atomic page->flags operations here. 208 */ 209static inline unsigned long page_order(struct page *page) { 210 return page_private(page); 211} 212 213static inline void set_page_order(struct page *page, int order) { 214 set_page_private(page, order); 215 __SetPagePrivate(page); 216} 217 218static inline void rmv_page_order(struct page *page) 219{ 220 __ClearPagePrivate(page); 221 set_page_private(page, 0); 222} 223 224/* 225 * Locate the struct page for both the matching buddy in our 226 * pair (buddy1) and the combined O(n+1) page they form (page). 
227 * 228 * 1) Any buddy B1 will have an order O twin B2 which satisfies 229 * the following equation: 230 * B2 = B1 ^ (1 << O) 231 * For example, if the starting buddy (buddy1) is #8 its order 232 * 1 buddy is #10: 233 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 234 * 235 * 2) Any buddy B will have an order O+1 parent P which 236 * satisfies the following equation: 237 * P = B & ~(1 << O) 238 * 239 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER 240 */ 241static inline struct page * 242__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order) 243{ 244 unsigned long buddy_idx = page_idx ^ (1 << order); 245 246 return page + (buddy_idx - page_idx); 247} 248 249static inline unsigned long 250__find_combined_index(unsigned long page_idx, unsigned int order) 251{ 252 return (page_idx & ~(1 << order)); 253} 254 255/* 256 * This function checks whether a page is free && is the buddy 257 * we can coalesce a page and its buddy if 258 * (a) the buddy is free && 259 * (b) the buddy is on the buddy system && 260 * (c) a page and its buddy have the same order. 261 * for recording page's order, we use page_private(page) and PG_private. 262 * 263 */ 264static inline int page_is_buddy(struct page *page, int order) 265{ 266 if (PagePrivate(page) && 267 (page_order(page) == order) && 268 page_count(page) == 0) 269 return 1; 270 return 0; 271} 272 273/* 274 * Freeing function for a buddy system allocator. 275 * 276 * The concept of a buddy system is to maintain a direct-mapped table 277 * (containing bit values) for memory blocks of various "orders". 278 * The bottom level table contains the map for the smallest allocatable 279 * units of memory (here, pages), and each level above it describes 280 * pairs of units from the levels below, hence, "buddies". 281 * At a high level, all that happens here is marking the table entry 282 * at the bottom level available, and propagating the changes upward 283 * as necessary, plus some accounting needed to play nicely with other 284 * parts of the VM system. 285 * At each level, we keep a list of pages, which are heads of contiguous 286 * free pages of length (1 << order) and marked with PG_private. A page's 287 * order is recorded in the page_private(page) field. 288 * So when we are allocating or freeing one, we can derive the state of the 289 * other. That is, if we allocate a small block, and both were 290 * free, the remainder of the region must be split into blocks. 291 * If a block is freed, and its buddy is also free, then this 292 * triggers coalescing into a block of larger size. 293 * 294 * -- wli 295 */ 296 297static inline void __free_pages_bulk (struct page *page, 298 struct zone *zone, unsigned int order) 299{ 300 unsigned long page_idx; 301 int order_size = 1 << order; 302 303 if (unlikely(order)) 304 destroy_compound_page(page, order); 305 306 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 307 308 BUG_ON(page_idx & (order_size - 1)); 309 BUG_ON(bad_range(zone, page)); 310 311 zone->free_pages += order_size; 312 while (order < MAX_ORDER-1) { 313 unsigned long combined_idx; 314 struct free_area *area; 315 struct page *buddy; 316 317 combined_idx = __find_combined_index(page_idx, order); 318 buddy = __page_find_buddy(page, page_idx, order); 319 320 if (bad_range(zone, buddy)) 321 break; 322 if (!page_is_buddy(buddy, order)) 323 break; /* Move the buddy up one level. 
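 * For example: with page_idx 8 at order 1, __page_find_buddy() returns the
 * page at index 8 ^ (1 << 1) = 10 and __find_combined_index() returns
 * 8 & ~(1 << 1) = 8, so the order-1 blocks at 8 and 10 are merged into a
 * single order-2 block starting at index 8 and the loop tries again one
 * order higher.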
*/ 324 list_del(&buddy->lru); 325 area = zone->free_area + order; 326 area->nr_free--; 327 rmv_page_order(buddy); 328 page = page + (combined_idx - page_idx); 329 page_idx = combined_idx; 330 order++; 331 } 332 set_page_order(page, order); 333 list_add(&page->lru, &zone->free_area[order].free_list); 334 zone->free_area[order].nr_free++; 335} 336 337static inline int free_pages_check(const char *function, struct page *page) 338{ 339 if ( page_mapcount(page) || 340 page->mapping != NULL || 341 page_count(page) != 0 || 342 (page->flags & ( 343 1 << PG_lru | 344 1 << PG_private | 345 1 << PG_locked | 346 1 << PG_active | 347 1 << PG_reclaim | 348 1 << PG_slab | 349 1 << PG_swapcache | 350 1 << PG_writeback | 351 1 << PG_reserved ))) 352 bad_page(function, page); 353 if (PageDirty(page)) 354 __ClearPageDirty(page); 355 /* 356 * For now, we report if PG_reserved was found set, but do not 357 * clear it, and do not free the page. But we shall soon need 358 * to do more, for when the ZERO_PAGE count wraps negative. 359 */ 360 return PageReserved(page); 361} 362 363/* 364 * Frees a list of pages. 365 * Assumes all pages on list are in same zone, and of same order. 366 * count is the number of pages to free. 367 * 368 * If the zone was previously in an "all pages pinned" state then look to 369 * see if this freeing clears that state. 370 * 371 * And clear the zone's pages_scanned counter, to hold off the "all pages are 372 * pinned" detection logic. 373 */ 374static int 375free_pages_bulk(struct zone *zone, int count, 376 struct list_head *list, unsigned int order) 377{ 378 unsigned long flags; 379 struct page *page = NULL; 380 int ret = 0; 381 382 spin_lock_irqsave(&zone->lock, flags); 383 zone->all_unreclaimable = 0; 384 zone->pages_scanned = 0; 385 while (!list_empty(list) && count--) { 386 page = list_entry(list->prev, struct page, lru); 387 /* have to delete it as __free_pages_bulk list manipulates */ 388 list_del(&page->lru); 389 __free_pages_bulk(page, zone, order); 390 ret++; 391 } 392 spin_unlock_irqrestore(&zone->lock, flags); 393 return ret; 394} 395 396void __free_pages_ok(struct page *page, unsigned int order) 397{ 398 LIST_HEAD(list); 399 int i; 400 int reserved = 0; 401 402 arch_free_page(page, order); 403 404#ifndef CONFIG_MMU 405 if (order > 0) 406 for (i = 1 ; i < (1 << order) ; ++i) 407 __put_page(page + i); 408#endif 409 410 for (i = 0 ; i < (1 << order) ; ++i) 411 reserved += free_pages_check(__FUNCTION__, page + i); 412 if (reserved) 413 return; 414 415 list_add(&page->lru, &list); 416 mod_page_state(pgfree, 1 << order); 417 kernel_map_pages(page, 1<<order, 0); 418 free_pages_bulk(page_zone(page), 1, &list, order); 419} 420 421 422/* 423 * The order of subdivision here is critical for the IO subsystem. 424 * Please do not alter this order without good reasons and regression 425 * testing. Specifically, as large blocks of memory are subdivided, 426 * the order in which smaller blocks are delivered depends on the order 427 * they're subdivided in this function. This is the primary factor 428 * influencing the order in which pages are delivered to the IO 429 * subsystem according to empirical testing, and this is also justified 430 * by considering the behavior of a buddy system containing a single 431 * large block of memory acted on by a series of small allocations. 432 * This behavior is a critical factor in sglist merging's success. 
433 * 434 * -- wli 435 */ 436static inline struct page * 437expand(struct zone *zone, struct page *page, 438 int low, int high, struct free_area *area) 439{ 440 unsigned long size = 1 << high; 441 442 while (high > low) { 443 area--; 444 high--; 445 size >>= 1; 446 BUG_ON(bad_range(zone, &page[size])); 447 list_add(&page[size].lru, &area->free_list); 448 area->nr_free++; 449 set_page_order(&page[size], high); 450 } 451 return page; 452} 453 454void set_page_refs(struct page *page, int order) 455{ 456#ifdef CONFIG_MMU 457 set_page_count(page, 1); 458#else 459 int i; 460 461 /* 462 * We need to reference all the pages for this order, otherwise if 463 * anyone accesses one of the pages with (get/put) it will be freed. 464 * - eg: access_process_vm() 465 */ 466 for (i = 0; i < (1 << order); i++) 467 set_page_count(page + i, 1); 468#endif /* CONFIG_MMU */ 469} 470 471/* 472 * This page is about to be returned from the page allocator 473 */ 474static int prep_new_page(struct page *page, int order) 475{ 476 if ( page_mapcount(page) || 477 page->mapping != NULL || 478 page_count(page) != 0 || 479 (page->flags & ( 480 1 << PG_lru | 481 1 << PG_private | 482 1 << PG_locked | 483 1 << PG_active | 484 1 << PG_dirty | 485 1 << PG_reclaim | 486 1 << PG_slab | 487 1 << PG_swapcache | 488 1 << PG_writeback | 489 1 << PG_reserved ))) 490 bad_page(__FUNCTION__, page); 491 492 /* 493 * For now, we report if PG_reserved was found set, but do not 494 * clear it, and do not allocate the page: as a safety net. 495 */ 496 if (PageReserved(page)) 497 return 1; 498 499 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 500 1 << PG_referenced | 1 << PG_arch_1 | 501 1 << PG_checked | 1 << PG_mappedtodisk); 502 set_page_private(page, 0); 503 set_page_refs(page, order); 504 kernel_map_pages(page, 1 << order, 1); 505 return 0; 506} 507 508/* 509 * Do the hard work of removing an element from the buddy allocator. 510 * Call me with the zone->lock already held. 511 */ 512static struct page *__rmqueue(struct zone *zone, unsigned int order) 513{ 514 struct free_area * area; 515 unsigned int current_order; 516 struct page *page; 517 518 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 519 area = zone->free_area + current_order; 520 if (list_empty(&area->free_list)) 521 continue; 522 523 page = list_entry(area->free_list.next, struct page, lru); 524 list_del(&page->lru); 525 rmv_page_order(page); 526 area->nr_free--; 527 zone->free_pages -= 1UL << order; 528 return expand(zone, page, order, current_order, area); 529 } 530 531 return NULL; 532} 533 534/* 535 * Obtain a specified number of elements from the buddy allocator, all under 536 * a single hold of the lock, for efficiency. Add them to the supplied list. 537 * Returns the number of new pages which were placed at *list. 
538 */ 539static int rmqueue_bulk(struct zone *zone, unsigned int order, 540 unsigned long count, struct list_head *list) 541{ 542 unsigned long flags; 543 int i; 544 int allocated = 0; 545 struct page *page; 546 547 spin_lock_irqsave(&zone->lock, flags); 548 for (i = 0; i < count; ++i) { 549 page = __rmqueue(zone, order); 550 if (page == NULL) 551 break; 552 allocated++; 553 list_add_tail(&page->lru, list); 554 } 555 spin_unlock_irqrestore(&zone->lock, flags); 556 return allocated; 557} 558 559#ifdef CONFIG_NUMA 560/* Called from the slab reaper to drain remote pagesets */ 561void drain_remote_pages(void) 562{ 563 struct zone *zone; 564 int i; 565 unsigned long flags; 566 567 local_irq_save(flags); 568 for_each_zone(zone) { 569 struct per_cpu_pageset *pset; 570 571 /* Do not drain local pagesets */ 572 if (zone->zone_pgdat->node_id == numa_node_id()) 573 continue; 574 575 pset = zone->pageset[smp_processor_id()]; 576 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 577 struct per_cpu_pages *pcp; 578 579 pcp = &pset->pcp[i]; 580 if (pcp->count) 581 pcp->count -= free_pages_bulk(zone, pcp->count, 582 &pcp->list, 0); 583 } 584 } 585 local_irq_restore(flags); 586} 587#endif 588 589#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU) 590static void __drain_pages(unsigned int cpu) 591{ 592 struct zone *zone; 593 int i; 594 595 for_each_zone(zone) { 596 struct per_cpu_pageset *pset; 597 598 pset = zone_pcp(zone, cpu); 599 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 600 struct per_cpu_pages *pcp; 601 602 pcp = &pset->pcp[i]; 603 pcp->count -= free_pages_bulk(zone, pcp->count, 604 &pcp->list, 0); 605 } 606 } 607} 608#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */ 609 610#ifdef CONFIG_PM 611 612void mark_free_pages(struct zone *zone) 613{ 614 unsigned long zone_pfn, flags; 615 int order; 616 struct list_head *curr; 617 618 if (!zone->spanned_pages) 619 return; 620 621 spin_lock_irqsave(&zone->lock, flags); 622 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) 623 ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn)); 624 625 for (order = MAX_ORDER - 1; order >= 0; --order) 626 list_for_each(curr, &zone->free_area[order].free_list) { 627 unsigned long start_pfn, i; 628 629 start_pfn = page_to_pfn(list_entry(curr, struct page, lru)); 630 631 for (i=0; i < (1<<order); i++) 632 SetPageNosaveFree(pfn_to_page(start_pfn+i)); 633 } 634 spin_unlock_irqrestore(&zone->lock, flags); 635} 636 637/* 638 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 
639 */ 640void drain_local_pages(void) 641{ 642 unsigned long flags; 643 644 local_irq_save(flags); 645 __drain_pages(smp_processor_id()); 646 local_irq_restore(flags); 647} 648#endif /* CONFIG_PM */ 649 650static void zone_statistics(struct zonelist *zonelist, struct zone *z) 651{ 652#ifdef CONFIG_NUMA 653 unsigned long flags; 654 int cpu; 655 pg_data_t *pg = z->zone_pgdat; 656 pg_data_t *orig = zonelist->zones[0]->zone_pgdat; 657 struct per_cpu_pageset *p; 658 659 local_irq_save(flags); 660 cpu = smp_processor_id(); 661 p = zone_pcp(z,cpu); 662 if (pg == orig) { 663 p->numa_hit++; 664 } else { 665 p->numa_miss++; 666 zone_pcp(zonelist->zones[0], cpu)->numa_foreign++; 667 } 668 if (pg == NODE_DATA(numa_node_id())) 669 p->local_node++; 670 else 671 p->other_node++; 672 local_irq_restore(flags); 673#endif 674} 675 676/* 677 * Free a 0-order page 678 */ 679static void FASTCALL(free_hot_cold_page(struct page *page, int cold)); 680static void fastcall free_hot_cold_page(struct page *page, int cold) 681{ 682 struct zone *zone = page_zone(page); 683 struct per_cpu_pages *pcp; 684 unsigned long flags; 685 686 arch_free_page(page, 0); 687 688 if (PageAnon(page)) 689 page->mapping = NULL; 690 if (free_pages_check(__FUNCTION__, page)) 691 return; 692 693 inc_page_state(pgfree); 694 kernel_map_pages(page, 1, 0); 695 696 pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; 697 local_irq_save(flags); 698 list_add(&page->lru, &pcp->list); 699 pcp->count++; 700 if (pcp->count >= pcp->high) 701 pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 702 local_irq_restore(flags); 703 put_cpu(); 704} 705 706void fastcall free_hot_page(struct page *page) 707{ 708 free_hot_cold_page(page, 0); 709} 710 711void fastcall free_cold_page(struct page *page) 712{ 713 free_hot_cold_page(page, 1); 714} 715 716static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 717{ 718 int i; 719 720 BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM); 721 for(i = 0; i < (1 << order); i++) 722 clear_highpage(page + i); 723} 724 725/* 726 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 727 * we cheat by calling it from here, in the order > 0 path. Saves a branch 728 * or two. 
729 */ 730static struct page * 731buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags) 732{ 733 unsigned long flags; 734 struct page *page; 735 int cold = !!(gfp_flags & __GFP_COLD); 736 737again: 738 if (order == 0) { 739 struct per_cpu_pages *pcp; 740 741 page = NULL; 742 pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; 743 local_irq_save(flags); 744 if (pcp->count <= pcp->low) 745 pcp->count += rmqueue_bulk(zone, 0, 746 pcp->batch, &pcp->list); 747 if (pcp->count) { 748 page = list_entry(pcp->list.next, struct page, lru); 749 list_del(&page->lru); 750 pcp->count--; 751 } 752 local_irq_restore(flags); 753 put_cpu(); 754 } else { 755 spin_lock_irqsave(&zone->lock, flags); 756 page = __rmqueue(zone, order); 757 spin_unlock_irqrestore(&zone->lock, flags); 758 } 759 760 if (page != NULL) { 761 BUG_ON(bad_range(zone, page)); 762 mod_page_state_zone(zone, pgalloc, 1 << order); 763 if (prep_new_page(page, order)) 764 goto again; 765 766 if (gfp_flags & __GFP_ZERO) 767 prep_zero_page(page, order, gfp_flags); 768 769 if (order && (gfp_flags & __GFP_COMP)) 770 prep_compound_page(page, order); 771 } 772 return page; 773} 774 775#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 776#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ 777#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ 778#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ 779#define ALLOC_HARDER 0x10 /* try to alloc harder */ 780#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 781#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 782 783/* 784 * Return 1 if free pages are above 'mark'. This takes into account the order 785 * of the allocation. 786 */ 787int zone_watermark_ok(struct zone *z, int order, unsigned long mark, 788 int classzone_idx, int alloc_flags) 789{ 790 /* free_pages my go negative - that's OK */ 791 long min = mark, free_pages = z->free_pages - (1 << order) + 1; 792 int o; 793 794 if (alloc_flags & ALLOC_HIGH) 795 min -= min / 2; 796 if (alloc_flags & ALLOC_HARDER) 797 min -= min / 4; 798 799 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 800 return 0; 801 for (o = 0; o < order; o++) { 802 /* At the next order, this order's pages become unavailable */ 803 free_pages -= z->free_area[o].nr_free << o; 804 805 /* Require fewer higher order pages to be free */ 806 min >>= 1; 807 808 if (free_pages <= min) 809 return 0; 810 } 811 return 1; 812} 813 814/* 815 * get_page_from_freeliest goes through the zonelist trying to allocate 816 * a page. 817 */ 818static struct page * 819get_page_from_freelist(gfp_t gfp_mask, unsigned int order, 820 struct zonelist *zonelist, int alloc_flags) 821{ 822 struct zone **z = zonelist->zones; 823 struct page *page = NULL; 824 int classzone_idx = zone_idx(*z); 825 826 /* 827 * Go through the zonelist once, looking for a zone with enough free. 828 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 
829 */ 830 do { 831 if ((alloc_flags & ALLOC_CPUSET) && 832 !cpuset_zone_allowed(*z, gfp_mask)) 833 continue; 834 835 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 836 unsigned long mark; 837 if (alloc_flags & ALLOC_WMARK_MIN) 838 mark = (*z)->pages_min; 839 else if (alloc_flags & ALLOC_WMARK_LOW) 840 mark = (*z)->pages_low; 841 else 842 mark = (*z)->pages_high; 843 if (!zone_watermark_ok(*z, order, mark, 844 classzone_idx, alloc_flags)) 845 continue; 846 } 847 848 page = buffered_rmqueue(*z, order, gfp_mask); 849 if (page) { 850 zone_statistics(zonelist, *z); 851 break; 852 } 853 } while (*(++z) != NULL); 854 return page; 855} 856 857/* 858 * This is the 'heart' of the zoned buddy allocator. 859 */ 860struct page * fastcall 861__alloc_pages(gfp_t gfp_mask, unsigned int order, 862 struct zonelist *zonelist) 863{ 864 const gfp_t wait = gfp_mask & __GFP_WAIT; 865 struct zone **z; 866 struct page *page; 867 struct reclaim_state reclaim_state; 868 struct task_struct *p = current; 869 int do_retry; 870 int alloc_flags; 871 int did_some_progress; 872 873 might_sleep_if(wait); 874 875restart: 876 z = zonelist->zones; /* the list of zones suitable for gfp_mask */ 877 878 if (unlikely(*z == NULL)) { 879 /* Should this ever happen?? */ 880 return NULL; 881 } 882 883 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 884 zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); 885 if (page) 886 goto got_pg; 887 888 do { 889 wakeup_kswapd(*z, order); 890 } while (*(++z)); 891 892 /* 893 * OK, we're below the kswapd watermark and have kicked background 894 * reclaim. Now things get more complex, so set up alloc_flags according 895 * to how we want to proceed. 896 * 897 * The caller may dip into page reserves a bit more if the caller 898 * cannot run direct reclaim, or if the caller has realtime scheduling 899 * policy. 900 */ 901 alloc_flags = ALLOC_WMARK_MIN; 902 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) 903 alloc_flags |= ALLOC_HARDER; 904 if (gfp_mask & __GFP_HIGH) 905 alloc_flags |= ALLOC_HIGH; 906 alloc_flags |= ALLOC_CPUSET; 907 908 /* 909 * Go through the zonelist again. Let __GFP_HIGH and allocations 910 * coming from realtime tasks go deeper into reserves. 911 * 912 * This is the last chance, in general, before the goto nopage. 913 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 914 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 915 */ 916 page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); 917 if (page) 918 goto got_pg; 919 920 /* This allocation should allow future memory freeing. 
*/ 921 922 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) 923 && !in_interrupt()) { 924 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 925nofail_alloc: 926 /* go through the zonelist yet again, ignoring mins */ 927 page = get_page_from_freelist(gfp_mask, order, 928 zonelist, ALLOC_NO_WATERMARKS); 929 if (page) 930 goto got_pg; 931 if (gfp_mask & __GFP_NOFAIL) { 932 blk_congestion_wait(WRITE, HZ/50); 933 goto nofail_alloc; 934 } 935 } 936 goto nopage; 937 } 938 939 /* Atomic allocations - we can't balance anything */ 940 if (!wait) 941 goto nopage; 942 943rebalance: 944 cond_resched(); 945 946 /* We now go into synchronous reclaim */ 947 p->flags |= PF_MEMALLOC; 948 reclaim_state.reclaimed_slab = 0; 949 p->reclaim_state = &reclaim_state; 950 951 did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); 952 953 p->reclaim_state = NULL; 954 p->flags &= ~PF_MEMALLOC; 955 956 cond_resched(); 957 958 if (likely(did_some_progress)) { 959 page = get_page_from_freelist(gfp_mask, order, 960 zonelist, alloc_flags); 961 if (page) 962 goto got_pg; 963 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 964 /* 965 * Go through the zonelist yet one more time, keep 966 * very high watermark here, this is only to catch 967 * a parallel oom killing, we must fail if we're still 968 * under heavy pressure. 969 */ 970 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 971 zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); 972 if (page) 973 goto got_pg; 974 975 out_of_memory(gfp_mask, order); 976 goto restart; 977 } 978 979 /* 980 * Don't let big-order allocations loop unless the caller explicitly 981 * requests that. Wait for some write requests to complete then retry. 982 * 983 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order 984 * <= 3, but that may not be true in other implementations. 985 */ 986 do_retry = 0; 987 if (!(gfp_mask & __GFP_NORETRY)) { 988 if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) 989 do_retry = 1; 990 if (gfp_mask & __GFP_NOFAIL) 991 do_retry = 1; 992 } 993 if (do_retry) { 994 blk_congestion_wait(WRITE, HZ/50); 995 goto rebalance; 996 } 997 998nopage: 999 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 1000 printk(KERN_WARNING "%s: page allocation failure." 1001 " order:%d, mode:0x%x\n", 1002 p->comm, order, gfp_mask); 1003 dump_stack(); 1004 show_mem(); 1005 } 1006got_pg: 1007 return page; 1008} 1009 1010EXPORT_SYMBOL(__alloc_pages); 1011 1012/* 1013 * Common helper functions. 
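 * A typical lowmem caller pairs these with the matching free routine,
 * e.g. (illustrative only):
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 2);
 *	if (addr) {
 *		...
 *		free_pages(addr, 2);
 *	}
 *
 * They return kernel virtual addresses via page_address(), so they must
 * not be used with __GFP_HIGHMEM; highmem users should call alloc_pages()
 * and map the page explicitly.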
1014 */ 1015fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 1016{ 1017 struct page * page; 1018 page = alloc_pages(gfp_mask, order); 1019 if (!page) 1020 return 0; 1021 return (unsigned long) page_address(page); 1022} 1023 1024EXPORT_SYMBOL(__get_free_pages); 1025 1026fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) 1027{ 1028 struct page * page; 1029 1030 /* 1031 * get_zeroed_page() returns a 32-bit address, which cannot represent 1032 * a highmem page 1033 */ 1034 BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1035 1036 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1037 if (page) 1038 return (unsigned long) page_address(page); 1039 return 0; 1040} 1041 1042EXPORT_SYMBOL(get_zeroed_page); 1043 1044void __pagevec_free(struct pagevec *pvec) 1045{ 1046 int i = pagevec_count(pvec); 1047 1048 while (--i >= 0) 1049 free_hot_cold_page(pvec->pages[i], pvec->cold); 1050} 1051 1052fastcall void __free_pages(struct page *page, unsigned int order) 1053{ 1054 if (put_page_testzero(page)) { 1055 if (order == 0) 1056 free_hot_page(page); 1057 else 1058 __free_pages_ok(page, order); 1059 } 1060} 1061 1062EXPORT_SYMBOL(__free_pages); 1063 1064fastcall void free_pages(unsigned long addr, unsigned int order) 1065{ 1066 if (addr != 0) { 1067 BUG_ON(!virt_addr_valid((void *)addr)); 1068 __free_pages(virt_to_page((void *)addr), order); 1069 } 1070} 1071 1072EXPORT_SYMBOL(free_pages); 1073 1074/* 1075 * Total amount of free (allocatable) RAM: 1076 */ 1077unsigned int nr_free_pages(void) 1078{ 1079 unsigned int sum = 0; 1080 struct zone *zone; 1081 1082 for_each_zone(zone) 1083 sum += zone->free_pages; 1084 1085 return sum; 1086} 1087 1088EXPORT_SYMBOL(nr_free_pages); 1089 1090#ifdef CONFIG_NUMA 1091unsigned int nr_free_pages_pgdat(pg_data_t *pgdat) 1092{ 1093 unsigned int i, sum = 0; 1094 1095 for (i = 0; i < MAX_NR_ZONES; i++) 1096 sum += pgdat->node_zones[i].free_pages; 1097 1098 return sum; 1099} 1100#endif 1101 1102static unsigned int nr_free_zone_pages(int offset) 1103{ 1104 /* Just pick one node, since fallback list is circular */ 1105 pg_data_t *pgdat = NODE_DATA(numa_node_id()); 1106 unsigned int sum = 0; 1107 1108 struct zonelist *zonelist = pgdat->node_zonelists + offset; 1109 struct zone **zonep = zonelist->zones; 1110 struct zone *zone; 1111 1112 for (zone = *zonep++; zone; zone = *zonep++) { 1113 unsigned long size = zone->present_pages; 1114 unsigned long high = zone->pages_high; 1115 if (size > high) 1116 sum += size - high; 1117 } 1118 1119 return sum; 1120} 1121 1122/* 1123 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 1124 */ 1125unsigned int nr_free_buffer_pages(void) 1126{ 1127 return nr_free_zone_pages(gfp_zone(GFP_USER)); 1128} 1129 1130/* 1131 * Amount of free RAM allocatable within all zones 1132 */ 1133unsigned int nr_free_pagecache_pages(void) 1134{ 1135 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER)); 1136} 1137 1138#ifdef CONFIG_HIGHMEM 1139unsigned int nr_free_highpages (void) 1140{ 1141 pg_data_t *pgdat; 1142 unsigned int pages = 0; 1143 1144 for_each_pgdat(pgdat) 1145 pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages; 1146 1147 return pages; 1148} 1149#endif 1150 1151#ifdef CONFIG_NUMA 1152static void show_node(struct zone *zone) 1153{ 1154 printk("Node %d ", zone->zone_pgdat->node_id); 1155} 1156#else 1157#define show_node(zone) do { } while (0) 1158#endif 1159 1160/* 1161 * Accumulate the page_state information across all CPUs. 
1162 * The result is unavoidably approximate - it can change 1163 * during and after execution of this function. 1164 */ 1165static DEFINE_PER_CPU(struct page_state, page_states) = {0}; 1166 1167atomic_t nr_pagecache = ATOMIC_INIT(0); 1168EXPORT_SYMBOL(nr_pagecache); 1169#ifdef CONFIG_SMP 1170DEFINE_PER_CPU(long, nr_pagecache_local) = 0; 1171#endif 1172 1173void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask) 1174{ 1175 int cpu = 0; 1176 1177 memset(ret, 0, sizeof(*ret)); 1178 cpus_and(*cpumask, *cpumask, cpu_online_map); 1179 1180 cpu = first_cpu(*cpumask); 1181 while (cpu < NR_CPUS) { 1182 unsigned long *in, *out, off; 1183 1184 in = (unsigned long *)&per_cpu(page_states, cpu); 1185 1186 cpu = next_cpu(cpu, *cpumask); 1187 1188 if (cpu < NR_CPUS) 1189 prefetch(&per_cpu(page_states, cpu)); 1190 1191 out = (unsigned long *)ret; 1192 for (off = 0; off < nr; off++) 1193 *out++ += *in++; 1194 } 1195} 1196 1197void get_page_state_node(struct page_state *ret, int node) 1198{ 1199 int nr; 1200 cpumask_t mask = node_to_cpumask(node); 1201 1202 nr = offsetof(struct page_state, GET_PAGE_STATE_LAST); 1203 nr /= sizeof(unsigned long); 1204 1205 __get_page_state(ret, nr+1, &mask); 1206} 1207 1208void get_page_state(struct page_state *ret) 1209{ 1210 int nr; 1211 cpumask_t mask = CPU_MASK_ALL; 1212 1213 nr = offsetof(struct page_state, GET_PAGE_STATE_LAST); 1214 nr /= sizeof(unsigned long); 1215 1216 __get_page_state(ret, nr + 1, &mask); 1217} 1218 1219void get_full_page_state(struct page_state *ret) 1220{ 1221 cpumask_t mask = CPU_MASK_ALL; 1222 1223 __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask); 1224} 1225 1226unsigned long __read_page_state(unsigned long offset) 1227{ 1228 unsigned long ret = 0; 1229 int cpu; 1230 1231 for_each_online_cpu(cpu) { 1232 unsigned long in; 1233 1234 in = (unsigned long)&per_cpu(page_states, cpu) + offset; 1235 ret += *((unsigned long *)in); 1236 } 1237 return ret; 1238} 1239 1240void __mod_page_state(unsigned long offset, unsigned long delta) 1241{ 1242 unsigned long flags; 1243 void* ptr; 1244 1245 local_irq_save(flags); 1246 ptr = &__get_cpu_var(page_states); 1247 *(unsigned long*)(ptr + offset) += delta; 1248 local_irq_restore(flags); 1249} 1250 1251EXPORT_SYMBOL(__mod_page_state); 1252 1253void __get_zone_counts(unsigned long *active, unsigned long *inactive, 1254 unsigned long *free, struct pglist_data *pgdat) 1255{ 1256 struct zone *zones = pgdat->node_zones; 1257 int i; 1258 1259 *active = 0; 1260 *inactive = 0; 1261 *free = 0; 1262 for (i = 0; i < MAX_NR_ZONES; i++) { 1263 *active += zones[i].nr_active; 1264 *inactive += zones[i].nr_inactive; 1265 *free += zones[i].free_pages; 1266 } 1267} 1268 1269void get_zone_counts(unsigned long *active, 1270 unsigned long *inactive, unsigned long *free) 1271{ 1272 struct pglist_data *pgdat; 1273 1274 *active = 0; 1275 *inactive = 0; 1276 *free = 0; 1277 for_each_pgdat(pgdat) { 1278 unsigned long l, m, n; 1279 __get_zone_counts(&l, &m, &n, pgdat); 1280 *active += l; 1281 *inactive += m; 1282 *free += n; 1283 } 1284} 1285 1286void si_meminfo(struct sysinfo *val) 1287{ 1288 val->totalram = totalram_pages; 1289 val->sharedram = 0; 1290 val->freeram = nr_free_pages(); 1291 val->bufferram = nr_blockdev_pages(); 1292#ifdef CONFIG_HIGHMEM 1293 val->totalhigh = totalhigh_pages; 1294 val->freehigh = nr_free_highpages(); 1295#else 1296 val->totalhigh = 0; 1297 val->freehigh = 0; 1298#endif 1299 val->mem_unit = PAGE_SIZE; 1300} 1301 1302EXPORT_SYMBOL(si_meminfo); 1303 1304#ifdef CONFIG_NUMA 
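/*
 * Per-node counterpart of si_meminfo(): report one node's total, free and
 * highmem page counts instead of the system-wide figures.
 */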
1305void si_meminfo_node(struct sysinfo *val, int nid) 1306{ 1307 pg_data_t *pgdat = NODE_DATA(nid); 1308 1309 val->totalram = pgdat->node_present_pages; 1310 val->freeram = nr_free_pages_pgdat(pgdat); 1311 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 1312 val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages; 1313 val->mem_unit = PAGE_SIZE; 1314} 1315#endif 1316 1317#define K(x) ((x) << (PAGE_SHIFT-10)) 1318 1319/* 1320 * Show free area list (used inside shift_scroll-lock stuff) 1321 * We also calculate the percentage fragmentation. We do this by counting the 1322 * memory on each free list with the exception of the first item on the list. 1323 */ 1324void show_free_areas(void) 1325{ 1326 struct page_state ps; 1327 int cpu, temperature; 1328 unsigned long active; 1329 unsigned long inactive; 1330 unsigned long free; 1331 struct zone *zone; 1332 1333 for_each_zone(zone) { 1334 show_node(zone); 1335 printk("%s per-cpu:", zone->name); 1336 1337 if (!zone->present_pages) { 1338 printk(" empty\n"); 1339 continue; 1340 } else 1341 printk("\n"); 1342 1343 for_each_online_cpu(cpu) { 1344 struct per_cpu_pageset *pageset; 1345 1346 pageset = zone_pcp(zone, cpu); 1347 1348 for (temperature = 0; temperature < 2; temperature++) 1349 printk("cpu %d %s: low %d, high %d, batch %d used:%d\n", 1350 cpu, 1351 temperature ? "cold" : "hot", 1352 pageset->pcp[temperature].low, 1353 pageset->pcp[temperature].high, 1354 pageset->pcp[temperature].batch, 1355 pageset->pcp[temperature].count); 1356 } 1357 } 1358 1359 get_page_state(&ps); 1360 get_zone_counts(&active, &inactive, &free); 1361 1362 printk("Free pages: %11ukB (%ukB HighMem)\n", 1363 K(nr_free_pages()), 1364 K(nr_free_highpages())); 1365 1366 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu " 1367 "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n", 1368 active, 1369 inactive, 1370 ps.nr_dirty, 1371 ps.nr_writeback, 1372 ps.nr_unstable, 1373 nr_free_pages(), 1374 ps.nr_slab, 1375 ps.nr_mapped, 1376 ps.nr_page_table_pages); 1377 1378 for_each_zone(zone) { 1379 int i; 1380 1381 show_node(zone); 1382 printk("%s" 1383 " free:%lukB" 1384 " min:%lukB" 1385 " low:%lukB" 1386 " high:%lukB" 1387 " active:%lukB" 1388 " inactive:%lukB" 1389 " present:%lukB" 1390 " pages_scanned:%lu" 1391 " all_unreclaimable? %s" 1392 "\n", 1393 zone->name, 1394 K(zone->free_pages), 1395 K(zone->pages_min), 1396 K(zone->pages_low), 1397 K(zone->pages_high), 1398 K(zone->nr_active), 1399 K(zone->nr_inactive), 1400 K(zone->present_pages), 1401 zone->pages_scanned, 1402 (zone->all_unreclaimable ? "yes" : "no") 1403 ); 1404 printk("lowmem_reserve[]:"); 1405 for (i = 0; i < MAX_NR_ZONES; i++) 1406 printk(" %lu", zone->lowmem_reserve[i]); 1407 printk("\n"); 1408 } 1409 1410 for_each_zone(zone) { 1411 unsigned long nr, flags, order, total = 0; 1412 1413 show_node(zone); 1414 printk("%s: ", zone->name); 1415 if (!zone->present_pages) { 1416 printk("empty\n"); 1417 continue; 1418 } 1419 1420 spin_lock_irqsave(&zone->lock, flags); 1421 for (order = 0; order < MAX_ORDER; order++) { 1422 nr = zone->free_area[order].nr_free; 1423 total += nr << order; 1424 printk("%lu*%lukB ", nr, K(1UL) << order); 1425 } 1426 spin_unlock_irqrestore(&zone->lock, flags); 1427 printk("= %lukB\n", K(total)); 1428 } 1429 1430 show_swap_cache_info(); 1431} 1432 1433/* 1434 * Builds allocation fallback zone lists. 
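 * For a given highest-allowed zone type, a node's zones are appended from
 * that type downwards (HIGHMEM, then NORMAL, DMA32, DMA), skipping zones
 * with no present pages; build_zonelists() then chains the per-node lists
 * so an allocation can fall back to less preferred zones and other nodes.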
1435 */ 1436static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k) 1437{ 1438 switch (k) { 1439 struct zone *zone; 1440 default: 1441 BUG(); 1442 case ZONE_HIGHMEM: 1443 zone = pgdat->node_zones + ZONE_HIGHMEM; 1444 if (zone->present_pages) { 1445#ifndef CONFIG_HIGHMEM 1446 BUG(); 1447#endif 1448 zonelist->zones[j++] = zone; 1449 } 1450 case ZONE_NORMAL: 1451 zone = pgdat->node_zones + ZONE_NORMAL; 1452 if (zone->present_pages) 1453 zonelist->zones[j++] = zone; 1454 case ZONE_DMA32: 1455 zone = pgdat->node_zones + ZONE_DMA32; 1456 if (zone->present_pages) 1457 zonelist->zones[j++] = zone; 1458 case ZONE_DMA: 1459 zone = pgdat->node_zones + ZONE_DMA; 1460 if (zone->present_pages) 1461 zonelist->zones[j++] = zone; 1462 } 1463 1464 return j; 1465} 1466 1467static inline int highest_zone(int zone_bits) 1468{ 1469 int res = ZONE_NORMAL; 1470 if (zone_bits & (__force int)__GFP_HIGHMEM) 1471 res = ZONE_HIGHMEM; 1472 if (zone_bits & (__force int)__GFP_DMA32) 1473 res = ZONE_DMA32; 1474 if (zone_bits & (__force int)__GFP_DMA) 1475 res = ZONE_DMA; 1476 return res; 1477} 1478 1479#ifdef CONFIG_NUMA 1480#define MAX_NODE_LOAD (num_online_nodes()) 1481static int __initdata node_load[MAX_NUMNODES]; 1482/** 1483 * find_next_best_node - find the next node that should appear in a given node's fallback list 1484 * @node: node whose fallback list we're appending 1485 * @used_node_mask: nodemask_t of already used nodes 1486 * 1487 * We use a number of factors to determine which is the next node that should 1488 * appear on a given node's fallback list. The node should not have appeared 1489 * already in @node's fallback list, and it should be the next closest node 1490 * according to the distance array (which contains arbitrary distance values 1491 * from each node to each node in the system), and should also prefer nodes 1492 * with no CPUs, since presumably they'll have very little allocation pressure 1493 * on them otherwise. 1494 * It returns -1 if no node is found. 
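 * Concretely, each candidate n is scored as node_distance(node, n), plus
 * PENALTY_FOR_NODE_WITH_CPUS if n has CPUs, scaled by
 * MAX_NODE_LOAD * MAX_NUMNODES and then biased by node_load[n]; the
 * online node with the lowest score not yet in @used_node_mask wins.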
1495 */ 1496static int __init find_next_best_node(int node, nodemask_t *used_node_mask) 1497{ 1498 int i, n, val; 1499 int min_val = INT_MAX; 1500 int best_node = -1; 1501 1502 for_each_online_node(i) { 1503 cpumask_t tmp; 1504 1505 /* Start from local node */ 1506 n = (node+i) % num_online_nodes(); 1507 1508 /* Don't want a node to appear more than once */ 1509 if (node_isset(n, *used_node_mask)) 1510 continue; 1511 1512 /* Use the local node if we haven't already */ 1513 if (!node_isset(node, *used_node_mask)) { 1514 best_node = node; 1515 break; 1516 } 1517 1518 /* Use the distance array to find the distance */ 1519 val = node_distance(node, n); 1520 1521 /* Give preference to headless and unused nodes */ 1522 tmp = node_to_cpumask(n); 1523 if (!cpus_empty(tmp)) 1524 val += PENALTY_FOR_NODE_WITH_CPUS; 1525 1526 /* Slight preference for less loaded node */ 1527 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 1528 val += node_load[n]; 1529 1530 if (val < min_val) { 1531 min_val = val; 1532 best_node = n; 1533 } 1534 } 1535 1536 if (best_node >= 0) 1537 node_set(best_node, *used_node_mask); 1538 1539 return best_node; 1540} 1541 1542static void __init build_zonelists(pg_data_t *pgdat) 1543{ 1544 int i, j, k, node, local_node; 1545 int prev_node, load; 1546 struct zonelist *zonelist; 1547 nodemask_t used_mask; 1548 1549 /* initialize zonelists */ 1550 for (i = 0; i < GFP_ZONETYPES; i++) { 1551 zonelist = pgdat->node_zonelists + i; 1552 zonelist->zones[0] = NULL; 1553 } 1554 1555 /* NUMA-aware ordering of nodes */ 1556 local_node = pgdat->node_id; 1557 load = num_online_nodes(); 1558 prev_node = local_node; 1559 nodes_clear(used_mask); 1560 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 1561 /* 1562 * We don't want to pressure a particular node. 1563 * So adding penalty to the first node in same 1564 * distance group to make it round-robin. 1565 */ 1566 if (node_distance(local_node, node) != 1567 node_distance(local_node, prev_node)) 1568 node_load[node] += load; 1569 prev_node = node; 1570 load--; 1571 for (i = 0; i < GFP_ZONETYPES; i++) { 1572 zonelist = pgdat->node_zonelists + i; 1573 for (j = 0; zonelist->zones[j] != NULL; j++); 1574 1575 k = highest_zone(i); 1576 1577 j = build_zonelists_node(NODE_DATA(node), zonelist, j, k); 1578 zonelist->zones[j] = NULL; 1579 } 1580 } 1581} 1582 1583#else /* CONFIG_NUMA */ 1584 1585static void __init build_zonelists(pg_data_t *pgdat) 1586{ 1587 int i, j, k, node, local_node; 1588 1589 local_node = pgdat->node_id; 1590 for (i = 0; i < GFP_ZONETYPES; i++) { 1591 struct zonelist *zonelist; 1592 1593 zonelist = pgdat->node_zonelists + i; 1594 1595 j = 0; 1596 k = highest_zone(i); 1597 j = build_zonelists_node(pgdat, zonelist, j, k); 1598 /* 1599 * Now we build the zonelist so that it contains the zones 1600 * of all the other nodes. 
1601 * We don't want to pressure a particular node, so when 1602 * building the zones for node N, we make sure that the 1603 * zones coming right after the local ones are those from 1604 * node N+1 (modulo N) 1605 */ 1606 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 1607 if (!node_online(node)) 1608 continue; 1609 j = build_zonelists_node(NODE_DATA(node), zonelist, j, k); 1610 } 1611 for (node = 0; node < local_node; node++) { 1612 if (!node_online(node)) 1613 continue; 1614 j = build_zonelists_node(NODE_DATA(node), zonelist, j, k); 1615 } 1616 1617 zonelist->zones[j] = NULL; 1618 } 1619} 1620 1621#endif /* CONFIG_NUMA */ 1622 1623void __init build_all_zonelists(void) 1624{ 1625 int i; 1626 1627 for_each_online_node(i) 1628 build_zonelists(NODE_DATA(i)); 1629 printk("Built %i zonelists\n", num_online_nodes()); 1630 cpuset_init_current_mems_allowed(); 1631} 1632 1633/* 1634 * Helper functions to size the waitqueue hash table. 1635 * Essentially these want to choose hash table sizes sufficiently 1636 * large so that collisions trying to wait on pages are rare. 1637 * But in fact, the number of active page waitqueues on typical 1638 * systems is ridiculously low, less than 200. So this is even 1639 * conservative, even though it seems large. 1640 * 1641 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 1642 * waitqueues, i.e. the size of the waitq table given the number of pages. 1643 */ 1644#define PAGES_PER_WAITQUEUE 256 1645 1646static inline unsigned long wait_table_size(unsigned long pages) 1647{ 1648 unsigned long size = 1; 1649 1650 pages /= PAGES_PER_WAITQUEUE; 1651 1652 while (size < pages) 1653 size <<= 1; 1654 1655 /* 1656 * Once we have dozens or even hundreds of threads sleeping 1657 * on IO we've got bigger problems than wait queue collision. 1658 * Limit the size of the wait table to a reasonable size. 1659 */ 1660 size = min(size, 4096UL); 1661 1662 return max(size, 4UL); 1663} 1664 1665/* 1666 * This is an integer logarithm so that shifts can be used later 1667 * to extract the more random high bits from the multiplicative 1668 * hash function before the remainder is taken. 1669 */ 1670static inline unsigned long wait_table_bits(unsigned long size) 1671{ 1672 return ffz(~size); 1673} 1674 1675#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 1676 1677static void __init calculate_zone_totalpages(struct pglist_data *pgdat, 1678 unsigned long *zones_size, unsigned long *zholes_size) 1679{ 1680 unsigned long realtotalpages, totalpages = 0; 1681 int i; 1682 1683 for (i = 0; i < MAX_NR_ZONES; i++) 1684 totalpages += zones_size[i]; 1685 pgdat->node_spanned_pages = totalpages; 1686 1687 realtotalpages = totalpages; 1688 if (zholes_size) 1689 for (i = 0; i < MAX_NR_ZONES; i++) 1690 realtotalpages -= zholes_size[i]; 1691 pgdat->node_present_pages = realtotalpages; 1692 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); 1693} 1694 1695 1696/* 1697 * Initially all pages are reserved - free ones are freed 1698 * up by free_all_bootmem() once the early boot process is 1699 * done. Non-atomic initialization, single-pass. 
1700 */ 1701void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 1702 unsigned long start_pfn) 1703{ 1704 struct page *page; 1705 unsigned long end_pfn = start_pfn + size; 1706 unsigned long pfn; 1707 1708 for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) { 1709 if (!early_pfn_valid(pfn)) 1710 continue; 1711 page = pfn_to_page(pfn); 1712 set_page_links(page, zone, nid, pfn); 1713 set_page_count(page, 1); 1714 reset_page_mapcount(page); 1715 SetPageReserved(page); 1716 INIT_LIST_HEAD(&page->lru); 1717#ifdef WANT_PAGE_VIRTUAL 1718 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 1719 if (!is_highmem_idx(zone)) 1720 set_page_address(page, __va(pfn << PAGE_SHIFT)); 1721#endif 1722 } 1723} 1724 1725void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, 1726 unsigned long size) 1727{ 1728 int order; 1729 for (order = 0; order < MAX_ORDER ; order++) { 1730 INIT_LIST_HEAD(&zone->free_area[order].free_list); 1731 zone->free_area[order].nr_free = 0; 1732 } 1733} 1734 1735#define ZONETABLE_INDEX(x, zone_nr) ((x << ZONES_SHIFT) | zone_nr) 1736void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn, 1737 unsigned long size) 1738{ 1739 unsigned long snum = pfn_to_section_nr(pfn); 1740 unsigned long end = pfn_to_section_nr(pfn + size); 1741 1742 if (FLAGS_HAS_NODE) 1743 zone_table[ZONETABLE_INDEX(nid, zid)] = zone; 1744 else 1745 for (; snum <= end; snum++) 1746 zone_table[ZONETABLE_INDEX(snum, zid)] = zone; 1747} 1748 1749#ifndef __HAVE_ARCH_MEMMAP_INIT 1750#define memmap_init(size, nid, zone, start_pfn) \ 1751 memmap_init_zone((size), (nid), (zone), (start_pfn)) 1752#endif 1753 1754static int __devinit zone_batchsize(struct zone *zone) 1755{ 1756 int batch; 1757 1758 /* 1759 * The per-cpu-pages pools are set to around 1000th of the 1760 * size of the zone. But no more than 1/2 of a meg. 1761 * 1762 * OK, so we don't know how big the cache is. So guess. 1763 */ 1764 batch = zone->present_pages / 1024; 1765 if (batch * PAGE_SIZE > 512 * 1024) 1766 batch = (512 * 1024) / PAGE_SIZE; 1767 batch /= 4; /* We effectively *= 4 below */ 1768 if (batch < 1) 1769 batch = 1; 1770 1771 /* 1772 * Clamp the batch to a 2^n - 1 value. Having a power 1773 * of 2 value was found to be more likely to have 1774 * suboptimal cache aliasing properties in some cases. 1775 * 1776 * For example if 2 tasks are alternately allocating 1777 * batches of pages, one task can end up with a lot 1778 * of pages of one half of the possible page colors 1779 * and the other with pages of the other colors. 1780 */ 1781 batch = (1 << (fls(batch + batch/2)-1)) - 1; 1782 1783 return batch; 1784} 1785 1786inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 1787{ 1788 struct per_cpu_pages *pcp; 1789 1790 memset(p, 0, sizeof(*p)); 1791 1792 pcp = &p->pcp[0]; /* hot */ 1793 pcp->count = 0; 1794 pcp->low = 0; 1795 pcp->high = 6 * batch; 1796 pcp->batch = max(1UL, 1 * batch); 1797 INIT_LIST_HEAD(&pcp->list); 1798 1799 pcp = &p->pcp[1]; /* cold*/ 1800 pcp->count = 0; 1801 pcp->low = 0; 1802 pcp->high = 2 * batch; 1803 pcp->batch = max(1UL, batch/2); 1804 INIT_LIST_HEAD(&pcp->list); 1805} 1806 1807#ifdef CONFIG_NUMA 1808/* 1809 * Boot pageset table. One per cpu which is going to be used for all 1810 * zones and all nodes. The parameters will be set in such a way 1811 * that an item put on a list will immediately be handed over to 1812 * the buddy list. This is safe since pageset manipulation is done 1813 * with interrupts disabled. 
1814 * 1815 * Some NUMA counter updates may also be caught by the boot pagesets. 1816 * 1817 * The boot_pagesets must be kept even after bootup is complete for 1818 * unused processors and/or zones. They do play a role for bootstrapping 1819 * hotplugged processors. 1820 * 1821 * zoneinfo_show() and maybe other functions do 1822 * not check if the processor is online before following the pageset pointer. 1823 * Other parts of the kernel may not check if the zone is available. 1824 */ 1825static struct per_cpu_pageset 1826 boot_pageset[NR_CPUS]; 1827 1828/* 1829 * Dynamically allocate memory for the 1830 * per cpu pageset array in struct zone. 1831 */ 1832static int __devinit process_zones(int cpu) 1833{ 1834 struct zone *zone, *dzone; 1835 1836 for_each_zone(zone) { 1837 1838 zone->pageset[cpu] = kmalloc_node(sizeof(struct per_cpu_pageset), 1839 GFP_KERNEL, cpu_to_node(cpu)); 1840 if (!zone->pageset[cpu]) 1841 goto bad; 1842 1843 setup_pageset(zone->pageset[cpu], zone_batchsize(zone)); 1844 } 1845 1846 return 0; 1847bad: 1848 for_each_zone(dzone) { 1849 if (dzone == zone) 1850 break; 1851 kfree(dzone->pageset[cpu]); 1852 dzone->pageset[cpu] = NULL; 1853 } 1854 return -ENOMEM; 1855} 1856 1857static inline void free_zone_pagesets(int cpu) 1858{ 1859#ifdef CONFIG_NUMA 1860 struct zone *zone; 1861 1862 for_each_zone(zone) { 1863 struct per_cpu_pageset *pset = zone_pcp(zone, cpu); 1864 1865 zone_pcp(zone, cpu) = NULL; 1866 kfree(pset); 1867 } 1868#endif 1869} 1870 1871static int __devinit pageset_cpuup_callback(struct notifier_block *nfb, 1872 unsigned long action, 1873 void *hcpu) 1874{ 1875 int cpu = (long)hcpu; 1876 int ret = NOTIFY_OK; 1877 1878 switch (action) { 1879 case CPU_UP_PREPARE: 1880 if (process_zones(cpu)) 1881 ret = NOTIFY_BAD; 1882 break; 1883 case CPU_UP_CANCELED: 1884 case CPU_DEAD: 1885 free_zone_pagesets(cpu); 1886 break; 1887 default: 1888 break; 1889 } 1890 return ret; 1891} 1892 1893static struct notifier_block pageset_notifier = 1894 { &pageset_cpuup_callback, NULL, 0 }; 1895 1896void __init setup_per_cpu_pageset(void) 1897{ 1898 int err; 1899 1900 /* Initialize per_cpu_pageset for cpu 0. 1901 * A cpuup callback will do this for every cpu 1902 * as it comes online 1903 */ 1904 err = process_zones(smp_processor_id()); 1905 BUG_ON(err); 1906 register_cpu_notifier(&pageset_notifier); 1907} 1908 1909#endif 1910 1911static __devinit 1912void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 1913{ 1914 int i; 1915 struct pglist_data *pgdat = zone->zone_pgdat; 1916 1917 /* 1918 * The per-page waitqueue mechanism uses hashed waitqueues 1919 * per zone. 1920 */ 1921 zone->wait_table_size = wait_table_size(zone_size_pages); 1922 zone->wait_table_bits = wait_table_bits(zone->wait_table_size); 1923 zone->wait_table = (wait_queue_head_t *) 1924 alloc_bootmem_node(pgdat, zone->wait_table_size 1925 * sizeof(wait_queue_head_t)); 1926 1927 for(i = 0; i < zone->wait_table_size; ++i) 1928 init_waitqueue_head(zone->wait_table + i); 1929} 1930 1931static __devinit void zone_pcp_init(struct zone *zone) 1932{ 1933 int cpu; 1934 unsigned long batch = zone_batchsize(zone); 1935 1936 for (cpu = 0; cpu < NR_CPUS; cpu++) { 1937#ifdef CONFIG_NUMA 1938 /* Early boot. 
Slab allocator not functional yet */ 1939 zone->pageset[cpu] = &boot_pageset[cpu]; 1940 setup_pageset(&boot_pageset[cpu],0); 1941#else 1942 setup_pageset(zone_pcp(zone,cpu), batch); 1943#endif 1944 } 1945 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 1946 zone->name, zone->present_pages, batch); 1947} 1948 1949static __devinit void init_currently_empty_zone(struct zone *zone, 1950 unsigned long zone_start_pfn, unsigned long size) 1951{ 1952 struct pglist_data *pgdat = zone->zone_pgdat; 1953 1954 zone_wait_table_init(zone, size); 1955 pgdat->nr_zones = zone_idx(zone) + 1; 1956 1957 zone->zone_mem_map = pfn_to_page(zone_start_pfn); 1958 zone->zone_start_pfn = zone_start_pfn; 1959 1960 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); 1961 1962 zone_init_free_lists(pgdat, zone, zone->spanned_pages); 1963} 1964 1965/* 1966 * Set up the zone data structures: 1967 * - mark all pages reserved 1968 * - mark all memory queues empty 1969 * - clear the memory bitmaps 1970 */ 1971static void __init free_area_init_core(struct pglist_data *pgdat, 1972 unsigned long *zones_size, unsigned long *zholes_size) 1973{ 1974 unsigned long j; 1975 int nid = pgdat->node_id; 1976 unsigned long zone_start_pfn = pgdat->node_start_pfn; 1977 1978 pgdat_resize_init(pgdat); 1979 pgdat->nr_zones = 0; 1980 init_waitqueue_head(&pgdat->kswapd_wait); 1981 pgdat->kswapd_max_order = 0; 1982 1983 for (j = 0; j < MAX_NR_ZONES; j++) { 1984 struct zone *zone = pgdat->node_zones + j; 1985 unsigned long size, realsize; 1986 1987 realsize = size = zones_size[j]; 1988 if (zholes_size) 1989 realsize -= zholes_size[j]; 1990 1991 if (j < ZONE_HIGHMEM) 1992 nr_kernel_pages += realsize; 1993 nr_all_pages += realsize; 1994 1995 zone->spanned_pages = size; 1996 zone->present_pages = realsize; 1997 zone->name = zone_names[j]; 1998 spin_lock_init(&zone->lock); 1999 spin_lock_init(&zone->lru_lock); 2000 zone_seqlock_init(zone); 2001 zone->zone_pgdat = pgdat; 2002 zone->free_pages = 0; 2003 2004 zone->temp_priority = zone->prev_priority = DEF_PRIORITY; 2005 2006 zone_pcp_init(zone); 2007 INIT_LIST_HEAD(&zone->active_list); 2008 INIT_LIST_HEAD(&zone->inactive_list); 2009 zone->nr_scan_active = 0; 2010 zone->nr_scan_inactive = 0; 2011 zone->nr_active = 0; 2012 zone->nr_inactive = 0; 2013 atomic_set(&zone->reclaim_in_progress, 0); 2014 if (!size) 2015 continue; 2016 2017 zonetable_add(zone, nid, j, zone_start_pfn, size); 2018 init_currently_empty_zone(zone, zone_start_pfn, size); 2019 zone_start_pfn += size; 2020 } 2021} 2022 2023static void __init alloc_node_mem_map(struct pglist_data *pgdat) 2024{ 2025 /* Skip empty nodes */ 2026 if (!pgdat->node_spanned_pages) 2027 return; 2028 2029#ifdef CONFIG_FLAT_NODE_MEM_MAP 2030 /* ia64 gets its own node_mem_map, before this, without bootmem */ 2031 if (!pgdat->node_mem_map) { 2032 unsigned long size; 2033 struct page *map; 2034 2035 size = (pgdat->node_spanned_pages + 1) * sizeof(struct page); 2036 map = alloc_remap(pgdat->node_id, size); 2037 if (!map) 2038 map = alloc_bootmem_node(pgdat, size); 2039 pgdat->node_mem_map = map; 2040 } 2041#ifdef CONFIG_FLATMEM 2042 /* 2043 * With no DISCONTIG, the global mem_map is just set as node 0's 2044 */ 2045 if (pgdat == NODE_DATA(0)) 2046 mem_map = NODE_DATA(0)->node_mem_map; 2047#endif 2048#endif /* CONFIG_FLAT_NODE_MEM_MAP */ 2049} 2050 2051void __init free_area_init_node(int nid, struct pglist_data *pgdat, 2052 unsigned long *zones_size, unsigned long node_start_pfn, 2053 unsigned long *zholes_size) 2054{ 2055 pgdat->node_id = nid; 
2056 pgdat->node_start_pfn = node_start_pfn; 2057 calculate_zone_totalpages(pgdat, zones_size, zholes_size); 2058 2059 alloc_node_mem_map(pgdat); 2060 2061 free_area_init_core(pgdat, zones_size, zholes_size); 2062} 2063 2064#ifndef CONFIG_NEED_MULTIPLE_NODES 2065static bootmem_data_t contig_bootmem_data; 2066struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data }; 2067 2068EXPORT_SYMBOL(contig_page_data); 2069#endif 2070 2071void __init free_area_init(unsigned long *zones_size) 2072{ 2073 free_area_init_node(0, NODE_DATA(0), zones_size, 2074 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 2075} 2076 2077#ifdef CONFIG_PROC_FS 2078 2079#include <linux/seq_file.h> 2080 2081static void *frag_start(struct seq_file *m, loff_t *pos) 2082{ 2083 pg_data_t *pgdat; 2084 loff_t node = *pos; 2085 2086 for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next) 2087 --node; 2088 2089 return pgdat; 2090} 2091 2092static void *frag_next(struct seq_file *m, void *arg, loff_t *pos) 2093{ 2094 pg_data_t *pgdat = (pg_data_t *)arg; 2095 2096 (*pos)++; 2097 return pgdat->pgdat_next; 2098} 2099 2100static void frag_stop(struct seq_file *m, void *arg) 2101{ 2102} 2103 2104/* 2105 * This walks the free areas for each zone. 2106 */ 2107static int frag_show(struct seq_file *m, void *arg) 2108{ 2109 pg_data_t *pgdat = (pg_data_t *)arg; 2110 struct zone *zone; 2111 struct zone *node_zones = pgdat->node_zones; 2112 unsigned long flags; 2113 int order; 2114 2115 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { 2116 if (!zone->present_pages) 2117 continue; 2118 2119 spin_lock_irqsave(&zone->lock, flags); 2120 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); 2121 for (order = 0; order < MAX_ORDER; ++order) 2122 seq_printf(m, "%6lu ", zone->free_area[order].nr_free); 2123 spin_unlock_irqrestore(&zone->lock, flags); 2124 seq_putc(m, '\n'); 2125 } 2126 return 0; 2127} 2128 2129struct seq_operations fragmentation_op = { 2130 .start = frag_start, 2131 .next = frag_next, 2132 .stop = frag_stop, 2133 .show = frag_show, 2134}; 2135 2136/* 2137 * Output information about zones in @pgdat. 
2138 */ 2139static int zoneinfo_show(struct seq_file *m, void *arg) 2140{ 2141 pg_data_t *pgdat = arg; 2142 struct zone *zone; 2143 struct zone *node_zones = pgdat->node_zones; 2144 unsigned long flags; 2145 2146 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) { 2147 int i; 2148 2149 if (!zone->present_pages) 2150 continue; 2151 2152 spin_lock_irqsave(&zone->lock, flags); 2153 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name); 2154 seq_printf(m, 2155 "\n pages free %lu" 2156 "\n min %lu" 2157 "\n low %lu" 2158 "\n high %lu" 2159 "\n active %lu" 2160 "\n inactive %lu" 2161 "\n scanned %lu (a: %lu i: %lu)" 2162 "\n spanned %lu" 2163 "\n present %lu", 2164 zone->free_pages, 2165 zone->pages_min, 2166 zone->pages_low, 2167 zone->pages_high, 2168 zone->nr_active, 2169 zone->nr_inactive, 2170 zone->pages_scanned, 2171 zone->nr_scan_active, zone->nr_scan_inactive, 2172 zone->spanned_pages, 2173 zone->present_pages); 2174 seq_printf(m, 2175 "\n protection: (%lu", 2176 zone->lowmem_reserve[0]); 2177 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++) 2178 seq_printf(m, ", %lu", zone->lowmem_reserve[i]); 2179 seq_printf(m, 2180 ")" 2181 "\n pagesets"); 2182 for (i = 0; i < ARRAY_SIZE(zone->pageset); i++) { 2183 struct per_cpu_pageset *pageset; 2184 int j; 2185 2186 pageset = zone_pcp(zone, i); 2187 for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) { 2188 if (pageset->pcp[j].count) 2189 break; 2190 } 2191 if (j == ARRAY_SIZE(pageset->pcp)) 2192 continue; 2193 for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) { 2194 seq_printf(m, 2195 "\n cpu: %i pcp: %i" 2196 "\n count: %i" 2197 "\n low: %i" 2198 "\n high: %i" 2199 "\n batch: %i", 2200 i, j, 2201 pageset->pcp[j].count, 2202 pageset->pcp[j].low, 2203 pageset->pcp[j].high, 2204 pageset->pcp[j].batch); 2205 } 2206#ifdef CONFIG_NUMA 2207 seq_printf(m, 2208 "\n numa_hit: %lu" 2209 "\n numa_miss: %lu" 2210 "\n numa_foreign: %lu" 2211 "\n interleave_hit: %lu" 2212 "\n local_node: %lu" 2213 "\n other_node: %lu", 2214 pageset->numa_hit, 2215 pageset->numa_miss, 2216 pageset->numa_foreign, 2217 pageset->interleave_hit, 2218 pageset->local_node, 2219 pageset->other_node); 2220#endif 2221 } 2222 seq_printf(m, 2223 "\n all_unreclaimable: %u" 2224 "\n prev_priority: %i" 2225 "\n temp_priority: %i" 2226 "\n start_pfn: %lu", 2227 zone->all_unreclaimable, 2228 zone->prev_priority, 2229 zone->temp_priority, 2230 zone->zone_start_pfn); 2231 spin_unlock_irqrestore(&zone->lock, flags); 2232 seq_putc(m, '\n'); 2233 } 2234 return 0; 2235} 2236 2237struct seq_operations zoneinfo_op = { 2238 .start = frag_start, /* iterate over all zones. The same as in 2239 * fragmentation. 
*/ 2240 .next = frag_next, 2241 .stop = frag_stop, 2242 .show = zoneinfo_show, 2243}; 2244 2245static char *vmstat_text[] = { 2246 "nr_dirty", 2247 "nr_writeback", 2248 "nr_unstable", 2249 "nr_page_table_pages", 2250 "nr_mapped", 2251 "nr_slab", 2252 2253 "pgpgin", 2254 "pgpgout", 2255 "pswpin", 2256 "pswpout", 2257 "pgalloc_high", 2258 2259 "pgalloc_normal", 2260 "pgalloc_dma", 2261 "pgfree", 2262 "pgactivate", 2263 "pgdeactivate", 2264 2265 "pgfault", 2266 "pgmajfault", 2267 "pgrefill_high", 2268 "pgrefill_normal", 2269 "pgrefill_dma", 2270 2271 "pgsteal_high", 2272 "pgsteal_normal", 2273 "pgsteal_dma", 2274 "pgscan_kswapd_high", 2275 "pgscan_kswapd_normal", 2276 2277 "pgscan_kswapd_dma", 2278 "pgscan_direct_high", 2279 "pgscan_direct_normal", 2280 "pgscan_direct_dma", 2281 "pginodesteal", 2282 2283 "slabs_scanned", 2284 "kswapd_steal", 2285 "kswapd_inodesteal", 2286 "pageoutrun", 2287 "allocstall", 2288 2289 "pgrotated", 2290 "nr_bounce", 2291}; 2292 2293static void *vmstat_start(struct seq_file *m, loff_t *pos) 2294{ 2295 struct page_state *ps; 2296 2297 if (*pos >= ARRAY_SIZE(vmstat_text)) 2298 return NULL; 2299 2300 ps = kmalloc(sizeof(*ps), GFP_KERNEL); 2301 m->private = ps; 2302 if (!ps) 2303 return ERR_PTR(-ENOMEM); 2304 get_full_page_state(ps); 2305 ps->pgpgin /= 2; /* sectors -> kbytes */ 2306 ps->pgpgout /= 2; 2307 return (unsigned long *)ps + *pos; 2308} 2309 2310static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos) 2311{ 2312 (*pos)++; 2313 if (*pos >= ARRAY_SIZE(vmstat_text)) 2314 return NULL; 2315 return (unsigned long *)m->private + *pos; 2316} 2317 2318static int vmstat_show(struct seq_file *m, void *arg) 2319{ 2320 unsigned long *l = arg; 2321 unsigned long off = l - (unsigned long *)m->private; 2322 2323 seq_printf(m, "%s %lu\n", vmstat_text[off], *l); 2324 return 0; 2325} 2326 2327static void vmstat_stop(struct seq_file *m, void *arg) 2328{ 2329 kfree(m->private); 2330 m->private = NULL; 2331} 2332 2333struct seq_operations vmstat_op = { 2334 .start = vmstat_start, 2335 .next = vmstat_next, 2336 .stop = vmstat_stop, 2337 .show = vmstat_show, 2338}; 2339 2340#endif /* CONFIG_PROC_FS */ 2341 2342#ifdef CONFIG_HOTPLUG_CPU 2343static int page_alloc_cpu_notify(struct notifier_block *self, 2344 unsigned long action, void *hcpu) 2345{ 2346 int cpu = (unsigned long)hcpu; 2347 long *count; 2348 unsigned long *src, *dest; 2349 2350 if (action == CPU_DEAD) { 2351 int i; 2352 2353 /* Drain local pagecache count. */ 2354 count = &per_cpu(nr_pagecache_local, cpu); 2355 atomic_add(*count, &nr_pagecache); 2356 *count = 0; 2357 local_irq_disable(); 2358 __drain_pages(cpu); 2359 2360 /* Add dead cpu's page_states to our own. */ 2361 dest = (unsigned long *)&__get_cpu_var(page_states); 2362 src = (unsigned long *)&per_cpu(page_states, cpu); 2363 2364 for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long); 2365 i++) { 2366 dest[i] += src[i]; 2367 src[i] = 0; 2368 } 2369 2370 local_irq_enable(); 2371 } 2372 return NOTIFY_OK; 2373} 2374#endif /* CONFIG_HOTPLUG_CPU */ 2375 2376void __init page_alloc_init(void) 2377{ 2378 hotcpu_notifier(page_alloc_cpu_notify, 0); 2379} 2380 2381/* 2382 * setup_per_zone_lowmem_reserve - called whenever 2383 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 2384 * has a correct pages reserved value, so an adequate number of 2385 * pages are left in the zone after a successful __alloc_pages().
2386 */ 2387static void setup_per_zone_lowmem_reserve(void) 2388{ 2389 struct pglist_data *pgdat; 2390 int j, idx; 2391 2392 for_each_pgdat(pgdat) { 2393 for (j = 0; j < MAX_NR_ZONES; j++) { 2394 struct zone *zone = pgdat->node_zones + j; 2395 unsigned long present_pages = zone->present_pages; 2396 2397 zone->lowmem_reserve[j] = 0; 2398 2399 for (idx = j-1; idx >= 0; idx--) { 2400 struct zone *lower_zone; 2401 2402 if (sysctl_lowmem_reserve_ratio[idx] < 1) 2403 sysctl_lowmem_reserve_ratio[idx] = 1; 2404 2405 lower_zone = pgdat->node_zones + idx; 2406 lower_zone->lowmem_reserve[j] = present_pages / 2407 sysctl_lowmem_reserve_ratio[idx]; 2408 present_pages += lower_zone->present_pages; 2409 } 2410 } 2411 } 2412} 2413 2414/* 2415 * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures 2416 * that the pages_{min,low,high} values for each zone are set correctly 2417 * with respect to min_free_kbytes. 2418 */ 2419void setup_per_zone_pages_min(void) 2420{ 2421 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 2422 unsigned long lowmem_pages = 0; 2423 struct zone *zone; 2424 unsigned long flags; 2425 2426 /* Calculate total number of !ZONE_HIGHMEM pages */ 2427 for_each_zone(zone) { 2428 if (!is_highmem(zone)) 2429 lowmem_pages += zone->present_pages; 2430 } 2431 2432 for_each_zone(zone) { 2433 unsigned long tmp; 2434 spin_lock_irqsave(&zone->lru_lock, flags); 2435 tmp = (pages_min * zone->present_pages) / lowmem_pages; 2436 if (is_highmem(zone)) { 2437 /* 2438 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 2439 * need highmem pages, so cap pages_min to a small 2440 * value here. 2441 * 2442 * The (pages_high-pages_low) and (pages_low-pages_min) 2443 * deltas controls asynch page reclaim, and so should 2444 * not be capped for highmem. 2445 */ 2446 int min_pages; 2447 2448 min_pages = zone->present_pages / 1024; 2449 if (min_pages < SWAP_CLUSTER_MAX) 2450 min_pages = SWAP_CLUSTER_MAX; 2451 if (min_pages > 128) 2452 min_pages = 128; 2453 zone->pages_min = min_pages; 2454 } else { 2455 /* 2456 * If it's a lowmem zone, reserve a number of pages 2457 * proportionate to the zone's size. 2458 */ 2459 zone->pages_min = tmp; 2460 } 2461 2462 zone->pages_low = zone->pages_min + tmp / 4; 2463 zone->pages_high = zone->pages_min + tmp / 2; 2464 spin_unlock_irqrestore(&zone->lru_lock, flags); 2465 } 2466} 2467 2468/* 2469 * Initialise min_free_kbytes. 2470 * 2471 * For small machines we want it small (128k min). For large machines 2472 * we want it large (64MB max). But it is not linear, because network 2473 * bandwidth does not increase linearly with machine size. 
We use 2474 * 2475 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 2476 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 2477 * 2478 * which yields 2479 * 2480 * 16MB: 512k 2481 * 32MB: 724k 2482 * 64MB: 1024k 2483 * 128MB: 1448k 2484 * 256MB: 2048k 2485 * 512MB: 2896k 2486 * 1024MB: 4096k 2487 * 2048MB: 5792k 2488 * 4096MB: 8192k 2489 * 8192MB: 11584k 2490 * 16384MB: 16384k 2491 */ 2492static int __init init_per_zone_pages_min(void) 2493{ 2494 unsigned long lowmem_kbytes; 2495 2496 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 2497 2498 min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 2499 if (min_free_kbytes < 128) 2500 min_free_kbytes = 128; 2501 if (min_free_kbytes > 65536) 2502 min_free_kbytes = 65536; 2503 setup_per_zone_pages_min(); 2504 setup_per_zone_lowmem_reserve(); 2505 return 0; 2506} 2507module_init(init_per_zone_pages_min) 2508 2509/* 2510 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 2511 * that we can call setup_per_zone_pages_min() whenever min_free_kbytes 2512 * changes. 2513 */ 2514int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 2515 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2516{ 2517 proc_dointvec(table, write, file, buffer, length, ppos); 2518 setup_per_zone_pages_min(); 2519 return 0; 2520} 2521 2522/* 2523 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 2524 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 2525 * whenever sysctl_lowmem_reserve_ratio changes. 2526 * 2527 * The reserve ratio obviously has absolutely no relation to the 2528 * pages_min watermarks. The lowmem reserve ratio can only make sense 2529 * as a function of the boot time zone sizes. 2530 */ 2531int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 2532 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2533{ 2534 proc_dointvec_minmax(table, write, file, buffer, length, ppos); 2535 setup_per_zone_lowmem_reserve(); 2536 return 0; 2537} 2538 2539__initdata int hashdist = HASHDIST_DEFAULT; 2540 2541#ifdef CONFIG_NUMA 2542static int __init set_hashdist(char *str) 2543{ 2544 if (!str) 2545 return 0; 2546 hashdist = simple_strtoul(str, &str, 0); 2547 return 1; 2548} 2549__setup("hashdist=", set_hashdist); 2550#endif 2551 2552/* 2553 * allocate a large system hash table from bootmem 2554 * - it is assumed that the hash table must contain an exact power-of-2 2555 * quantity of entries 2556 * - limit is the number of hash buckets, not the total allocation size 2557 */ 2558void *__init alloc_large_system_hash(const char *tablename, 2559 unsigned long bucketsize, 2560 unsigned long numentries, 2561 int scale, 2562 int flags, 2563 unsigned int *_hash_shift, 2564 unsigned int *_hash_mask, 2565 unsigned long limit) 2566{ 2567 unsigned long long max = limit; 2568 unsigned long log2qty, size; 2569 void *table = NULL; 2570 2571 /* allow the kernel cmdline to have a say */ 2572 if (!numentries) { 2573 /* round applicable memory size up to nearest megabyte */ 2574 numentries = (flags & HASH_HIGHMEM) ?
nr_all_pages : nr_kernel_pages; 2575 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 2576 numentries >>= 20 - PAGE_SHIFT; 2577 numentries <<= 20 - PAGE_SHIFT; 2578 2579 /* limit to 1 bucket per 2^scale bytes of low memory */ 2580 if (scale > PAGE_SHIFT) 2581 numentries >>= (scale - PAGE_SHIFT); 2582 else 2583 numentries <<= (PAGE_SHIFT - scale); 2584 } 2585 /* rounded up to nearest power of 2 in size */ 2586 numentries = 1UL << (long_log2(numentries) + 1); 2587 2588 /* limit allocation size to 1/16 total memory by default */ 2589 if (max == 0) { 2590 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 2591 do_div(max, bucketsize); 2592 } 2593 2594 if (numentries > max) 2595 numentries = max; 2596 2597 log2qty = long_log2(numentries); 2598 2599 do { 2600 size = bucketsize << log2qty; 2601 if (flags & HASH_EARLY) 2602 table = alloc_bootmem(size); 2603 else if (hashdist) 2604 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 2605 else { 2606 unsigned long order; 2607 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++) 2608 ; 2609 table = (void*) __get_free_pages(GFP_ATOMIC, order); 2610 } 2611 } while (!table && size > PAGE_SIZE && --log2qty); 2612 2613 if (!table) 2614 panic("Failed to allocate %s hash table\n", tablename); 2615 2616 printk("%s hash table entries: %d (order: %d, %lu bytes)\n", 2617 tablename, 2618 (1U << log2qty), 2619 long_log2(size) - PAGE_SHIFT, 2620 size); 2621 2622 if (_hash_shift) 2623 *_hash_shift = log2qty; 2624 if (_hash_mask) 2625 *_hash_mask = (1 << log2qty) - 1; 2626 2627 return table; 2628} 2629