memcontrol.c revision accf163e6ab729f1fc5fffaa0310e498270bf4e7
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
static struct kmem_cache *page_cgroup_cache __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * For accounting under irq disable, there is no need to bump the preempt
 * count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

/*
 * per-zone information in memory controller.
 */

enum mem_cgroup_zstat_index {
	MEM_CGROUP_ZSTAT_ACTIVE,
	MEM_CGROUP_ZSTAT_INACTIVE,

	NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long		count[NR_MEM_CGROUP_ZSTAT];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
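 *
 * (Illustrative note: statistics are kept in per-CPU counters, struct
 * mem_cgroup_stat_cpu, and summed over all possible CPUs when read back,
 * e.g.
 *
 *	rss_pages = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
 *	rss_bytes = rss_pages * PAGE_SIZE;
 *
 * which is how memory.stat reports "rss" in mem_control_stat_show().)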
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup that is below its low water mark; this is
 * a feature that will be implemented much later.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock. We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin). But since
 * bit_spin_lock doesn't actually set that lock bit in a non-debug
 * uniprocessor kernel, we should avoid setting it here too.
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)
#else
#define PAGE_CGROUP_LOCK	0x0
#endif

/*
 * A page_cgroup is associated with every page descriptor. The page_cgroup
 * helps us identify information about the cgroup.
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE	(0x2)	/* page is active in this cgroup */

static int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
};

/*
 * Always modified under the lru lock, so there is no need to
 * preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					 bool charge)
{
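	/*
	 * Illustrative walk-through: a newly mapped anonymous page arrives
	 * here with flags == PAGE_CGROUP_FLAG_ACTIVE and charge == true, so
	 * STAT_RSS and PGPGIN_COUNT are incremented below; the uncharge path
	 * calls this again with charge == false, decrementing STAT_RSS and
	 * bumping PGPGOUT_COUNT instead.
	 */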
	int val = (charge) ? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;

	VM_BUG_ON(!irqs_disabled());
	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(stat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(stat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum mem_cgroup_zstat_index idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	VM_BUG_ON(!page_cgroup_locked(page));
	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static int try_lock_page_cgroup(struct page *page)
{
	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
			struct page_cgroup *pc)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
	list_del(&pc->lru);
}

static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
				struct page_cgroup *pc)
{
	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (!to) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		list_add(&pc->lru, &mz->inactive_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		list_add(&pc->lru, &mz->active_list);
	}
	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
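	/*
	 * Callers hold mz->lru_lock (mem_cgroup_move_lists() and
	 * mem_cgroup_isolate_pages() take it before calling us), so the
	 * counter updates and list_move() below are serialized.
	 */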
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	if (active) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->active_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->inactive_list);
	}
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held.
 */
void mem_cgroup_move_lists(struct page *page, bool active)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	/*
	 * We cannot lock_page_cgroup while holding zone's lru_lock,
	 * because other holders of lock_page_cgroup can be interrupted
	 * with an attempt to rotate_reclaimable_page. But we cannot
	 * safely get to page_cgroup without it, so just try_lock it:
	 * mem_cgroup_isolate_pages allows for page left on wrong list.
	 */
	if (!try_lock_page_cgroup(page))
		return;

	pc = page_get_page_cgroup(page);
	if (pc) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_move_lists(pc, active);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(page);
}

/*
 * Calculate mapped_ratio under the memory controller. This will be used in
 * vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}

/*
 * This function is called from vmscan.c. In the page reclaiming loop, the
 * balance between the active and inactive lists is calculated. For memory
 * controller page reclaiming, we should use the mem_cgroup's imbalance
 * rather than the zone's global lru imbalance.
 */
long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	unsigned long active, inactive;
	/* active and inactive are the number of pages. 'long' is ok. */
	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
	return (long) (active / (inactive + 1));
}

/*
 * prev_priority control... this will be used in the memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate # of pages to be scanned in this priority/zone.
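 *
 * Illustrative numbers: with 512 active pages on this zone's list,
 * priority 12 (DEF_PRIORITY) scans 512 >> 12 = 0 pages, priority 9
 * scans 1 page, and priority 0 scans all 512.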
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
 * (see include/linux/mmzone.h)
 */

long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
				   struct zone *zone, int priority)
{
	long nr_active;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
	return (nr_active >> priority);
}

long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	long nr_inactive;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
	return (nr_inactive >> priority);
}

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	if (active)
		src = &mz->active_list;
	else
		src = &mz->inactive_list;

	spin_lock(&mz->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_subsys.disabled)
		return 0;

	pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
	if (unlikely(pc == NULL))
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
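	 *
	 * (Illustrative summary: every successful charge pins the cgroup
	 * with css_get() and adds PAGE_SIZE to the res_counter; the matching
	 * css_put()/res_counter_uncharge() happen in
	 * __mem_cgroup_uncharge_common(). When the counter is at its limit,
	 * the loop below reclaims and retries up to
	 * MEM_CGROUP_RECLAIM_RETRIES times before declaring OOM.)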
	 */
	if (likely(!memcg)) {
		rcu_read_lock();
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		/*
		 * For every charge from the cgroup, increment reference count
		 */
		css_get(&mem->css);
		rcu_read_unlock();
	} else {
		mem = memcg;
		css_get(&memcg->css);
	}

	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
	}

	pc->mem_cgroup = mem;
	pc->page = page;
	/*
	 * If a page is accounted as page cache, insert it into the inactive
	 * list. If anon, insert it into the active list.
	 */
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags = PAGE_CGROUP_FLAG_CACHE;
	else
		pc->flags = PAGE_CGROUP_FLAG_ACTIVE;

	lock_page_cgroup(page);
	if (unlikely(page_get_page_cgroup(page))) {
		unlock_page_cgroup(page);
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kmem_cache_free(page_cgroup_cache, pc);
		goto done;
	}
	page_assign_page_cgroup(page, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(page);
done:
	return 0;
out:
	css_put(&mem->css);
	kmem_cache_free(page_cgroup_cache, pc);
err:
	return -ENOMEM;
}

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has an address_space.
	 * But page->mapping may have an out-of-use anon_vma pointer;
	 * detect it with a PageAnon() check. A newly mapped anon page's
	 * page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	/*
	 * Corner case handling. This is usually called from
	 * add_to_page_cache(). But some filesystems (shmem) pre-charge the
	 * page before calling it, and then call add_to_page_cache() with
	 * GFP_NOWAIT.
	 *
	 * In the GFP_NOWAIT case, the page may already be charged before
	 * add_to_page_cache() runs (see shmem.c); check for that here and
	 * avoid charging twice. (It works, but we pay a slightly higher
	 * cost.)
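	 *
	 * Illustrative sequence, assumed from the description above:
	 *	mem_cgroup_cache_charge(page, mm, GFP_KERNEL);	(may reclaim)
	 *	...
	 *	add_to_page_cache(page, mapping, idx, GFP_NOWAIT);
	 *	  -> calls us again; the existing page_cgroup is found below
	 *	     and we return 0 without charging a second time.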
	 */
	if (!(gfp_mask & __GFP_WAIT)) {
		struct page_cgroup *pc;

		lock_page_cgroup(page);
		pc = page_get_page_cgroup(page);
		if (pc) {
			VM_BUG_ON(pc->page != page);
			VM_BUG_ON(!pc->mem_cgroup);
			unlock_page_cgroup(page);
			return 0;
		}
		unlock_page_cgroup(page);
	}

	if (unlikely(!mm))
		mm = &init_mm;

	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
}

/*
 * uncharge if !page_mapped(page)
 */
static void
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * Check if our page_cgroup is valid.
	 */
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (unlikely(!pc))
		goto unlock;

	VM_BUG_ON(pc->page != page);

	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
	    && ((pc->flags & PAGE_CGROUP_FLAG_CACHE)
		|| page_mapped(page)))
		goto unlock;

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_remove_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);

	mem = pc->mem_cgroup;
	res_counter_uncharge(&mem->res, PAGE_SIZE);
	css_put(&mem->css);

	kmem_cache_free(page_cgroup_cache, pc);
	return;
unlock:
	unlock_page_cgroup(page);
}

void mem_cgroup_uncharge_page(struct page *page)
{
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * Before starting migration, account against the new page.
 */
int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	int ret = 0;

	if (mem_cgroup_subsys.disabled)
		return 0;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
		if (pc->flags & PAGE_CGROUP_FLAG_CACHE)
			ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	}
	unlock_page_cgroup(page);
	if (mem) {
		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
			ctype, mem);
		css_put(&mem->css);
	}
	return ret;
}

/* Remove the redundant charge if migration failed. */
void mem_cgroup_end_migration(struct page *newpage)
{
	/*
	 * On success, page->mapping is not NULL.
	 * Special rollback care is necessary when
	 * 1. migration fails (newpage->mapping is cleared in this case), or
	 * 2. the newpage was moved but not remapped again because the task
	 *    exits and the newpage is obsolete. In this case, the new page
	 *    may be a swapcache. So, we just call mem_cgroup_uncharge_page()
	 *    always, to avoid a mess. The page_cgroup will be removed if
	 *    unnecessary. File cache pages are still on the radix-tree;
	 *    don't worry about them.
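	 *
	 * (Illustrative pairing with mem_cgroup_prepare_migration(): the
	 * new page was charged to the old page's cgroup there. On failure,
	 * newpage->mapping is NULL and that charge is dropped here with
	 * MEM_CGROUP_CHARGE_TYPE_FORCE; on success with an anon page, the
	 * uncharge below is a no-op while the page is still mapped, see
	 * __mem_cgroup_uncharge_common().)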
	 */
	if (!newpage->mapping)
		__mem_cgroup_uncharge_common(newpage,
					 MEM_CGROUP_CHARGE_TYPE_FORCE);
	else if (PageAnon(newpage))
		mem_cgroup_uncharge_page(newpage);
}

/*
 * A call to try to shrink memory usage under the specified resource
 * controller. This is typically used for page reclaim on shmem, to reduce
 * the side effects of page allocations from shmem, which some mem_cgroups
 * use.
 */
int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	int progress = 0;
	int retry = MEM_CGROUP_RECLAIM_RETRIES;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	css_get(&mem->css);
	rcu_read_unlock();

	do {
		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
	} while (!progress && --retry);

	css_put(&mem->css);
	if (!retry)
		return -ENOMEM;
	return 0;
}

/*
 * This routine traverses the page_cgroups on the given list and drops them
 * all. *And* this routine doesn't reclaim the pages themselves, it just
 * removes the page_cgroups.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
			    struct mem_cgroup_per_zone *mz,
			    int active)
{
	struct page_cgroup *pc;
	struct page *page;
	int count = FORCE_UNCHARGE_BATCH;
	unsigned long flags;
	struct list_head *list;

	if (active)
		list = &mz->active_list;
	else
		list = &mz->inactive_list;

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		get_page(page);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
		/*
		 * Check if this page is on the LRU. A !LRU page can be
		 * found if it's under page migration.
		 */
		if (PageLRU(page)) {
			__mem_cgroup_uncharge_common(page,
					MEM_CGROUP_CHARGE_TYPE_FORCE);
			put_page(page);
			if (--count <= 0) {
				count = FORCE_UNCHARGE_BATCH;
				cond_resched();
			}
		} else
			cond_resched();
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
	spin_unlock_irqrestore(&mz->lru_lock, flags);
}

/*
 * Make the mem_cgroup's charge 0 if there is no task.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	if (mem_cgroup_subsys.disabled)
		return 0;

	css_get(&mem->css);
	/*
	 * The page reclaim code (kswapd etc.) will move pages between
	 * active_list <-> inactive_list while we don't hold a lock,
	 * so we have to loop here until all lists are empty.
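	 *
	 * (Illustrative case: a page that kswapd moves from the inactive to
	 * the active list between our two force_empty_list() calls below is
	 * missed on this pass; re-checking res.usage at the top of the loop
	 * catches it on the next pass.)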
	 */
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				mz = mem_cgroup_zoneinfo(mem, node, zid);
				/* drop all page_cgroup in active_list */
				mem_cgroup_force_empty_list(mem, mz, 1);
				/* drop all page_cgroup in inactive_list */
				mem_cgroup_force_empty_list(mem, mz, 0);
			}
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
				    cft->private);
}

static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				 cft->private, buffer,
				 res_counter_memparse_write_strategy);
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&mem->res);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&mem->res);
		break;
	}
	return 0;
}

static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
}

static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
	[MEM_CGROUP_STAT_PGPGIN_COUNT] = { "pgpgin", 1, },
	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = { "pgpgout", 1, },
};

static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
		unsigned long active, inactive;

		inactive = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_INACTIVE);
		active = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_ACTIVE);
		cb->fill(cb, "active", (active) * PAGE_SIZE);
		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
	}
	return 0;
}

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "force_empty",
		.trigger = mem_force_empty_write,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
};

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
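	/*
	 * "tmp" is the node passed to kmalloc_node() below; it is changed
	 * to -1 (no node preference) when the requested node has no normal
	 * memory to allocate from.
	 */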
	/*
	 * This routine is called for each possible node.
	 * But it is a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste a lot of memory for nodes which will
	 *       never be onlined. It would be better to use a memory hotplug
	 *       callback function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		INIT_LIST_HEAD(&mz->active_list);
		INIT_LIST_HEAD(&mz->inactive_list);
		spin_lock_init(&mz->lru_lock);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}

static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;

	if (sizeof(*mem) < PAGE_SIZE)
		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	else
		mem = vmalloc(sizeof(*mem));

	if (mem)
		memset(mem, 0, sizeof(*mem));
	return mem;
}

static void mem_cgroup_free(struct mem_cgroup *mem)
{
	if (sizeof(*mem) < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
	} else {
		mem = mem_cgroup_alloc();
		if (!mem)
			return ERR_PTR(-ENOMEM);
	}

	res_counter_init(&mem->res);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	if (cont->parent != NULL)
		mem_cgroup_free(mem);
	return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	mem_cgroup_free(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	if (mem_cgroup_subsys.disabled)
		return 0;
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	if (mem_cgroup_subsys.disabled)
		return;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate, the mm_struct is
	 * in effect owned by the leader.
	 */
	if (!thread_group_leader(p))
		goto out;

out:
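	/*
	 * Note: as written here, no charges are moved when a task migrates
	 * to another cgroup; pages stay charged to the cgroup they were
	 * originally charged against, and we only drop the mm reference
	 * below.
	 */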
	mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};
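
/*
 * Illustrative usage from userspace (paths and the 64M value are examples
 * only; the memory.* file names match mem_cgroup_files[] above):
 *
 *	# mount -t cgroup -o memory none /cgroups
 *	# mkdir /cgroups/grp0
 *	# echo 64M > /cgroups/grp0/memory.limit_in_bytes
 *	# echo $$ > /cgroups/grp0/tasks
 *	# cat /cgroups/grp0/memory.usage_in_bytes
 *	# cat /cgroups/grp0/memory.stat
 *	# echo 1 > /cgroups/grp0/memory.force_empty   (only succeeds once no
 *	#                                              tasks remain)
 *	# rmdir /cgroups/grp0
 */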