memcontrol.c revision b76734e5e34e1889ab9fc5f3756570b1129f0f50
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
static struct kmem_cache *page_cgroup_cache __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * For accounting under irq disable, there is no need to increment the
 * preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

/*
 * per-zone information in memory controller.
 */
enum mem_cgroup_zstat_index {
	MEM_CGROUP_ZSTAT_ACTIVE,
	MEM_CGROUP_ZSTAT_INACTIVE,

	NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long		count[NR_MEM_CGROUP_ZSTAT];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};
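/*
 * An illustrative sketch of how the structures above are reached,
 * matching mem_cgroup_zoneinfo() below: one mem_cgroup_per_node is
 * allocated per possible NUMA node, each holding one
 * mem_cgroup_per_zone per zone type:
 *
 *	struct mem_cgroup_per_zone *mz =
 *		&mem->info.nodeinfo[nid]->zoneinfo[zid];
 */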
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock. We need to ensure that page->page_cgroup is at least two-byte
 * aligned (based on comments from Nick Piggin). But since
 * bit_spin_lock doesn't actually set that lock bit in a non-debug
 * uniprocessor kernel, we should avoid setting it here too.
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)
#else
#define PAGE_CGROUP_LOCK	0x0
#endif
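/*
 * An illustrative encoding, assuming a page_cgroup allocated at
 * 0xffff810012345678 (any address with bit 0 clear will do, which the
 * alignment requirement above guarantees):
 *
 *	page->page_cgroup = 0xffff810012345678 | PAGE_CGROUP_LOCK;
 *
 * page_get_page_cgroup() below masks PAGE_CGROUP_LOCK back off, so the
 * same word serves as both the pointer and its lock.
 */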
/*
 * A page_cgroup is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup.
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE	(0x2)	/* page is active in this cgroup */

static int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
};

/*
 * Always modified under the lru lock, so preempt_disable() is not
 * necessary.
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = (charge) ? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;

	VM_BUG_ON(!irqs_disabled());
	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(stat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(stat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum mem_cgroup_zstat_index idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	VM_BUG_ON(!page_cgroup_locked(page));
	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static int try_lock_page_cgroup(struct page *page)
{
	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
			struct page_cgroup *pc)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
	list_del(&pc->lru);
}

static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
				struct page_cgroup *pc)
{
	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (!to) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		list_add(&pc->lru, &mz->inactive_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		list_add(&pc->lru, &mz->active_list);
	}
	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	if (active) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->active_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->inactive_list);
	}
}
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held.
 */
void mem_cgroup_move_lists(struct page *page, bool active)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	/*
	 * We cannot lock_page_cgroup while holding zone's lru_lock,
	 * because other holders of lock_page_cgroup can be interrupted
	 * with an attempt to rotate_reclaimable_page. But we cannot
	 * safely get to page_cgroup without it, so just try_lock it:
	 * mem_cgroup_isolate_pages allows for page left on wrong list.
	 */
	if (!try_lock_page_cgroup(page))
		return;

	pc = page_get_page_cgroup(page);
	if (pc) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_move_lists(pc, active);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(page);
}

/*
 * Calculate mapped_ratio under the memory controller. This will be used
 * in vmscan.c to determine whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}
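/*
 * A worked example of the ratio above, with illustrative numbers:
 * usage = 400 pages charged (so total = 401) and rss = 100 pages gives
 * (100 * 100) / 401 = 24, i.e. roughly a quarter of the charged pages
 * are rss. The "+ 1" keeps the division safe when the cgroup is empty.
 */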
/*
 * This function is called from the page reclaiming loop in vmscan.c,
 * where the balance between the active and inactive lists is calculated.
 * For memory controller page reclaiming, we should use the mem_cgroup's
 * imbalance rather than the zone's global lru imbalance.
 */
long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	unsigned long active, inactive;
	/* active and inactive are the number of pages. 'long' is ok. */
	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
	return (long) (active / (inactive + 1));
}

/*
 * prev_priority control... this will be used in the memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate the number of pages to be scanned in this priority/zone.
 * See also vmscan.c.
 *
 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
 * (see include/linux/mmzone.h)
 */
long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	long nr_active;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
	return (nr_active >> priority);
}

long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	long nr_inactive;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
	return (nr_inactive >> priority);
}
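/*
 * A worked example of the shift above, with illustrative numbers:
 * with DEF_PRIORITY == 12 and 4096 inactive pages in this zone, the
 * first reclaim pass asks for 4096 >> 12 = 1 page; each later pass
 * lowers the shift, until priority 0 asks for the whole list.
 */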
/*
 * Isolate up to nr_to_scan pages from this cgroup's per-zone LRU list
 * onto @dst; *scanned is set to the number of pages examined. Pages
 * found on the wrong list (see mem_cgroup_move_lists above) are rotated
 * to the correct one instead of being isolated.
 */
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	if (active)
		src = &mz->active_list;
	else
		src = &mz->inactive_list;

	spin_lock(&mz->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_subsys.disabled)
		return 0;

	/*
	 * Should page_cgroups go to their own slab?
	 * One could optimize the performance of the charging routine
	 * by saving a bit in the page_flags and using it as a lock
	 * to see if the cgroup page already has a page_cgroup associated
	 * with it.
	 */
retry:
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	/*
	 * The page_cgroup exists and the page has already been accounted.
	 */
	if (unlikely(pc)) {
		VM_BUG_ON(pc->page != page);
		VM_BUG_ON(!pc->mem_cgroup);
		unlock_page_cgroup(page);
		goto done;
	}
	unlock_page_cgroup(page);

	pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
	if (unlikely(pc == NULL))
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set; if so, charge the init_mm (happens for pagecache usage).
	 */
	if (likely(!memcg)) {
		rcu_read_lock();
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		/*
		 * For every charge from the cgroup, increment reference count
		 */
		css_get(&mem->css);
		rcu_read_unlock();
	} else {
		mem = memcg;
		css_get(&memcg->css);
	}

	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
	}

	pc->mem_cgroup = mem;
	pc->page = page;
	/*
	 * If a page is accounted as page cache, insert it on the inactive
	 * list. If anon, insert it on the active list.
	 */
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags = PAGE_CGROUP_FLAG_CACHE;
	else
		pc->flags = PAGE_CGROUP_FLAG_ACTIVE;

	lock_page_cgroup(page);
	if (unlikely(page_get_page_cgroup(page))) {
		unlock_page_cgroup(page);
		/*
		 * Another charge has been added to this page already.
		 * We take lock_page_cgroup(page) again and read
		 * page->cgroup, increment refcnt.... just retrying is OK.
		 */
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kmem_cache_free(page_cgroup_cache, pc);
		goto retry;
	}
	page_assign_page_cgroup(page, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(page);
done:
	return 0;
out:
	css_put(&mem->css);
	kmem_cache_free(page_cgroup_cache, pc);
err:
	return -ENOMEM;
}
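/*
 * The two wrappers below select the charge type for
 * mem_cgroup_charge_common(). As a rough sketch of the callers (based
 * on how these hooks are typically wired into the fault and page cache
 * paths, not something enforced in this file): mem_cgroup_charge() is
 * called when a page is about to be mapped into an address space,
 * while mem_cgroup_cache_charge() is called when a page is about to be
 * added to the page cache.
 */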
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
	/*
	 * If the page is already mapped, we don't have to account for it.
	 * If it is page cache, page->mapping points to an address_space.
	 * But page->mapping may hold a stale anon_vma pointer; detect that
	 * with a PageAnon() check. A newly mapped anonymous page's
	 * page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
}

/*
 * uncharge if !page_mapped(page)
 */
static void
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * Check if our page_cgroup is valid.
	 */
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (unlikely(!pc))
		goto unlock;

	VM_BUG_ON(pc->page != page);

	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
	    && ((pc->flags & PAGE_CGROUP_FLAG_CACHE)
		|| page_mapped(page)))
		goto unlock;

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_remove_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);

	mem = pc->mem_cgroup;
	res_counter_uncharge(&mem->res, PAGE_SIZE);
	css_put(&mem->css);

	kmem_cache_free(page_cgroup_cache, pc);
	return;
unlock:
	unlock_page_cgroup(page);
}

void mem_cgroup_uncharge_page(struct page *page)
{
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}
/*
 * Before starting migration, account against the new page.
 */
int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	int ret = 0;

	if (mem_cgroup_subsys.disabled)
		return 0;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
		if (pc->flags & PAGE_CGROUP_FLAG_CACHE)
			ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	}
	unlock_page_cgroup(page);
	if (mem) {
		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
			ctype, mem);
		css_put(&mem->css);
	}
	return ret;
}

/* remove the redundant charge if migration failed */
void mem_cgroup_end_migration(struct page *newpage)
{
	/*
	 * On success, newpage->mapping is not NULL.
	 * Special rollback care is necessary when
	 * 1. migration fails (newpage->mapping is cleared in this case), or
	 * 2. the newpage was moved but not remapped again because the task
	 *    exited and the newpage is obsolete. In this case, the new page
	 *    may be a swapcache page, so we just call
	 *    mem_cgroup_uncharge_page() always, to avoid a mess; the
	 *    page_cgroup will be removed if it is unnecessary. File cache
	 *    pages are still on the radix-tree; don't care about them.
	 */
	if (!newpage->mapping)
		__mem_cgroup_uncharge_common(newpage,
					 MEM_CGROUP_CHARGE_TYPE_FORCE);
	else if (PageAnon(newpage))
		mem_cgroup_uncharge_page(newpage);
}

/*
 * A call to try to shrink memory usage under the specified resource
 * controller. This is typically used to reclaim pages from shmem, reducing
 * the side effects of page allocation from shmem, which is used by some
 * mem_cgroups.
 */
int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	int progress = 0;
	int retry = MEM_CGROUP_RECLAIM_RETRIES;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	css_get(&mem->css);
	rcu_read_unlock();

	do {
		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
	} while (!progress && --retry);

	css_put(&mem->css);
	if (!retry)
		return -ENOMEM;
	return 0;
}

/*
 * This routine traverses the page_cgroups on the given list and drops them
 * all. Note that it doesn't reclaim the pages themselves; it just removes
 * the page_cgroups.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
			    struct mem_cgroup_per_zone *mz,
			    int active)
{
	struct page_cgroup *pc;
	struct page *page;
	int count = FORCE_UNCHARGE_BATCH;
	unsigned long flags;
	struct list_head *list;

	if (active)
		list = &mz->active_list;
	else
		list = &mz->inactive_list;

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		get_page(page);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
		/*
		 * Check if this page is on the LRU. A !LRU page can be found
		 * if it's under page migration.
		 */
		if (PageLRU(page)) {
			__mem_cgroup_uncharge_common(page,
					MEM_CGROUP_CHARGE_TYPE_FORCE);
			put_page(page);
			if (--count <= 0) {
				count = FORCE_UNCHARGE_BATCH;
				cond_resched();
			}
		} else
			cond_resched();
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
	spin_unlock_irqrestore(&mz->lru_lock, flags);
}
/*
 * Make the mem_cgroup's charge 0 if there is no task,
 * which enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	if (mem_cgroup_subsys.disabled)
		return 0;

	css_get(&mem->css);
	/*
	 * The page reclaim code (kswapd etc.) will move pages between
	 * active_list <-> inactive_list while we don't take a lock,
	 * so we have to loop here until all the lists are empty.
	 */
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				mz = mem_cgroup_zoneinfo(mem, node, zid);
				/* drop all page_cgroups in active_list */
				mem_cgroup_force_empty_list(mem, mz, 1);
				/* drop all page_cgroups in inactive_list */
				mem_cgroup_force_empty_list(mem, mz, 0);
			}
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
				    cft->private);
}

static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				 cft->private, buffer,
				 res_counter_memparse_write_strategy);
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&mem->res);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&mem->res);
		break;
	}
	return 0;
}

static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
}

static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
	[MEM_CGROUP_STAT_PGPGIN_COUNT] = { "pgpgin", 1, },
	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = { "pgpgout", 1, },
};

static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
		unsigned long active, inactive;

		inactive = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_INACTIVE);
		active = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_ACTIVE);
		cb->fill(cb, "active", (active) * PAGE_SIZE);
		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
	}
	return 0;
}
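/*
 * Reading "memory.stat" thus produces output of this shape (the values
 * are illustrative only):
 *
 *	cache 8192
 *	rss 4096
 *	pgpgin 10
 *	pgpgout 7
 *	active 4096
 *	inactive 8192
 *
 * cache, rss, active and inactive are reported in bytes; pgpgin and
 * pgpgout are event counts, per mem_cgroup_stat_desc[] above.
 */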
static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "force_empty",
		.trigger = mem_force_empty_write,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
};

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called for all possible nodes, but it is a BUG
	 * to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste a lot of memory on nodes which will
	 * never be onlined. It would be better to use a memory hotplug
	 * callback function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		INIT_LIST_HEAD(&mz->active_list);
		INIT_LIST_HEAD(&mz->inactive_list);
		spin_lock_init(&mz->lru_lock);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}

static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;

	if (sizeof(*mem) < PAGE_SIZE)
		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	else
		mem = vmalloc(sizeof(*mem));

	if (mem)
		memset(mem, 0, sizeof(*mem));
	return mem;
}

static void mem_cgroup_free(struct mem_cgroup *mem)
{
	if (sizeof(*mem) < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
	} else {
		mem = mem_cgroup_alloc();
		if (!mem)
			return ERR_PTR(-ENOMEM);
	}

	res_counter_init(&mem->res);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	if (cont->parent != NULL)
		mem_cgroup_free(mem);
	return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	mem_cgroup_free(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	if (mem_cgroup_subsys.disabled)
		return 0;
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	if (mem_cgroup_subsys.disabled)
		return;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate; the mm_struct
	 * is in effect owned by the leader.
	 */
	if (!thread_group_leader(p))
		goto out;

out:
	mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};
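/*
 * Illustrative userspace usage of the control files registered above
 * (paths and sizes are examples only):
 *
 *	# mount -t cgroup -o memory none /cgroups
 *	# mkdir /cgroups/grp0
 *	# echo 64M > /cgroups/grp0/memory.limit_in_bytes
 *	# echo $$ > /cgroups/grp0/tasks
 *	# cat /cgroups/grp0/memory.usage_in_bytes
 *
 * Writes to memory.limit_in_bytes are parsed by mem_cgroup_write() via
 * res_counter_memparse_write_strategy(), which accepts K/M/G suffixes.
 */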