memcontrol.c revision 4be4489feae6da890765cc1bdc1af5e4f8c4b75f
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5
struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* to remember the boot option */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		(0)
#endif


/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAPOUT,	/* # of pages, swapped out */
	MEM_CGROUP_STAT_DATA,		/* end of data requires synchronization */
	MEM_CGROUP_ON_MOVE,		/* someone is moving account between groups */
	MEM_CGROUP_STAT_NSTATS,
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
	MEM_CGROUP_EVENTS_NSTATS,
};
/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events, which is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET (128)
#define SOFTLIMIT_EVENTS_TARGET (1024)

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	struct list_head	lists[NR_LRU_LISTS];
	unsigned long		count[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
						/* use container_of	   */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *mem);
static void mem_cgroup_oom_notify(struct mem_cgroup *mem);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;
	/*
	 * While reclaiming in a hierarchy, we cache the last child we
	 * reclaimed from.
	 */
	int last_scanned_child;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	atomic_t	oom_lock;
	atomic_t	refcnt;

	unsigned int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long	move_charge_at_immigrate;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;
};

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON,
					&mc.to->move_charge_at_immigrate);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE,
					&mc.to->move_charge_at_immigrate);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define _OOM_TYPE		(2)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
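/*
 * Illustrative sketch, not part of the original memcontrol.c: how the
 * MEMFILE_* macros above round-trip a (type, attr) pair through
 * cft->private. memfile_pack_demo() is a hypothetical name used only
 * for this example; RES_USAGE comes from res_counter.h.
 */
static inline void memfile_pack_demo(void)
{
	int priv = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE);

	/* the two 16-bit halves unpack independently */
	VM_BUG_ON(MEMFILE_TYPE(priv) != _MEMSWAP);
	VM_BUG_ON(MEMFILE_ATTR(priv) != RES_USAGE);
}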
/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
#define MEM_CGROUP_RECLAIM_SOFT_BIT	0x2
#define MEM_CGROUP_RECLAIM_SOFT		(1 << MEM_CGROUP_RECLAIM_SOFT_BIT)

static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
static void drain_all_stock_async(void);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return &mem->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(mem, mz, mctz);
	spin_unlock(&mctz->lock);
}


static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);

	/*
	 * It is necessary to update all ancestors when hierarchy is used,
	 * because their event counters are not touched.
	 */
	for (; mem; mem = parent_mem_cgroup(mem)) {
		mz = mem_cgroup_zoneinfo(mem, nid, zid);
		excess = res_counter_soft_limit_excess(&mem->res);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * mem is over its soft limit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mem, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node_state(node, N_POSSIBLE) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(mem, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(mem, mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now, but someone else can add it back;
	 * we will add it back at the end of reclaim, at its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
		!css_tryget(&mz->mem->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}
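/*
 * Illustrative sketch, not part of the original memcontrol.c: the
 * intended consumer pattern for the helper above, as used by the soft
 * limit reclaim path. soft_limit_pick_sketch() is a hypothetical name.
 */
static inline void soft_limit_pick_sketch(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	/* pick the worst offender; the helper unlinks it from the tree */
	mz = mem_cgroup_largest_soft_limit_node(mctz);
	if (!mz)
		return;	/* tree empty: nobody exceeds its soft limit */
	/*
	 * ... reclaim from mz->mem here; the charge path later re-inserts
	 * the node at its new position via mem_cgroup_update_tree() ...
	 */
	css_put(&mz->mem->css);	/* ref taken by css_tryget() in the helper */
}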
/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and periodic
 * synchronization to implement a "quick" read. There is a trade-off
 * between the cost of a read and the precision of the value, so we may
 * eventually implement periodic synchronization for memcg's counters
 * as well.
 *
 * But this _read() function is currently used for the user interface,
 * and a user who accounts memory per cgroup always requires the exact
 * value. Even if we provided a quick-and-fuzzy read, we would still
 * have to visit all online cpus and compute the sum, so for now no
 * extra synchronization is implemented (it is only handled for cpu
 * hotplug).
 *
 * If kernel-internal users appear that can make use of an inexact
 * value, and reading all cpu values becomes a performance bottleneck
 * in some common workload, thresholds and synchronization as in
 * vmstat[] should be implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *mem,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(mem->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&mem->pcp_counter_lock);
	val += mem->nocpu_base.count[idx];
	spin_unlock(&mem->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static long mem_cgroup_local_usage(struct mem_cgroup *mem)
{
	long ret;

	ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
	ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
	return ret;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(mem->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&mem->pcp_counter_lock);
	val += mem->nocpu_base.events[idx];
	spin_unlock(&mem->pcp_counter_lock);
#endif
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
					 bool file, int nr_pages)
{
	preempt_disable();

	if (file)
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
	else
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);

	/* pagein of a big page is one event, so ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);

	preempt_enable();
}

static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
					enum lru_list idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static bool __memcg_event_check(struct mem_cgroup *mem, int target)
{
	unsigned long val, next;

	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
	next = this_cpu_read(mem->stat->targets[target]);
	/* from time_after() in jiffies.h */
	return ((long)next - (long)val < 0);
}
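/*
 * Worked example, not part of the original memcontrol.c: why the signed
 * difference above is wraparound-safe, like time_after(). With 32-bit
 * values for brevity:
 *
 *	val  = 0xfffffff0	(event counter about to wrap)
 *	next = 0x00000070	(target, one period of 128 past val)
 *
 * Numerically next < val, but (long)next - (long)val == 128 > 0, so the
 * target is still correctly reported as "not yet reached".
 */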
static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
{
	unsigned long val, next;

	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);

	switch (target) {
	case MEM_CGROUP_TARGET_THRESH:
		next = val + THRESHOLDS_EVENTS_TARGET;
		break;
	case MEM_CGROUP_TARGET_SOFTLIMIT:
		next = val + SOFTLIMIT_EVENTS_TARGET;
		break;
	default:
		return;
	}

	this_cpu_write(mem->stat->targets[target], next);
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) {
		mem_cgroup_threshold(mem);
		__mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
		if (unlikely(__memcg_event_check(mem,
				MEM_CGROUP_TARGET_SOFTLIMIT))) {
			mem_cgroup_update_tree(mem, page);
			__mem_cgroup_target_update(mem,
				MEM_CGROUP_TARGET_SOFTLIMIT);
		}
	}
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *mem = NULL;

	if (!mm)
		return NULL;
	/*
	 * Because we take no locks, mm->owner may be moved to another
	 * cgroup while we run. We use css_tryget() here, even though it
	 * looks pessimistic, rather than adding locks.
	 */
	rcu_read_lock();
	do {
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem))
			break;
	} while (!css_tryget(&mem->css));
	rcu_read_unlock();
	return mem;
}

/* The caller has to guarantee "mem" exists before calling this */
static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
{
	struct cgroup_subsys_state *css;
	int found;

	if (!mem) /* ROOT cgroup has the smallest ID */
		return root_mem_cgroup; /*css_put/get against root is ignored*/
	if (!mem->use_hierarchy) {
		if (css_tryget(&mem->css))
			return mem;
		return NULL;
	}
	rcu_read_lock();
	/*
	 * searching a memory cgroup which has the smallest ID under given
	 * ROOT cgroup. (ID >= 1)
	 */
	css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
	if (css && css_tryget(css))
		mem = container_of(css, struct mem_cgroup, css);
	else
		mem = NULL;
	rcu_read_unlock();
	return mem;
}

static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
					struct mem_cgroup *root,
					bool cond)
{
	int nextid = css_id(&iter->css) + 1;
	int found;
	int hierarchy_used;
	struct cgroup_subsys_state *css;

	hierarchy_used = iter->use_hierarchy;

	css_put(&iter->css);
	/* If no ROOT, walk all, ignore hierarchy */
	if (!cond || (root && !hierarchy_used))
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	do {
		iter = NULL;
		rcu_read_lock();

		css = css_get_next(&mem_cgroup_subsys, nextid,
				&root->css, &found);
		if (css && css_tryget(css))
			iter = container_of(css, struct mem_cgroup, css);
		rcu_read_unlock();
		/* If css is NULL, no more cgroups will be found */
		nextid = found + 1;
	} while (css && !iter);

	return iter;
}
/*
 * for_each_mem_cgroup_tree() visits all cgroups under the tree. Be careful:
 * breaking out of the loop is not allowed, because we hold a reference
 * count. Instead, set "cond" to false and "continue" to exit the loop.
 */
#define for_each_mem_cgroup_tree_cond(iter, root, cond)	\
	for (iter = mem_cgroup_start_loop(root);\
	     iter != NULL;\
	     iter = mem_cgroup_get_next(iter, root, cond))

#define for_each_mem_cgroup_tree(iter, root) \
	for_each_mem_cgroup_tree_cond(iter, root, true)

#define for_each_mem_cgroup_all(iter) \
	for_each_mem_cgroup_tree_cond(iter, NULL, true)
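/*
 * Illustrative sketch, not part of the original memcontrol.c: exiting
 * the walk early without "break", as the comment above requires. The
 * helper name is hypothetical; mem_cgroup_count_children() later in
 * this file shows the unconditional form.
 */
static inline int mem_cgroup_count_upto_sketch(struct mem_cgroup *mem, int max)
{
	int num = 0;
	bool cond = true;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree_cond(iter, mem, cond)
		if (++num >= max)
			cond = false;	/* next step drops the ref and ends */
	return num;
}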
static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
{
	return (mem == root_mem_cgroup);
}

/*
 * The following LRU functions may be used without PCG_LOCK. They are
 * called by the global LRU code independently of memcg. What we have to
 * take care of here is the validity of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charge
 * 2. moving account
 * In the typical case, "charge" is done before add-to-lru. The exception
 * is SwapCache, which is added to the LRU before being charged.
 * If the PCG_USED bit is not set, the page_cgroup is not added to this
 * private LRU. When moving account, the page is not on the LRU; it's
 * isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	/* can happen while we handle swapcache. */
	if (!TestClearPageCgroupAcctLRU(pc))
		return;
	VM_BUG_ON(!pc->mem_cgroup);
	/*
	 * We don't check PCG_USED bit. It's cleared when the "page" is
	 * finally removed from the global LRU.
	 */
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	VM_BUG_ON(list_empty(&pc->lru));
	list_del_init(&pc->lru);
}

void mem_cgroup_del_lru(struct page *page)
{
	mem_cgroup_del_lru_list(page, page_lru(page));
}

/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim. If it still appears to be reclaimable, move it to
 * the tail of the inactive list.
 */
void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;
	enum lru_list lru = page_lru(page);

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* unused or root page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	list_move_tail(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* unused or root page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	list_move(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	VM_BUG_ON(PageCgroupAcctLRU(pc));
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
	SetPageCgroupAcctLRU(pc);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	list_add(&pc->lru, &mz->lists[lru]);
}

/*
 * While handling SwapCache, pc->mem_cgroup may be changed while it's
 * linked to the LRU, because the page may be reused after it's fully
 * uncharged (because of SwapCache behavior). To handle that, unlink the
 * page_cgroup from the LRU when we charge it again. This function is
 * only used to charge SwapCache. It's done under lock_page and it is
 * expected that zone->lru_lock is never held.
 */
static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	spin_lock_irqsave(&zone->lru_lock, flags);
	/*
	 * Forget the old LRU when this page_cgroup is *not* used. The Used
	 * bit is guarded by lock_page() because the page is SwapCache.
	 */
	if (!PageCgroupUsed(pc))
		mem_cgroup_del_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}

static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	spin_lock_irqsave(&zone->lru_lock, flags);
	/* link when the page is linked to LRU but page_cgroup isn't */
	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
		mem_cgroup_add_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}
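/*
 * Illustrative sketch, not part of the original memcontrol.c: the
 * intended calling sequence around a SwapCache charge, per the comment
 * above. The commit step is elided.
 *
 *	mem_cgroup_lru_del_before_commit_swapcache(page);
 *	... commit the charge, setting PCG_USED ...
 *	mem_cgroup_lru_add_after_commit_swapcache(page);
 */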
void mem_cgroup_move_lists(struct page *page,
			   enum lru_list from, enum lru_list to)
{
	if (mem_cgroup_disabled())
		return;
	mem_cgroup_del_lru_list(page, from);
	mem_cgroup_add_lru_list(page, to);
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;
	struct mem_cgroup *curr = NULL;
	struct task_struct *p;

	p = find_lock_task_mm(task);
	if (!p)
		return 0;
	curr = try_get_mem_cgroup_from_mm(p->mm);
	task_unlock(p);
	if (!curr)
		return 0;
	/*
	 * We should check use_hierarchy of "mem", not "curr". Checking
	 * use_hierarchy of "curr" here would make this function return true
	 * whenever hierarchy is enabled in "curr" and "curr" is a child of
	 * "mem" in the *cgroup* hierarchy (even if use_hierarchy is disabled
	 * in "mem").
	 */
	if (mem->use_hierarchy)
		ret = css_is_ancestor(&curr->css, &mem->css);
	else
		ret = (curr == mem);
	css_put(&curr->css);
	return ret;
}

static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long gb;
	unsigned long inactive_ratio;

	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	if (present_pages) {
		present_pages[0] = inactive;
		present_pages[1] = active;
	}

	return inactive_ratio;
}

int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long present_pages[2];
	unsigned long inactive_ratio;

	inactive_ratio = calc_inactive_ratio(memcg, present_pages);

	inactive = present_pages[0];
	active = present_pages[1];

	if (inactive * inactive_ratio < active)
		return 1;

	return 0;
}
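/*
 * Worked example, not part of the original memcontrol.c:
 * calc_inactive_ratio() gives inactive_ratio = int_sqrt(10 * gb) for a
 * gb-sized anon LRU, so inactive anon is considered "low" when the
 * active list is more than inactive_ratio times the inactive list:
 *
 *	total anon LRU	inactive_ratio
 *	< 1 GB		1
 *	  10 GB		10
 *	 100 GB		31
 *	   1 TB		101
 *
 * i.e. the larger the cgroup, the smaller the inactive share that is
 * still considered healthy.
 */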
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	unsigned long active;
	unsigned long inactive;

	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);

	return (active > inactive);
}

unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
				       struct zone *zone,
				       enum lru_list lru)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	return MEM_CGROUP_ZSTAT(mz, lru);
}

struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	return &mz->reclaim_stat;
}

struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return NULL;

	pc = lookup_page_cgroup(page);
	if (!PageCgroupUsed(pc))
		return NULL;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	return &mz->reclaim_stat;
}

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = zone_to_nid(z);
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;
	int lru = LRU_FILE * file + active;
	int ret;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	src = &mz->lists[lru];

	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;

		if (unlikely(!PageCgroupUsed(pc)))
			continue;

		page = lookup_cgroup_page(pc);

		if (unlikely(!PageLRU(page)))
			continue;

		scan++;
		ret = __isolate_lru_page(page, mode, file);
		switch (ret) {
		case 0:
			list_move(&page->lru, dst);
			mem_cgroup_del_lru(page);
			nr_taken += hpage_nr_pages(page);
			break;
		case -EBUSY:
			/* we don't affect global LRU but rotate in our LRU */
			mem_cgroup_rotate_lru_list(page, page_lru(page));
			break;
		default:
			break;
		}
	}

	*scanned = scan;

	trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
				      0, 0, 0, mode);

	return nr_taken;
}

#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @mem: the memory cgroup
 *
 * Returns the maximum amount of memory @mem can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
{
	unsigned long long margin;

	margin = res_counter_margin(&mem->res);
	if (do_swap_account)
		margin = min(margin, res_counter_margin(&mem->memsw));
	return margin >> PAGE_SHIFT;
}

static unsigned int get_swappiness(struct mem_cgroup *memcg)
{
	struct cgroup *cgrp = memcg->css.cgroup;

	/* root ? */
	if (cgrp->parent == NULL)
		return vm_swappiness;

	return memcg->swappiness;
}

static void mem_cgroup_start_move(struct mem_cgroup *mem)
{
	int cpu;

	get_online_cpus();
	spin_lock(&mem->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
	spin_unlock(&mem->pcp_counter_lock);
	put_online_cpus();

	synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *mem)
{
	int cpu;

	if (!mem)
		return;
	get_online_cpus();
	spin_lock(&mem->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
	spin_unlock(&mem->pcp_counter_lock);
	put_online_cpus();
}
/*
 * Two routines for checking whether "mem" is under move_account().
 *
 * mem_cgroup_stealed() - checks whether a cgroup is mc.from. This is
 *			  used to avoid races in accounting. If true,
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checks whether a cgroup is mc.from, mc.to,
 *			  or under the hierarchy of moving cgroups. This
 *			  is for waiting under the high memory pressure
 *			  caused by "move".
 */

static bool mem_cgroup_stealed(struct mem_cgroup *mem)
{
	VM_BUG_ON(!rcu_read_lock_held());
	return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
}

static bool mem_cgroup_under_move(struct mem_cgroup *mem)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike the task_move routines, we access mc.to and mc.from not
	 * under mutual exclusion by cgroup_mutex. Here, we take the
	 * spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;
	if (from == mem || to == mem
	    || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
	    || (mem->use_hierarchy && css_is_ancestor(&to->css, &mem->css)))
		ret = true;
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(mem)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

/**
 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct cgroup *task_cgrp;
	struct cgroup *mem_cgrp;
	/*
	 * Need a buffer in BSS, can't rely on allocations. The code relies
	 * on the assumption that OOM is serialized for the memory
	 * controller. If this assumption is broken, revisit this code.
	 */
	static char memcg_name[PATH_MAX];
	int ret;

	if (!memcg || !p)
		return;


	rcu_read_lock();

	mem_cgrp = memcg->css.cgroup;
	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		/*
		 * Unfortunately, we are unable to convert to a useful name,
		 * but we'll still print out the usage information.
		 */
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	printk(KERN_INFO "Task in %s killed", memcg_name);

	rcu_read_lock();
	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	/*
	 * Continues from above, so we don't need a KERN_ level.
	 */
	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
done:

	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->res, RES_FAILCNT));
	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
		"failcnt %llu\n",
		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
}

/*
 * This function returns the number of memcgs under the hierarchy tree.
 * Returns 1 (the count of self) if there are no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *mem)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, mem)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	u64 limit;
	u64 memsw;

	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	limit += total_swap_pages << PAGE_SHIFT;

	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	/*
	 * If memsw is finite and limits the amount of swap space available
	 * to this memcg, return that limit.
	 */
	return min(limit, memsw);
}

/*
 * Visit the first child (need not be the first child as per the ordering
 * of the cgroup list, since we track last_scanned_child) of @mem and use
 * that to reclaim free pages from.
 */
static struct mem_cgroup *
mem_cgroup_select_victim(struct mem_cgroup *root_mem)
{
	struct mem_cgroup *ret = NULL;
	struct cgroup_subsys_state *css;
	int nextid, found;

	if (!root_mem->use_hierarchy) {
		css_get(&root_mem->css);
		ret = root_mem;
	}

	while (!ret) {
		rcu_read_lock();
		nextid = root_mem->last_scanned_child + 1;
		css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
				   &found);
		if (css && css_tryget(css))
			ret = container_of(css, struct mem_cgroup, css);

		rcu_read_unlock();
		/* Updates scanning parameter */
		if (!css) {
			/* this means start scan from ID:1 */
			root_mem->last_scanned_child = 0;
		} else
			root_mem->last_scanned_child = found;
	}

	return ret;
}

/*
 * Scan the hierarchy if needed to reclaim memory. We remember the last child
 * we reclaimed from, so that we don't end up penalizing one child extensively
 * based on its position in the children list.
 *
 * root_mem is the original ancestor that we've been reclaiming from.
 *
 * We give up and return to the caller when we visit root_mem twice.
 * (other groups can be removed while we're walking....)
 *
 * If shrink==true, this returns immediately, to avoid freeing too much.
 */
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
						struct zone *zone,
						gfp_t gfp_mask,
						unsigned long reclaim_options)
{
	struct mem_cgroup *victim;
	int ret, total = 0;
	int loop = 0;
	bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
	bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
	bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
	unsigned long excess;

	excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;

	/* If memsw_is_minimum==1, swap-out is of no use. */
	if (root_mem->memsw_is_minimum)
		noswap = true;

	while (1) {
		victim = mem_cgroup_select_victim(root_mem);
		if (victim == root_mem) {
			loop++;
			if (loop >= 1)
				drain_all_stock_async();
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!check_soft || !total) {
					css_put(&victim->css);
					break;
				}
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we
				 * don't reclaim too much, nor too little
				 * such that we keep coming back to reclaim
				 * from this cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
					css_put(&victim->css);
					break;
				}
			}
		}
		if (!mem_cgroup_local_usage(victim)) {
			/* this cgroup's local usage == 0 */
			css_put(&victim->css);
			continue;
		}
		/* we use swappiness of local cgroup */
		if (check_soft)
			ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
				noswap, get_swappiness(victim), zone);
		else
			ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
						noswap, get_swappiness(victim));
		css_put(&victim->css);
		/*
		 * While shrinking usage, we can't check whether we should
		 * stop here or reclaim more; that depends on the callers.
		 * last_scanned_child works well enough to keep fairness
		 * under the tree.
		 */
		if (shrink)
			return ret;
		total += ret;
		if (check_soft) {
			if (!res_counter_soft_limit_excess(&root_mem->res))
				return total;
		} else if (mem_cgroup_margin(root_mem))
			return 1 + total;
	}
	return total;
}

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
{
	int x, lock_count = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, mem) {
		x = atomic_inc_return(&iter->oom_lock);
		lock_count = max(x, lock_count);
	}

	if (lock_count == 1)
		return true;
	return false;
}

static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. We have to use
	 * atomic_add_unless() here.
	 */
	for_each_mem_cgroup_tree(iter, mem)
		atomic_add_unless(&iter->oom_lock, -1, 0);
	return 0;
}


static DEFINE_MUTEX(memcg_oom_mutex);
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *mem;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);

	if (oom_wait_info->mem == wake_mem)
		goto wakeup;
	/* if no hierarchy, no match */
	if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
		return 0;
	/*
	 * Both oom_wait_info->mem and wake_mem are stable under us.
	 * Then we can use css_is_ancestor without taking care of RCU.
	 */
	if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
	    !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
		return 0;

wakeup:
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_wakeup_oom(struct mem_cgroup *mem)
{
	/* for filtering, pass "mem" as argument. */
	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
}

static void memcg_oom_recover(struct mem_cgroup *mem)
{
	if (mem && atomic_read(&mem->oom_lock))
		memcg_wakeup_oom(mem);
}

/*
 * try to call OOM killer. returns false if we should exit memory-reclaim loop.
 */
bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
{
	struct oom_wait_info owait;
	bool locked, need_to_kill;

	owait.mem = mem;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);
	need_to_kill = true;
	/* At first, try to OOM lock hierarchy under mem.*/
	mutex_lock(&memcg_oom_mutex);
	locked = mem_cgroup_oom_lock(mem);
	/*
	 * Even if signal_pending(), we can't quit the charge() loop without
	 * accounting, so UNINTERRUPTIBLE would be appropriate. But SIGKILL
	 * under OOM is always welcome, so use TASK_KILLABLE here.
	 */
	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	if (!locked || mem->oom_kill_disable)
		need_to_kill = false;
	if (locked)
		mem_cgroup_oom_notify(mem);
	mutex_unlock(&memcg_oom_mutex);

	if (need_to_kill) {
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(mem, mask);
	} else {
		schedule();
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}
	mutex_lock(&memcg_oom_mutex);
	mem_cgroup_oom_unlock(mem);
	memcg_wakeup_oom(mem);
	mutex_unlock(&memcg_oom_mutex);

	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
		return false;
	/* Give chance to dying process */
	schedule_timeout(1);
	return true;
}

/*
 * Currently used to update mapped file statistics, but the routine can be
 * generalized to update other statistics as well.
 *
 * Notes: Race condition
 *
 * We usually use page_cgroup_lock() for accessing page_cgroup members,
 * but it tends to be costly. Under some conditions, however, we don't
 * need to do so _always_.
 *
 * Considering "charge", lock_page_cgroup() is not required because all
 * file-stat operations happen after a page is attached to the radix-tree.
 * There is no race with "charge".
 *
 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup
 * even if there is a race with "uncharge". The statistics themselves are
 * properly handled by flags.
 *
 * Considering "move", this is the only case where we see a race. To make
 * the race window small, we check the MEM_CGROUP_ON_MOVE percpu value and
 * detect the possibility of a race condition. If there is one, we take a
 * lock.
 */

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx, int val)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc = lookup_page_cgroup(page);
	bool need_unlock = false;
	unsigned long uninitialized_var(flags);

	if (unlikely(!pc))
		return;

	rcu_read_lock();
	mem = pc->mem_cgroup;
	if (unlikely(!mem || !PageCgroupUsed(pc)))
		goto out;
	/* pc->mem_cgroup is unstable ? */
	if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
		/* take a lock against accessing pc->mem_cgroup */
		move_lock_page_cgroup(pc, &flags);
		need_unlock = true;
		mem = pc->mem_cgroup;
		if (!mem || !PageCgroupUsed(pc))
			goto out;
	}

	switch (idx) {
	case MEMCG_NR_FILE_MAPPED:
		if (val > 0)
			SetPageCgroupFileMapped(pc);
		else if (!page_mapped(page))
			ClearPageCgroupFileMapped(pc);
		idx = MEM_CGROUP_STAT_FILE_MAPPED;
		break;
	default:
		BUG();
	}

	this_cpu_add(mem->stat->count[idx], val);

out:
	if (unlikely(need_unlock))
		move_unlock_page_cgroup(pc, &flags);
	rcu_read_unlock();
	return;
}
EXPORT_SYMBOL(mem_cgroup_update_page_stat);

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: bigger machines may need bigger numbers.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static atomic_t memcg_drain_count;

/*
 * Try to consume stocked charge on this cpu. On success, one page is
 * consumed from the local stock and true is returned. If the stock is
 * empty or holds charges from a cgroup other than the current target,
 * false is returned; the stock will be refilled later.
 */
static bool consume_stock(struct mem_cgroup *mem)
{
	struct memcg_stock_pcp *stock;
	bool ret = true;

	stock = &get_cpu_var(memcg_stock);
	if (mem == stock->cached && stock->nr_pages)
		stock->nr_pages--;
	else /* need to call res_counter_charge */
		ret = false;
	put_cpu_var(memcg_stock);
	return ret;
}

/*
 * Return stock cached in percpu to the res_counter and reset the cached
 * information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		unsigned long bytes = stock->nr_pages * PAGE_SIZE;

		res_counter_uncharge(&old->res, bytes);
		if (do_swap_account)
			res_counter_uncharge(&old->memsw, bytes);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

/*
 * This must be called with preemption disabled, or by a thread which is
 * pinned to the local cpu.
 */
static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
	drain_stock(stock);
}

/*
 * Cache charges from the res_counter in the local per-cpu area, to be
 * consumed by consume_stock() later.
 */
static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

	if (stock->cached != mem) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = mem;
	}
	stock->nr_pages += nr_pages;
	put_cpu_var(memcg_stock);
}

/*
 * Tries to drain stocked charges on other cpus. This function is
 * asynchronous and just queues a work item per cpu to drain locally on
 * each cpu. Callers can expect some charges to come back to the
 * res_counter later, but cannot wait for that.
 */
static void drain_all_stock_async(void)
{
	int cpu;
	/* This function is for scheduling "drain" in an asynchronous way.
	 * The result of "drain" is not directly handled by callers. So,
	 * if someone is already calling drain, we don't have to call drain
	 * again. Anyway, the WORK_STRUCT_PENDING check in queue_work_on()
	 * will catch a race; we just do a loose check here.
	 */
	if (atomic_read(&memcg_drain_count))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	atomic_inc(&memcg_drain_count);
	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		schedule_work_on(cpu, &stock->work);
	}
	put_online_cpus();
	atomic_dec(&memcg_drain_count);
	/* We don't wait for flush_work */
}

/* This is a synchronous drain interface. */
static void drain_all_stock_sync(void)
{
	/* called when force_empty is called */
	atomic_inc(&memcg_drain_count);
	schedule_on_each_cpu(drain_local_stock);
	atomic_dec(&memcg_drain_count);
}
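/*
 * Illustrative sketch, not part of the original memcontrol.c: how the
 * stock machinery composes on the charge fast path. The helper name is
 * hypothetical; the real consumer is __mem_cgroup_try_charge() below,
 * which charges CHARGE_BATCH pages at once and parks the surplus in the
 * local stock via refill_stock().
 */
static inline bool charge_fast_path_sketch(struct mem_cgroup *mem)
{
	if (consume_stock(mem))
		return true;	/* served from the per-cpu cache */
	/*
	 * Slow path (elided): res_counter_charge() a whole batch, then
	 *	refill_stock(mem, CHARGE_BATCH - 1);
	 * so the next CHARGE_BATCH - 1 single-page charges on this cpu
	 * avoid touching the shared res_counter.
	 */
	return false;
}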
*/ 1833 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0; 1834 spin_unlock(&mem->pcp_counter_lock); 1835} 1836 1837static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu) 1838{ 1839 int idx = MEM_CGROUP_ON_MOVE; 1840 1841 spin_lock(&mem->pcp_counter_lock); 1842 per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx]; 1843 spin_unlock(&mem->pcp_counter_lock); 1844} 1845 1846static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb, 1847 unsigned long action, 1848 void *hcpu) 1849{ 1850 int cpu = (unsigned long)hcpu; 1851 struct memcg_stock_pcp *stock; 1852 struct mem_cgroup *iter; 1853 1854 if ((action == CPU_ONLINE)) { 1855 for_each_mem_cgroup_all(iter) 1856 synchronize_mem_cgroup_on_move(iter, cpu); 1857 return NOTIFY_OK; 1858 } 1859 1860 if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN) 1861 return NOTIFY_OK; 1862 1863 for_each_mem_cgroup_all(iter) 1864 mem_cgroup_drain_pcp_counter(iter, cpu); 1865 1866 stock = &per_cpu(memcg_stock, cpu); 1867 drain_stock(stock); 1868 return NOTIFY_OK; 1869} 1870 1871 1872/* See __mem_cgroup_try_charge() for details */ 1873enum { 1874 CHARGE_OK, /* success */ 1875 CHARGE_RETRY, /* need to retry but retry is not bad */ 1876 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */ 1877 CHARGE_WOULDBLOCK, /* GFP_WAIT wasn't set and no enough res. */ 1878 CHARGE_OOM_DIE, /* the current is killed because of OOM */ 1879}; 1880 1881static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask, 1882 unsigned int nr_pages, bool oom_check) 1883{ 1884 unsigned long csize = nr_pages * PAGE_SIZE; 1885 struct mem_cgroup *mem_over_limit; 1886 struct res_counter *fail_res; 1887 unsigned long flags = 0; 1888 int ret; 1889 1890 ret = res_counter_charge(&mem->res, csize, &fail_res); 1891 1892 if (likely(!ret)) { 1893 if (!do_swap_account) 1894 return CHARGE_OK; 1895 ret = res_counter_charge(&mem->memsw, csize, &fail_res); 1896 if (likely(!ret)) 1897 return CHARGE_OK; 1898 1899 res_counter_uncharge(&mem->res, csize); 1900 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); 1901 flags |= MEM_CGROUP_RECLAIM_NOSWAP; 1902 } else 1903 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); 1904 /* 1905 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch 1906 * of regular pages (CHARGE_BATCH), or a single regular page (1). 1907 * 1908 * Never reclaim on behalf of optional batching, retry with a 1909 * single page instead. 1910 */ 1911 if (nr_pages == CHARGE_BATCH) 1912 return CHARGE_RETRY; 1913 1914 if (!(gfp_mask & __GFP_WAIT)) 1915 return CHARGE_WOULDBLOCK; 1916 1917 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL, 1918 gfp_mask, flags); 1919 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 1920 return CHARGE_RETRY; 1921 /* 1922 * Even though the limit is exceeded at this point, reclaim 1923 * may have been able to free some pages. Retry the charge 1924 * before killing the task. 1925 * 1926 * Only for regular pages, though: huge pages are rather 1927 * unlikely to succeed so close to the limit, and we fall back 1928 * to regular pages anyway in case of failure. 1929 */ 1930 if (nr_pages == 1 && ret) 1931 return CHARGE_RETRY; 1932 1933 /* 1934 * At task move, charge accounts can be doubly counted. So, it's 1935 * better to wait until the end of task_move if something is going on. 
1936	 */
1937	if (mem_cgroup_wait_acct_move(mem_over_limit))
1938		return CHARGE_RETRY;
1939
1940	/* If we don't need to call the oom-killer at all, return immediately */
1941	if (!oom_check)
1942		return CHARGE_NOMEM;
1943	/* check OOM */
1944	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
1945		return CHARGE_OOM_DIE;
1946
1947	return CHARGE_RETRY;
1948}
1949
1950/*
1951 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
1952 * the oom-killer can be invoked.
1953 */
1954static int __mem_cgroup_try_charge(struct mm_struct *mm,
1955				   gfp_t gfp_mask,
1956				   unsigned int nr_pages,
1957				   struct mem_cgroup **memcg,
1958				   bool oom)
1959{
1960	unsigned int batch = max(CHARGE_BATCH, nr_pages);
1961	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
1962	struct mem_cgroup *mem = NULL;
1963	int ret;
1964
1965	/*
1966	 * Unlike the global VM's OOM-kill, we're not under a system-level
1967	 * memory shortage. So, allow dying processes to proceed with the
1968	 * charge, in addition to MEMDIE processes.
1969	 */
1970	if (unlikely(test_thread_flag(TIF_MEMDIE)
1971		     || fatal_signal_pending(current)))
1972		goto bypass;
1973
1974	/*
1975	 * We always charge the cgroup the mm_struct belongs to.
1976	 * The mm_struct's mem_cgroup changes on task migration if the
1977	 * thread group leader migrates. It's possible that mm is not
1978	 * set; if so, charge the init_mm (happens for pagecache usage).
1979	 */
1980	if (!*memcg && !mm)
1981		goto bypass;
1982again:
1983	if (*memcg) { /* css should be a valid one */
1984		mem = *memcg;
1985		VM_BUG_ON(css_is_removed(&mem->css));
1986		if (mem_cgroup_is_root(mem))
1987			goto done;
1988		if (nr_pages == 1 && consume_stock(mem))
1989			goto done;
1990		css_get(&mem->css);
1991	} else {
1992		struct task_struct *p;
1993
1994		rcu_read_lock();
1995		p = rcu_dereference(mm->owner);
1996		/*
1997		 * Because we don't have task_lock(), "p" can exit.
1998		 * In that case, "mem" can point to root, or p can be NULL
1999		 * due to a race with swapoff. Then we have a small risk of
2000		 * mis-accounting. But this kind of raced mis-accounting
2001		 * always happens because we don't hold cgroup_mutex().
2002		 * Preventing it would be overkill; we allow that small
2003		 * race here. (*) swapoff et al. charge against the
2004		 * mm_struct, not the task_struct. So, mm->owner can be NULL.
2005		 */
2006		mem = mem_cgroup_from_task(p);
2007		if (!mem || mem_cgroup_is_root(mem)) {
2008			rcu_read_unlock();
2009			goto done;
2010		}
2011		if (nr_pages == 1 && consume_stock(mem)) {
2012			/*
2013			 * It seems dangerous to access the memcg without
2014			 * css_get(). But considering how consume_stock()
2015			 * works, it's not necessary. If consume_stock()
2016			 * succeeds, some charges from this memcg are cached
2017			 * on this cpu. So, we don't need to call
2018			 * css_get()/css_tryget() before consume_stock().
2019			 */
2020			rcu_read_unlock();
2021			goto done;
2022		}
2023		/* after here, we may be blocked. We need to get a refcnt. */
2024		if (!css_tryget(&mem->css)) {
2025			rcu_read_unlock();
2026			goto again;
2027		}
2028		rcu_read_unlock();
2029	}
2030
2031	do {
2032		bool oom_check;
2033
2034		/* If killed, bypass charge */
2035		if (fatal_signal_pending(current)) {
2036			css_put(&mem->css);
2037			goto bypass;
2038		}
2039
2040		oom_check = false;
2041		if (oom && !nr_oom_retries) {
2042			oom_check = true;
2043			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2044		}
2045
2046		ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check);
2047		switch (ret) {
2048		case CHARGE_OK:
2049			break;
2050		case CHARGE_RETRY: /* not in OOM situation but retry */
2051			batch = nr_pages;
2052			css_put(&mem->css);
2053			mem = NULL;
2054			goto again;
2055		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2056			css_put(&mem->css);
2057			goto nomem;
2058		case CHARGE_NOMEM: /* OOM routine works */
2059			if (!oom) {
2060				css_put(&mem->css);
2061				goto nomem;
2062			}
2063			/* If oom, we never return -ENOMEM */
2064			nr_oom_retries--;
2065			break;
2066		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
2067			css_put(&mem->css);
2068			goto bypass;
2069		}
2070	} while (ret != CHARGE_OK);
2071
2072	if (batch > nr_pages)
2073		refill_stock(mem, batch - nr_pages);
2074	css_put(&mem->css);
2075done:
2076	*memcg = mem;
2077	return 0;
2078nomem:
2079	*memcg = NULL;
2080	return -ENOMEM;
2081bypass:
2082	*memcg = NULL;
2083	return 0;
2084}
2085
2086/*
2087 * Sometimes we have to undo a charge we got by try_charge().
2088 * This function is for that: it uncharges and puts the css refcnt
2089 * taken by try_charge().
2090 */
2091static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
2092				       unsigned int nr_pages)
2093{
2094	if (!mem_cgroup_is_root(mem)) {
2095		unsigned long bytes = nr_pages * PAGE_SIZE;
2096
2097		res_counter_uncharge(&mem->res, bytes);
2098		if (do_swap_account)
2099			res_counter_uncharge(&mem->memsw, bytes);
2100	}
2101}
2102
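/*
 * A minimal sketch (not compiled as part of this file) of the two-phase
 * charge protocol implemented above: try_charge reserves the pages against
 * the res_counter, and the caller either commits the charge to a page or
 * cancels it on failure. The failure point is a hypothetical placeholder;
 * see mem_cgroup_charge_common() below for a real caller.
 */
#if 0
static int charge_protocol_sketch(struct page *page, struct mm_struct *mm)
{
	struct mem_cgroup *mem = NULL;
	struct page_cgroup *pc = lookup_page_cgroup(page);
	int ret;

	ret = __mem_cgroup_try_charge(mm, GFP_KERNEL, 1, &mem, true);
	if (ret || !mem)
		return ret;		/* reservation failed, or bypassed */

	if (some_setup_fails()) {	/* hypothetical failure point */
		__mem_cgroup_cancel_charge(mem, 1);	/* undo reservation */
		return -ENOMEM;
	}
	__mem_cgroup_commit_charge(mem, page, 1, pc,
				   MEM_CGROUP_CHARGE_TYPE_MAPPED);
	return 0;
}
#endif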
2103/*
2104 * A helper function to get a mem_cgroup from an ID. Must be called under
2105 * rcu_read_lock(). The caller must check css_is_removed() or similar if
2106 * it is a concern. (Dropping a refcnt from swap can happen against a
2107 * removed memcg.)
2108 */
2109static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2110{
2111	struct cgroup_subsys_state *css;
2112
2113	/* ID 0 is unused ID */
2114	if (!id)
2115		return NULL;
2116	css = css_lookup(&mem_cgroup_subsys, id);
2117	if (!css)
2118		return NULL;
2119	return container_of(css, struct mem_cgroup, css);
2120}
2121
2122struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2123{
2124	struct mem_cgroup *mem = NULL;
2125	struct page_cgroup *pc;
2126	unsigned short id;
2127	swp_entry_t ent;
2128
2129	VM_BUG_ON(!PageLocked(page));
2130
2131	pc = lookup_page_cgroup(page);
2132	lock_page_cgroup(pc);
2133	if (PageCgroupUsed(pc)) {
2134		mem = pc->mem_cgroup;
2135		if (mem && !css_tryget(&mem->css))
2136			mem = NULL;
2137	} else if (PageSwapCache(page)) {
2138		ent.val = page_private(page);
2139		id = lookup_swap_cgroup(ent);
2140		rcu_read_lock();
2141		mem = mem_cgroup_lookup(id);
2142		if (mem && !css_tryget(&mem->css))
2143			mem = NULL;
2144		rcu_read_unlock();
2145	}
2146	unlock_page_cgroup(pc);
2147	return mem;
2148}
2149
2150static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
2151				       struct page *page,
2152				       unsigned int nr_pages,
2153				       struct page_cgroup *pc,
2154				       enum charge_type ctype)
2155{
2156	lock_page_cgroup(pc);
2157	if (unlikely(PageCgroupUsed(pc))) {
2158		unlock_page_cgroup(pc);
2159		__mem_cgroup_cancel_charge(mem, nr_pages);
2160		return;
2161	}
2162	/*
2163	 * we don't need page_cgroup_lock for tail pages, because they are
2164	 * not accessed by any other context at this point.
2165	 */
2166	pc->mem_cgroup = mem;
2167	/*
2168	 * We access a page_cgroup asynchronously without lock_page_cgroup().
2169	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2170	 * is accessed after testing the USED bit. To make pc->mem_cgroup
2171	 * visible before the USED bit, we need a memory barrier here.
2172	 * See mem_cgroup_add_lru_list(), etc.
2173	 */
2174	smp_wmb();
2175	switch (ctype) {
2176	case MEM_CGROUP_CHARGE_TYPE_CACHE:
2177	case MEM_CGROUP_CHARGE_TYPE_SHMEM:
2178		SetPageCgroupCache(pc);
2179		SetPageCgroupUsed(pc);
2180		break;
2181	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2182		ClearPageCgroupCache(pc);
2183		SetPageCgroupUsed(pc);
2184		break;
2185	default:
2186		break;
2187	}
2188
2189	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages);
2190	unlock_page_cgroup(pc);
2191	/*
2192	 * "charge_statistics" updated the event counter. Then, check it.
2193	 * Insert the ancestor (and the ancestor's ancestors) into the
2194	 * softlimit RB-tree if they exceed their softlimit.
2195	 */
2196	memcg_check_events(mem, page);
2197}
2198
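/*
 * A minimal reader-side sketch (not compiled as part of this file) of the
 * barrier pairing for the smp_wmb() in __mem_cgroup_commit_charge() above:
 * a lockless reader must test the USED bit first and order the read of
 * pc->mem_cgroup after it.
 */
#if 0
static struct mem_cgroup *lockless_reader_sketch(struct page_cgroup *pc)
{
	if (!PageCgroupUsed(pc))
		return NULL;
	smp_rmb();	/* pairs with smp_wmb() in __mem_cgroup_commit_charge() */
	return pc->mem_cgroup;	/* sees the value set before the USED bit */
}
#endif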
2199#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2200
2201#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
2202			(1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
2203/*
2204 * Because tail pages are not marked as "used", set that here. We're under
2205 * zone->lru_lock, 'splitting on pmd' and compound_lock.
2206 */
2207void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
2208{
2209	struct page_cgroup *head_pc = lookup_page_cgroup(head);
2210	struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
2211	unsigned long flags;
2212
2213	if (mem_cgroup_disabled())
2214		return;
2215	/*
2216	 * We have no races with charge/uncharge but will have races with
2217	 * page state accounting.
2218	 */
2219	move_lock_page_cgroup(head_pc, &flags);
2220
2221	tail_pc->mem_cgroup = head_pc->mem_cgroup;
2222	smp_wmb(); /* see __commit_charge() */
2223	if (PageCgroupAcctLRU(head_pc)) {
2224		enum lru_list lru;
2225		struct mem_cgroup_per_zone *mz;
2226
2227		/*
2228		 * LRU flags cannot be copied because we need to add the tail
2229		 * page to the LRU by the generic call, and our hook will be
2230		 * called. We hold lru_lock, so reduce the counter directly.
2231		 */
2232		lru = page_lru(head);
2233		mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
2234		MEM_CGROUP_ZSTAT(mz, lru) -= 1;
2235	}
2236	tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
2237	move_unlock_page_cgroup(head_pc, &flags);
2238}
2239#endif
2240
2241/**
2242 * mem_cgroup_move_account - move account of the page
2243 * @page: the page
2244 * @nr_pages: number of regular pages (>1 for huge pages)
2245 * @pc:	page_cgroup of the page.
2246 * @from: mem_cgroup which the page is moved from.
2247 * @to:	mem_cgroup which the page is moved to. @from != @to.
2248 * @uncharge: whether we should call uncharge and css_put against @from.
2249 *
2250 * The caller must confirm the following:
2251 * - page is not on the LRU (isolate_page() is useful.)
2252 * - compound_lock is held when nr_pages > 1
2253 *
2254 * This function doesn't do "charge" nor css_get to the new cgroup. That
2255 * should be done by the caller (__mem_cgroup_try_charge would be useful).
2256 * If @uncharge is true, this function does "uncharge" from the old cgroup,
2257 * but it doesn't if @uncharge is false; in that case the caller must do it.
2258 */
2259static int mem_cgroup_move_account(struct page *page,
2260				   unsigned int nr_pages,
2261				   struct page_cgroup *pc,
2262				   struct mem_cgroup *from,
2263				   struct mem_cgroup *to,
2264				   bool uncharge)
2265{
2266	unsigned long flags;
2267	int ret;
2268
2269	VM_BUG_ON(from == to);
2270	VM_BUG_ON(PageLRU(page));
2271	/*
2272	 * The page is isolated from the LRU. So, the collapse function
2273	 * will not handle this page. But page splitting can happen.
2274	 * Do this check under compound_page_lock(). The caller should
2275	 * hold it.
2276	 */
2277	ret = -EBUSY;
2278	if (nr_pages > 1 && !PageTransHuge(page))
2279		goto out;
2280
2281	lock_page_cgroup(pc);
2282
2283	ret = -EINVAL;
2284	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
2285		goto unlock;
2286
2287	move_lock_page_cgroup(pc, &flags);
2288
2289	if (PageCgroupFileMapped(pc)) {
2290		/* Update mapped_file data for mem_cgroup */
2291		preempt_disable();
2292		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2293		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2294		preempt_enable();
2295	}
2296	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
2297	if (uncharge)
2298		/* This is not "cancel", but cancel_charge does all we need. */
2299		__mem_cgroup_cancel_charge(from, nr_pages);
2300
2301	/* caller should have done css_get */
2302	pc->mem_cgroup = to;
2303	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
2304	/*
2305	 * We charge against "to", which may not have any tasks. Then, "to"
2306	 * can be under rmdir(). But in the current implementation, the
2307	 * callers of this function are just force_empty() and move charge,
2308	 * so it's guaranteed that "to" is never removed. So, we don't check
2309	 * rmdir status here.
2310	 */
2311	move_unlock_page_cgroup(pc, &flags);
2312	ret = 0;
2313unlock:
2314	unlock_page_cgroup(pc);
2315	/*
2316	 * check events
2317	 */
2318	memcg_check_events(to, page);
2319	memcg_check_events(from, page);
2320out:
2321	return ret;
2322}
2323
2324/*
2325 * move charges to the parent.
2326 */
2327
2328static int mem_cgroup_move_parent(struct page *page,
2329				  struct page_cgroup *pc,
2330				  struct mem_cgroup *child,
2331				  gfp_t gfp_mask)
2332{
2333	struct cgroup *cg = child->css.cgroup;
2334	struct cgroup *pcg = cg->parent;
2335	struct mem_cgroup *parent;
2336	unsigned int nr_pages;
2337	unsigned long uninitialized_var(flags);
2338	int ret;
2339
2340	/* Is ROOT ? */
2341	if (!pcg)
2342		return -EINVAL;
2343
2344	ret = -EBUSY;
2345	if (!get_page_unless_zero(page))
2346		goto out;
2347	if (isolate_lru_page(page))
2348		goto put;
2349
2350	nr_pages = hpage_nr_pages(page);
2351
2352	parent = mem_cgroup_from_cont(pcg);
2353	ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
2354	if (ret || !parent)
2355		goto put_back;
2356
2357	if (nr_pages > 1)
2358		flags = compound_lock_irqsave(page);
2359
2360	ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
2361	if (ret)
2362		__mem_cgroup_cancel_charge(parent, nr_pages);
2363
2364	if (nr_pages > 1)
2365		compound_unlock_irqrestore(page, flags);
2366put_back:
2367	putback_lru_page(page);
2368put:
2369	put_page(page);
2370out:
2371	return ret;
2372}
2373
2374/*
2375 * Charge the memory controller for page usage.
2376 * Return
2377 * 0 if the charge was successful
2378 * < 0 if the cgroup is over its limit
2379 */
2380static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2381				    gfp_t gfp_mask, enum charge_type ctype)
2382{
2383	struct mem_cgroup *mem = NULL;
2384	unsigned int nr_pages = 1;
2385	struct page_cgroup *pc;
2386	bool oom = true;
2387	int ret;
2388
2389	if (PageTransHuge(page)) {
2390		nr_pages <<= compound_order(page);
2391		VM_BUG_ON(!PageTransHuge(page));
2392		/*
2393		 * Never OOM-kill a process for a huge page. The
2394		 * fault handler will fall back to regular pages.
2395		 */
2396		oom = false;
2397	}
2398
2399	pc = lookup_page_cgroup(page);
2400	BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
2401
2402	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom);
2403	if (ret || !mem)
2404		return ret;
2405
2406	__mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype);
2407	return 0;
2408}
2409
2410int mem_cgroup_newpage_charge(struct page *page,
2411			      struct mm_struct *mm, gfp_t gfp_mask)
2412{
2413	if (mem_cgroup_disabled())
2414		return 0;
2415	/*
2416	 * If already mapped, we don't have to account.
2417	 * If it is page cache, page->mapping has an address_space.
2418	 * But page->mapping may hold an out-of-use anon_vma pointer;
2419	 * detect that with the PageAnon() check. A newly-mapped-anon
2420	 * page's page->mapping is NULL.
2421	 */
2422	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2423		return 0;
2424	if (unlikely(!mm))
2425		mm = &init_mm;
2426	return mem_cgroup_charge_common(page, mm, gfp_mask,
2427					MEM_CGROUP_CHARGE_TYPE_MAPPED);
2428}
2429
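/*
 * A minimal sketch (not compiled as part of this file) of how a fault
 * path is expected to use the entry point above: charge the page before
 * mapping it, and bail out to the OOM path when the cgroup is over its
 * limit. The surrounding fault-handler shape is an assumption made for
 * illustration.
 */
#if 0
static int anon_fault_sketch(struct mm_struct *mm, struct page *page)
{
	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
		return VM_FAULT_OOM;	/* over limit and reclaim failed */
	/* ...map the page; on any later error, undo the charge with: */
	/* mem_cgroup_uncharge_page(page); */
	return 0;
}
#endif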
2430static void
2431__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2432					enum charge_type ctype);
2433
2434int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2435			    gfp_t gfp_mask)
2436{
2437	int ret;
2438
2439	if (mem_cgroup_disabled())
2440		return 0;
2441	if (PageCompound(page))
2442		return 0;
2443	/*
2444	 * Corner case handling. This is usually called from
2445	 * add_to_page_cache(). But some filesystems (shmem) precharge this
2446	 * page before calling it and call add_to_page_cache() with GFP_NOWAIT.
2447	 *
2448	 * For the GFP_NOWAIT case, the page may be pre-charged before calling
2449	 * add_to_page_cache(). (See shmem.c.) Check that here and avoid
2450	 * charging twice. (It works, but pays a slightly larger cost.)
2451	 * And when the page is SwapCache, it should take swap information
2452	 * into account. This is under lock_page() now.
2453	 */
2454	if (!(gfp_mask & __GFP_WAIT)) {
2455		struct page_cgroup *pc;
2456
2457		pc = lookup_page_cgroup(page);
2458		if (!pc)
2459			return 0;
2460		lock_page_cgroup(pc);
2461		if (PageCgroupUsed(pc)) {
2462			unlock_page_cgroup(pc);
2463			return 0;
2464		}
2465		unlock_page_cgroup(pc);
2466	}
2467
2468	if (unlikely(!mm))
2469		mm = &init_mm;
2470
2471	if (page_is_file_cache(page))
2472		return mem_cgroup_charge_common(page, mm, gfp_mask,
2473						MEM_CGROUP_CHARGE_TYPE_CACHE);
2474
2475	/* shmem */
2476	if (PageSwapCache(page)) {
2477		struct mem_cgroup *mem;
2478
2479		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2480		if (!ret)
2481			__mem_cgroup_commit_charge_swapin(page, mem,
2482					MEM_CGROUP_CHARGE_TYPE_SHMEM);
2483	} else
2484		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2485					MEM_CGROUP_CHARGE_TYPE_SHMEM);
2486
2487	return ret;
2488}
2489
2490/*
2491 * During swap-in (try_charge -> commit or cancel), the page is locked.
2492 * And when try_charge() successfully returns, one refcnt to the memcg,
2493 * without a struct page_cgroup, is acquired. This refcnt will be consumed
2494 * by "commit()" or dropped by "cancel()".
2495 */
2496int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2497				 struct page *page,
2498				 gfp_t mask, struct mem_cgroup **ptr)
2499{
2500	struct mem_cgroup *mem;
2501	int ret;
2502
2503	*ptr = NULL;
2504
2505	if (mem_cgroup_disabled())
2506		return 0;
2507
2508	if (!do_swap_account)
2509		goto charge_cur_mm;
2510	/*
2511	 * A racing thread's fault, or swapoff, may have already updated
2512	 * the pte, and even removed the page from the swap cache: in those
2513	 * cases do_swap_page()'s pte_same() test will fail; but there's
2514	 * also a KSM case which does need to charge the page.
2515	 */
2516	if (!PageSwapCache(page))
2517		goto charge_cur_mm;
2518	mem = try_get_mem_cgroup_from_page(page);
2519	if (!mem)
2520		goto charge_cur_mm;
2521	*ptr = mem;
2522	ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
2523	css_put(&mem->css);
2524	return ret;
2525charge_cur_mm:
2526	if (unlikely(!mm))
2527		mm = &init_mm;
2528	return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
2529}
2530
2531static void
2532__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2533					enum charge_type ctype)
2534{
2535	struct page_cgroup *pc;
2536
2537	if (mem_cgroup_disabled())
2538		return;
2539	if (!ptr)
2540		return;
2541	cgroup_exclude_rmdir(&ptr->css);
2542	pc = lookup_page_cgroup(page);
2543	mem_cgroup_lru_del_before_commit_swapcache(page);
2544	__mem_cgroup_commit_charge(ptr, page, 1, pc, ctype);
2545	mem_cgroup_lru_add_after_commit_swapcache(page);
2546	/*
2547	 * Now the swap is in memory. This means this page may be
2548	 * counted both as mem and swap: a double count.
2549	 * Fix it by uncharging from memsw. Basically, this SwapCache is
2550	 * stable under lock_page(). But in do_swap_page() in memory.c,
2551	 * reuse_swap_page() may call delete_from_swap_cache() before we
	 * reach here.
2552	 */
2553	if (do_swap_account && PageSwapCache(page)) {
2554		swp_entry_t ent = {.val = page_private(page)};
2555		unsigned short id;
2556		struct mem_cgroup *memcg;
2557
2558		id = swap_cgroup_record(ent, 0);
2559		rcu_read_lock();
2560		memcg = mem_cgroup_lookup(id);
2561		if (memcg) {
2562			/*
2563			 * The recorded memcg can be an obsolete one. So,
2564			 * avoid calling css_tryget().
2565			 */
2566			if (!mem_cgroup_is_root(memcg))
2567				res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2568			mem_cgroup_swap_statistics(memcg, false);
2569			mem_cgroup_put(memcg);
2570		}
2571		rcu_read_unlock();
2572	}
2573	/*
2574	 * At swapin, we may charge against a cgroup which has no tasks.
2575	 * So, rmdir()->pre_destroy() can be called while we do this charge.
2576	 * In that case, we need to call pre_destroy() again; check that here.
2577	 */
2578	cgroup_release_and_wakeup_rmdir(&ptr->css);
2579}
2580
2581void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2582{
2583	__mem_cgroup_commit_charge_swapin(page, ptr,
2584					  MEM_CGROUP_CHARGE_TYPE_MAPPED);
2585}
2586
2587void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2588{
2589	if (mem_cgroup_disabled())
2590		return;
2591	if (!mem)
2592		return;
2593	__mem_cgroup_cancel_charge(mem, 1);
2594}
2595
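/*
 * A minimal sketch (not compiled as part of this file) of the swap-in
 * charge protocol around the helpers above, shaped after do_swap_page();
 * the control flow and the race-check placeholder are assumptions made
 * for illustration.
 */
#if 0
static int swapin_fault_sketch(struct mm_struct *mm, struct page *page)
{
	struct mem_cgroup *ptr;

	/* page is locked; the charge is held by "ptr" until commit/cancel */
	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
		return VM_FAULT_OOM;
	if (pte_changed_under_us()) {		/* hypothetical race check */
		mem_cgroup_cancel_charge_swapin(ptr);
		return 0;			/* someone else mapped it */
	}
	/* map the page, then commit (also fixes the mem/memsw double count) */
	mem_cgroup_commit_charge_swapin(page, ptr);
	return 0;
}
#endif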
2596static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
2597				   unsigned int nr_pages,
2598				   const enum charge_type ctype)
2599{
2600	struct memcg_batch_info *batch = NULL;
2601	bool uncharge_memsw = true;
2602
2603	/* If swapout, usage of swap doesn't decrease */
2604	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2605		uncharge_memsw = false;
2606
2607	batch = &current->memcg_batch;
2608	/*
2609	 * Usually, we do css_get() when we remember a memcg pointer.
2610	 * But in this case, we keep res->usage until the end of a series of
2611	 * uncharges. Then, it's ok to ignore the memcg's refcnt.
2612	 */
2613	if (!batch->memcg)
2614		batch->memcg = mem;
2615	/*
2616	 * do_batch > 0 when unmapping pages or during inode invalidate/
2617	 * truncate. In those cases, all pages freed continuously can be
2618	 * expected to be in the same cgroup, and we have a chance to
2619	 * coalesce uncharges. But we uncharge one by one if the task is
2620	 * being killed by OOM (TIF_MEMDIE), because we want to uncharge
	 * as soon as possible.
2621	 */
2622
2623	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2624		goto direct_uncharge;
2625
2626	if (nr_pages > 1)
2627		goto direct_uncharge;
2628
2629	/*
2630	 * In the typical case, batch->memcg == mem. This means we can
2631	 * merge a series of uncharges into one res_counter uncharge.
2632	 * If not, we uncharge the res_counter one by one.
2633	 */
2634	if (batch->memcg != mem)
2635		goto direct_uncharge;
2636	/* remember the freed charge and uncharge it later */
2637	batch->nr_pages++;
2638	if (uncharge_memsw)
2639		batch->memsw_nr_pages++;
2640	return;
2641direct_uncharge:
2642	res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE);
2643	if (uncharge_memsw)
2644		res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE);
2645	if (unlikely(batch->memcg != mem))
2646		memcg_oom_recover(mem);
2647	return;
2648}
2649
2650/*
2651 * uncharge if !page_mapped(page)
2652 */
2653static struct mem_cgroup *
2654__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2655{
2656	struct mem_cgroup *mem = NULL;
2657	unsigned int nr_pages = 1;
2658	struct page_cgroup *pc;
2659
2660	if (mem_cgroup_disabled())
2661		return NULL;
2662
2663	if (PageSwapCache(page))
2664		return NULL;
2665
2666	if (PageTransHuge(page)) {
2667		nr_pages <<= compound_order(page);
2668		VM_BUG_ON(!PageTransHuge(page));
2669	}
2670	/*
2671	 * Check if our page_cgroup is valid
2672	 */
2673	pc = lookup_page_cgroup(page);
2674	if (unlikely(!pc || !PageCgroupUsed(pc)))
2675		return NULL;
2676
2677	lock_page_cgroup(pc);
2678
2679	mem = pc->mem_cgroup;
2680
2681	if (!PageCgroupUsed(pc))
2682		goto unlock_out;
2683
2684	switch (ctype) {
2685	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2686	case MEM_CGROUP_CHARGE_TYPE_DROP:
2687		/* See mem_cgroup_prepare_migration() */
2688		if (page_mapped(page) || PageCgroupMigration(pc))
2689			goto unlock_out;
2690		break;
2691	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2692		if (!PageAnon(page)) {	/* Shared memory */
2693			if (page->mapping && !page_is_file_cache(page))
2694				goto unlock_out;
2695		} else if (page_mapped(page)) /* Anon */
2696			goto unlock_out;
2697		break;
2698	default:
2699		break;
2700	}
2701
2702	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages);
2703
2704	ClearPageCgroupUsed(pc);
2705	/*
2706	 * pc->mem_cgroup is not cleared here. It will be accessed when the
2707	 * page is freed from the LRU. This is safe because an uncharged page
2708	 * is expected not to be reused (it is freed soon). The exception is
2709	 * SwapCache, which is handled by special functions.
2710	 */
2711
2712	unlock_page_cgroup(pc);
2713	/*
2714	 * even after unlock, we have mem->res.usage here and this memcg
2715	 * will never be freed.
2716	 */
2717	memcg_check_events(mem, page);
2718	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
2719		mem_cgroup_swap_statistics(mem, true);
2720		mem_cgroup_get(mem);
2721	}
2722	if (!mem_cgroup_is_root(mem))
2723		mem_cgroup_do_uncharge(mem, nr_pages, ctype);
2724
2725	return mem;
2726
2727unlock_out:
2728	unlock_page_cgroup(pc);
2729	return NULL;
2730}
2731
2732void mem_cgroup_uncharge_page(struct page *page)
2733{
2734	/* early check. */
2735	if (page_mapped(page))
2736		return;
2737	if (page->mapping && !PageAnon(page))
2738		return;
2739	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
2740}
2741
2742void mem_cgroup_uncharge_cache_page(struct page *page)
2743{
2744	VM_BUG_ON(page_mapped(page));
2745	VM_BUG_ON(page->mapping);
2746	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
2747}
2748
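/*
 * A minimal usage sketch (not compiled as part of this file) for the
 * uncharge batching API defined just below: bracket a burst of frees with
 * start/end so the res_counter is touched once instead of once per page.
 * The loop shape is an assumption for illustration; the real callers are
 * the unmap and truncate paths.
 */
#if 0
static void free_page_run_sketch(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();	/* begin coalescing uncharges */
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_page(pages[i]);	/* accumulates only */
	mem_cgroup_uncharge_end();	/* one res_counter_uncharge() here */
}
#endif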
2749/*
2750 * uncharge_start/uncharge_end is called in unmap_page_range/invalidate/
2751 * truncate. In those cases, pages are freed continuously and we can
2752 * expect them to be in the same memcg. Each of these callers itself
2753 * limits the number of pages freed at once, so uncharge_start/end() is
2754 * called properly. These calls may also be nested in one context.
2755 */
2756
2757void mem_cgroup_uncharge_start(void)
2758{
2759	current->memcg_batch.do_batch++;
2760	/* We can nest. */
2761	if (current->memcg_batch.do_batch == 1) {
2762		current->memcg_batch.memcg = NULL;
2763		current->memcg_batch.nr_pages = 0;
2764		current->memcg_batch.memsw_nr_pages = 0;
2765	}
2766}
2767
2768void mem_cgroup_uncharge_end(void)
2769{
2770	struct memcg_batch_info *batch = &current->memcg_batch;
2771
2772	if (!batch->do_batch)
2773		return;
2774
2775	batch->do_batch--;
2776	if (batch->do_batch) /* If stacked, do nothing. */
2777		return;
2778
2779	if (!batch->memcg)
2780		return;
2781	/*
2782	 * This "batch->memcg" is valid without any css_get/put etc...
2783	 * because we hide the charges behind us.
2784	 */
2785	if (batch->nr_pages)
2786		res_counter_uncharge(&batch->memcg->res,
2787				     batch->nr_pages * PAGE_SIZE);
2788	if (batch->memsw_nr_pages)
2789		res_counter_uncharge(&batch->memcg->memsw,
2790				     batch->memsw_nr_pages * PAGE_SIZE);
2791	memcg_oom_recover(batch->memcg);
2792	/* forget this pointer (for sanity check) */
2793	batch->memcg = NULL;
2794}
2795
2796#ifdef CONFIG_SWAP
2797/*
2798 * Called after __delete_from_swap_cache(); drops the "page" account.
2799 * The memcg information is recorded in the swap_cgroup of "ent".
2800 */
2801void
2802mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
2803{
2804	struct mem_cgroup *memcg;
2805	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
2806
2807	if (!swapout) /* this was a swap cache but the swap is unused ! */
2808		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
2809
2810	memcg = __mem_cgroup_uncharge_common(page, ctype);
2811
2812	/*
2813	 * record memcg information; if swapout && memcg != NULL,
2814	 * mem_cgroup_get() was called in uncharge().
2815	 */
2816	if (do_swap_account && swapout && memcg)
2817		swap_cgroup_record(ent, css_id(&memcg->css));
2818}
2819#endif
2820
2821#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2822/*
2823 * Called from swap_entry_free(). Removes the record in swap_cgroup and
2824 * uncharges the "memsw" account.
2825 */
2826void mem_cgroup_uncharge_swap(swp_entry_t ent)
2827{
2828	struct mem_cgroup *memcg;
2829	unsigned short id;
2830
2831	if (!do_swap_account)
2832		return;
2833
2834	id = swap_cgroup_record(ent, 0);
2835	rcu_read_lock();
2836	memcg = mem_cgroup_lookup(id);
2837	if (memcg) {
2838		/*
2839		 * We uncharge this because the swap entry is freed.
2840		 * This memcg can be an obsolete one; avoid calling css_tryget().
2841		 */
2842		if (!mem_cgroup_is_root(memcg))
2843			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2844		mem_cgroup_swap_statistics(memcg, false);
2845		mem_cgroup_put(memcg);
2846	}
2847	rcu_read_unlock();
2848}
2849
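/*
 * Lifecycle of the swap_cgroup record tied together by the two helpers
 * above (a summary, not new behavior): at swap-out,
 * mem_cgroup_uncharge_swapcache() stores css_id(&memcg->css) for the
 * entry and takes a memcg reference; when the swap entry is finally
 * freed, mem_cgroup_uncharge_swap() clears the record, uncharges memsw,
 * and drops that reference. Swap-in through mem_cgroup_try_charge_swapin()
 * and its commit consumes the record early instead.
 */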
2850/**
2851 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2852 * @entry: swap entry to be moved
2853 * @from:  mem_cgroup which the entry is moved from
2854 * @to:  mem_cgroup which the entry is moved to
2855 * @need_fixup: whether we should fixup res_counters and refcounts.
2856 *
2857 * It succeeds only when the swap_cgroup's record for this entry is the same
2858 * as the mem_cgroup's id of @from.
2859 *
2860 * Returns 0 on success, -EINVAL on failure.
2861 *
2862 * The caller must have charged to @to, IOW, called res_counter_charge() on
2863 * both res and memsw, and called css_get().
2864 */
2865static int mem_cgroup_move_swap_account(swp_entry_t entry,
2866		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2867{
2868	unsigned short old_id, new_id;
2869
2870	old_id = css_id(&from->css);
2871	new_id = css_id(&to->css);
2872
2873	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2874		mem_cgroup_swap_statistics(from, false);
2875		mem_cgroup_swap_statistics(to, true);
2876		/*
2877		 * This function is only called from the task migration
2878		 * context now. It postpones res_counter and refcount
2879		 * handling till the end of task migration
2880		 * (mem_cgroup_clear_mc()) for performance improvement. But
2881		 * we cannot postpone mem_cgroup_get(to), because if the
2882		 * process that has been moved to @to does swap-in, the
		 * refcount of @to might be decreased to 0.
2883		 */
2884		mem_cgroup_get(to);
2885		if (need_fixup) {
2886			if (!mem_cgroup_is_root(from))
2887				res_counter_uncharge(&from->memsw, PAGE_SIZE);
2888			mem_cgroup_put(from);
2889			/*
2890			 * we charged both to->res and to->memsw, so we should
2891			 * uncharge to->res here.
2892			 */
2893			if (!mem_cgroup_is_root(to))
2894				res_counter_uncharge(&to->res, PAGE_SIZE);
2895		}
2896		return 0;
2897	}
2898	return -EINVAL;
2899}
2900#else
2901static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2902		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2903{
2904	return -EINVAL;
2905}
2906#endif
2907
2908/*
2909 * Before starting migration, account PAGE_SIZE to the mem_cgroup that
2910 * the old page belongs to.
2911 */
2912int mem_cgroup_prepare_migration(struct page *page,
2913	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
2914{
2915	struct mem_cgroup *mem = NULL;
2916	struct page_cgroup *pc;
2917	enum charge_type ctype;
2918	int ret = 0;
2919
2920	*ptr = NULL;
2921
2922	VM_BUG_ON(PageTransHuge(page));
2923	if (mem_cgroup_disabled())
2924		return 0;
2925
2926	pc = lookup_page_cgroup(page);
2927	lock_page_cgroup(pc);
2928	if (PageCgroupUsed(pc)) {
2929		mem = pc->mem_cgroup;
2930		css_get(&mem->css);
2931		/*
2932		 * At migration of an anonymous page, its mapcount goes down
2933		 * to 0 and uncharge() will be called. But even if it's fully
2934		 * unmapped, migration may fail and this page would have to be
2935		 * charged again. We set the MIGRATION flag here and delay
2936		 * uncharge until end_migration() is called
2937		 *
2938		 * Corner Case Thinking
2939		 * A)
2940		 * The old page was mapped as Anon and is unmapped-and-freed
2941		 * while migration is ongoing.
2942		 * If unmap finds the old page, its uncharge() will be delayed
2943		 * until end_migration(). If unmap finds the new page, it's
2944		 * uncharged when its mapcount goes from 1 to 0. If the unmap
2945		 * code finds a swap migration entry, the new page will not be
2946		 * mapped and end_migration() will find it (mapcount == 0).
2947		 *
2948		 * B)
2949		 * The old page was mapped but migration fails; the kernel
2950		 * remaps it. A charge for it is kept by the MIGRATION flag
2951		 * even if the mapcount goes down to 0. We can remap
2952		 * successfully without charging it again.
2953		 *
2954		 * C)
2955		 * The "old" page is under lock_page() until the end of
2956		 * migration, so the old page itself will not be swapped out.
2957		 * If the new page is swapped out before end_migration, our
2958		 * hook into the usual swap-out path will catch the event.
2959		 */
2960		if (PageAnon(page))
2961			SetPageCgroupMigration(pc);
2962	}
2963	unlock_page_cgroup(pc);
2964	/*
2965	 * If the page is not charged at this point,
2966	 * we return here.
2967	 */
2968	if (!mem)
2969		return 0;
2970
2971	*ptr = mem;
2972	ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
2973	css_put(&mem->css);/* drop extra refcnt */
2974	if (ret || *ptr == NULL) {
2975		if (PageAnon(page)) {
2976			lock_page_cgroup(pc);
2977			ClearPageCgroupMigration(pc);
2978			unlock_page_cgroup(pc);
2979			/*
2980			 * The old page may be fully unmapped while we kept it.
2981			 */
2982			mem_cgroup_uncharge_page(page);
2983		}
2984		return -ENOMEM;
2985	}
2986	/*
2987	 * We charge the new page before it's used/mapped. So, even if
2988	 * unlock_page() is called before end_migration, we can catch all
2989	 * events on this new page. In case the new page is migrated but not
2990	 * remapped, its mapcount will finally be 0 and we call uncharge in
	 * end_migration().
2991	 */
2992	pc = lookup_page_cgroup(newpage);
2993	if (PageAnon(page))
2994		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2995	else if (page_is_file_cache(page))
2996		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2997	else
2998		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2999	__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
3000	return ret;
3001}
3002
3003/* remove the redundant charge if migration failed */
3004void mem_cgroup_end_migration(struct mem_cgroup *mem,
3005	struct page *oldpage, struct page *newpage, bool migration_ok)
3006{
3007	struct page *used, *unused;
3008	struct page_cgroup *pc;
3009
3010	if (!mem)
3011		return;
3012	/* blocks rmdir() */
3013	cgroup_exclude_rmdir(&mem->css);
3014	if (!migration_ok) {
3015		used = oldpage;
3016		unused = newpage;
3017	} else {
3018		used = newpage;
3019		unused = oldpage;
3020	}
3021	/*
3022	 * We disallowed uncharging pages under migration because the
3023	 * mapcount of the page goes down to zero temporarily.
3024	 * Clear the flag and check whether the page should still be charged.
3025	 */
3026	pc = lookup_page_cgroup(oldpage);
3027	lock_page_cgroup(pc);
3028	ClearPageCgroupMigration(pc);
3029	unlock_page_cgroup(pc);
3030
3031	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
3032
3033	/*
3034	 * If a page is file cache, the radix-tree replacement is atomic
3035	 * and we can skip this check. When it was an Anon page, its mapcount
3036	 * goes down to 0. But because we added the MIGRATION flag, it's not
3037	 * uncharged yet. There are several cases, but the page->mapcount
3038	 * check and the USED bit check in mem_cgroup_uncharge_page() do
3039	 * enough checking. (See mem_cgroup_prepare_migration() too.)
3040	 */
3041	if (PageAnon(used))
3042		mem_cgroup_uncharge_page(used);
3043	/*
3044	 * At migration, we may charge against a cgroup which has no
3045	 * tasks.
3046	 * So, rmdir()->pre_destroy() can be called while we do this charge.
3047	 * In that case, we need to call pre_destroy() again; check that here.
3048	 */
3049	cgroup_release_and_wakeup_rmdir(&mem->css);
3050}
3051
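/*
 * A minimal sketch (not compiled as part of this file) of how the
 * migration core is expected to bracket a page move with the two hooks
 * above; the core-work placeholder is an assumption made for illustration.
 */
#if 0
static int migrate_one_page_sketch(struct page *old, struct page *new)
{
	struct mem_cgroup *mem;
	int rc;

	if (mem_cgroup_prepare_migration(old, new, &mem, GFP_KERNEL))
		return -ENOMEM;		/* charging for "new" failed */
	rc = do_the_copy_and_remap(old, new);	/* hypothetical core work */
	/* uncharges whichever of old/new ended up unused */
	mem_cgroup_end_migration(mem, old, new, rc == 0);
	return rc;
}
#endif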
3052/*
3053 * A call to try to shrink memory usage on a charge failure at shmem's
3054 * swapin. Calling hierarchical_reclaim is not enough, because we should
3055 * update last_oom_jiffies to prevent pagefault_out_of_memory from invoking
3056 * a global OOM. Moreover, considering the hierarchy, we should reclaim
3057 * from the mem_over_limit, not from the memcg this page would be charged
3058 * to. try_charge_swapin does all of this properly.
3059 */
3060int mem_cgroup_shmem_charge_fallback(struct page *page,
3061			    struct mm_struct *mm,
3062			    gfp_t gfp_mask)
3063{
3064	struct mem_cgroup *mem;
3065	int ret;
3066
3067	if (mem_cgroup_disabled())
3068		return 0;
3069
3070	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
3071	if (!ret)
3072		mem_cgroup_cancel_charge_swapin(mem); /* it does the !mem check */
3073
3074	return ret;
3075}
3076
3077#ifdef CONFIG_DEBUG_VM
3078static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3079{
3080	struct page_cgroup *pc;
3081
3082	pc = lookup_page_cgroup(page);
3083	if (likely(pc) && PageCgroupUsed(pc))
3084		return pc;
3085	return NULL;
3086}
3087
3088bool mem_cgroup_bad_page_check(struct page *page)
3089{
3090	if (mem_cgroup_disabled())
3091		return false;
3092
3093	return lookup_page_cgroup_used(page) != NULL;
3094}
3095
3096void mem_cgroup_print_bad_page(struct page *page)
3097{
3098	struct page_cgroup *pc;
3099
3100	pc = lookup_page_cgroup_used(page);
3101	if (pc) {
3102		int ret = -1;
3103		char *path;
3104
3105		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
3106		       pc, pc->flags, pc->mem_cgroup);
3107
3108		path = kmalloc(PATH_MAX, GFP_KERNEL);
3109		if (path) {
3110			rcu_read_lock();
3111			ret = cgroup_path(pc->mem_cgroup->css.cgroup,
3112					path, PATH_MAX);
3113			rcu_read_unlock();
3114		}
3115
3116		printk(KERN_CONT "(%s)\n",
3117				(ret < 0) ? "cannot get the path" : path);
3118		kfree(path);
3119	}
3120}
3121#endif
3122
3123static DEFINE_MUTEX(set_limit_mutex);
3124
3125static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3126				unsigned long long val)
3127{
3128	int retry_count;
3129	u64 memswlimit, memlimit;
3130	int ret = 0;
3131	int children = mem_cgroup_count_children(memcg);
3132	u64 curusage, oldusage;
3133	int enlarge;
3134
3135	/*
3136	 * To keep hierarchical_reclaim simple, how long we should retry
3137	 * depends on the caller. We set our retry count to be a function
3138	 * of the number of children we should visit in this loop.
3139	 */
3140	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
3141
3142	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3143
3144	enlarge = 0;
3145	while (retry_count) {
3146		if (signal_pending(current)) {
3147			ret = -EINTR;
3148			break;
3149		}
3150		/*
3151		 * Rather than hiding all this in some function, do it in an
3152		 * open-coded manner, so you can see what it really does.
3153		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
3154		 */
3155		mutex_lock(&set_limit_mutex);
3156		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3157		if (memswlimit < val) {
3158			ret = -EINVAL;
3159			mutex_unlock(&set_limit_mutex);
3160			break;
3161		}
3162
3163		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3164		if (memlimit < val)
3165			enlarge = 1;
3166
3167		ret = res_counter_set_limit(&memcg->res, val);
3168		if (!ret) {
3169			if (memswlimit == val)
3170				memcg->memsw_is_minimum = true;
3171			else
3172				memcg->memsw_is_minimum = false;
3173		}
3174		mutex_unlock(&set_limit_mutex);
3175
3176		if (!ret)
3177			break;
3178
3179		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3180						MEM_CGROUP_RECLAIM_SHRINK);
3181		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3182		/* Usage is reduced? */
3183		if (curusage >= oldusage)
3184			retry_count--;
3185		else
3186			oldusage = curusage;
3187	}
3188	if (!ret && enlarge)
3189		memcg_oom_recover(memcg);
3190
3191	return ret;
3192}
3193
3194static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3195					unsigned long long val)
3196{
3197	int retry_count;
3198	u64 memlimit, memswlimit, oldusage, curusage;
3199	int children = mem_cgroup_count_children(memcg);
3200	int ret = -EBUSY;
3201	int enlarge = 0;
3202
3203	/* see mem_cgroup_resize_limit() */
3204	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3205	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3206	while (retry_count) {
3207		if (signal_pending(current)) {
3208			ret = -EINTR;
3209			break;
3210		}
3211		/*
3212		 * Rather than hiding all this in some function, do it in an
3213		 * open-coded manner, so you can see what it really does.
3214		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
3215		 */
3216		mutex_lock(&set_limit_mutex);
3217		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3218		if (memlimit > val) {
3219			ret = -EINVAL;
3220			mutex_unlock(&set_limit_mutex);
3221			break;
3222		}
3223		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3224		if (memswlimit < val)
3225			enlarge = 1;
3226		ret = res_counter_set_limit(&memcg->memsw, val);
3227		if (!ret) {
3228			if (memlimit == val)
3229				memcg->memsw_is_minimum = true;
3230			else
3231				memcg->memsw_is_minimum = false;
3232		}
3233		mutex_unlock(&set_limit_mutex);
3234
3235		if (!ret)
3236			break;
3237
3238		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3239						MEM_CGROUP_RECLAIM_NOSWAP |
3240						MEM_CGROUP_RECLAIM_SHRINK);
3241		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3242		/* Usage is reduced? */
3243		if (curusage >= oldusage)
3244			retry_count--;
3245		else
3246			oldusage = curusage;
3247	}
3248	if (!ret && enlarge)
3249		memcg_oom_recover(memcg);
3250	return ret;
3251}
3252
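/*
 * A note on the invariant both resize paths above maintain, with an
 * illustrative assertion (not code from this file): memory.limit may
 * never exceed memsw.limit, because memsw accounts memory+swap and is
 * charged for every page that the memory counter is charged for.
 */
#if 0
	BUG_ON(res_counter_read_u64(&memcg->res, RES_LIMIT) >
	       res_counter_read_u64(&memcg->memsw, RES_LIMIT));
#endif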
3253unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3254					    gfp_t gfp_mask)
3255{
3256	unsigned long nr_reclaimed = 0;
3257	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3258	unsigned long reclaimed;
3259	int loop = 0;
3260	struct mem_cgroup_tree_per_zone *mctz;
3261	unsigned long long excess;
3262
3263	if (order > 0)
3264		return 0;
3265
3266	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
3267	/*
3268	 * This loop can run for a while, especially if mem_cgroups
3269	 * continuously keep exceeding their soft limit and putting the
3270	 * system under pressure
3271	 */
3272	do {
3273		if (next_mz)
3274			mz = next_mz;
3275		else
3276			mz = mem_cgroup_largest_soft_limit_node(mctz);
3277		if (!mz)
3278			break;
3279
3280		reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
3281						gfp_mask,
3282						MEM_CGROUP_RECLAIM_SOFT);
3283		nr_reclaimed += reclaimed;
3284		spin_lock(&mctz->lock);
3285
3286		/*
3287		 * If we failed to reclaim anything from this memory cgroup
3288		 * it is time to move on to the next cgroup
3289		 */
3290		next_mz = NULL;
3291		if (!reclaimed) {
3292			do {
3293				/*
3294				 * Loop until we find yet another one.
3295				 *
3296				 * By the time we get the soft_limit lock
3297				 * again, someone might have added the
3298				 * group back to the RB tree. Iterate to
3299				 * make sure we get a different mem.
3300				 * mem_cgroup_largest_soft_limit_node returns
3301				 * NULL if no other cgroup is present on
3302				 * the tree
3303				 */
3304				next_mz =
3305				__mem_cgroup_largest_soft_limit_node(mctz);
3306				if (next_mz == mz) {
3307					css_put(&next_mz->mem->css);
3308					next_mz = NULL;
3309				} else /* next_mz == NULL or other memcg */
3310					break;
3311			} while (1);
3312		}
3313		__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
3314		excess = res_counter_soft_limit_excess(&mz->mem->res);
3315		/*
3316		 * One school of thought says that we should not add
3317		 * the node back to the tree if reclaim returns 0.
3318		 * But our reclaim could return 0 simply because, due
3319		 * to priority, we are exposing a smaller subset of
3320		 * memory to reclaim from. Consider this a longer
3321		 * term TODO.
3322		 */
3323		/* If excess == 0, no tree ops */
3324		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
3325		spin_unlock(&mctz->lock);
3326		css_put(&mz->mem->css);
3327		loop++;
3328		/*
3329		 * Could not reclaim anything and there are no more
3330		 * mem cgroups to try, or we seem to be looping without
3331		 * reclaiming anything.
3332		 */
3333		if (!nr_reclaimed &&
3334			(next_mz == NULL ||
3335			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3336			break;
3337	} while (!nr_reclaimed);
3338	if (next_mz)
3339		css_put(&next_mz->mem->css);
3340	return nr_reclaimed;
3341}
3342
3343/*
3344 * This routine traverses the page_cgroups on the given list and drops them
3345 * all. *And* it doesn't reclaim the pages themselves; it just removes the
	 * page_cgroups.
3346 */
3347static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
3348				int node, int zid, enum lru_list lru)
3349{
3350	struct zone *zone;
3351	struct mem_cgroup_per_zone *mz;
3352	struct page_cgroup *pc, *busy;
3353	unsigned long flags, loop;
3354	struct list_head *list;
3355	int ret = 0;
3356
3357	zone = &NODE_DATA(node)->node_zones[zid];
3358	mz = mem_cgroup_zoneinfo(mem, node, zid);
3359	list = &mz->lists[lru];
3360
3361	loop = MEM_CGROUP_ZSTAT(mz, lru);
3362	/* give some margin against -EBUSY etc... */
3363	loop += 256;
3364	busy = NULL;
3365	while (loop--) {
3366		struct page *page;
3367
3368		ret = 0;
3369		spin_lock_irqsave(&zone->lru_lock, flags);
3370		if (list_empty(list)) {
3371			spin_unlock_irqrestore(&zone->lru_lock, flags);
3372			break;
3373		}
3374		pc = list_entry(list->prev, struct page_cgroup, lru);
3375		if (busy == pc) {
3376			list_move(&pc->lru, list);
3377			busy = NULL;
3378			spin_unlock_irqrestore(&zone->lru_lock, flags);
3379			continue;
3380		}
3381		spin_unlock_irqrestore(&zone->lru_lock, flags);
3382
3383		page = lookup_cgroup_page(pc);
3384
3385		ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL);
3386		if (ret == -ENOMEM)
3387			break;
3388
3389		if (ret == -EBUSY || ret == -EINVAL) {
3390			/* found lock contention or "pc" is obsolete. */
3391			busy = pc;
3392			cond_resched();
3393		} else
3394			busy = NULL;
3395	}
3396
3397	if (!ret && !list_empty(list))
3398		return -EBUSY;
3399	return ret;
3400}
3401
3402/*
3403 * Make the mem_cgroup's charge 0 if there are no tasks.
3404 * This enables deleting this mem_cgroup.
3405 */
3406static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
3407{
3408	int ret;
3409	int node, zid, shrink;
3410	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3411	struct cgroup *cgrp = mem->css.cgroup;
3412
3413	css_get(&mem->css);
3414
3415	shrink = 0;
3416	/* should we free all? */
3417	if (free_all)
3418		goto try_to_free;
3419move_account:
3420	do {
3421		ret = -EBUSY;
3422		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
3423			goto out;
3424		ret = -EINTR;
3425		if (signal_pending(current))
3426			goto out;
3427		/* This is for getting all *used* pages onto the LRU. */
3428		lru_add_drain_all();
3429		drain_all_stock_sync();
3430		ret = 0;
3431		mem_cgroup_start_move(mem);
3432		for_each_node_state(node, N_HIGH_MEMORY) {
3433			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3434				enum lru_list l;
3435				for_each_lru(l) {
3436					ret = mem_cgroup_force_empty_list(mem,
3437							node, zid, l);
3438					if (ret)
3439						break;
3440				}
3441			}
3442			if (ret)
3443				break;
3444		}
3445		mem_cgroup_end_move(mem);
3446		memcg_oom_recover(mem);
3447		/* it seems the parent cgroup doesn't have enough mem */
3448		if (ret == -ENOMEM)
3449			goto try_to_free;
3450		cond_resched();
3451	/* "ret" should also be checked to ensure all lists are empty. */
3452	} while (mem->res.usage > 0 || ret);
3453out:
3454	css_put(&mem->css);
3455	return ret;
3456
3457try_to_free:
3458	/* returns -EBUSY if there is a task or if we come here twice. */
3459	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3460		ret = -EBUSY;
3461		goto out;
3462	}
3463	/* we call try-to-free pages to make this cgroup empty */
3464	lru_add_drain_all();
3465	/* try to free all pages in this cgroup */
3466	shrink = 1;
3467	while (nr_retries && mem->res.usage > 0) {
3468		int progress;
3469
3470		if (signal_pending(current)) {
3471			ret = -EINTR;
3472			goto out;
3473		}
3474		progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
3475						false, get_swappiness(mem));
3476		if (!progress) {
3477			nr_retries--;
3478			/* maybe some writeback is necessary */
3479			congestion_wait(BLK_RW_ASYNC, HZ/10);
3480		}
3481
3482	}
3483	lru_add_drain();
3484	/* try move_account...there may be some *locked* pages. */
3485	goto move_account;
3486}
3487
3488int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3489{
3490	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3491}
3492
3493
3494static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3495{
3496	return mem_cgroup_from_cont(cont)->use_hierarchy;
3497}
3498
3499static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3500					u64 val)
3501{
3502	int retval = 0;
3503	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3504	struct cgroup *parent = cont->parent;
3505	struct mem_cgroup *parent_mem = NULL;
3506
3507	if (parent)
3508		parent_mem = mem_cgroup_from_cont(parent);
3509
3510	cgroup_lock();
3511	/*
3512	 * If the parent's use_hierarchy is set, we can't make any
3513	 * modifications in the child subtrees. If it is unset, then the
3514	 * change can occur, provided the current cgroup has no children.
3515	 *
3516	 * For the root cgroup, parent_mem is NULL; we allow the value to be
3517	 * set if there are no children.
3518 */ 3519 if ((!parent_mem || !parent_mem->use_hierarchy) && 3520 (val == 1 || val == 0)) { 3521 if (list_empty(&cont->children)) 3522 mem->use_hierarchy = val; 3523 else 3524 retval = -EBUSY; 3525 } else 3526 retval = -EINVAL; 3527 cgroup_unlock(); 3528 3529 return retval; 3530} 3531 3532 3533static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem, 3534 enum mem_cgroup_stat_index idx) 3535{ 3536 struct mem_cgroup *iter; 3537 long val = 0; 3538 3539 /* Per-cpu values can be negative, use a signed accumulator */ 3540 for_each_mem_cgroup_tree(iter, mem) 3541 val += mem_cgroup_read_stat(iter, idx); 3542 3543 if (val < 0) /* race ? */ 3544 val = 0; 3545 return val; 3546} 3547 3548static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap) 3549{ 3550 u64 val; 3551 3552 if (!mem_cgroup_is_root(mem)) { 3553 if (!swap) 3554 return res_counter_read_u64(&mem->res, RES_USAGE); 3555 else 3556 return res_counter_read_u64(&mem->memsw, RES_USAGE); 3557 } 3558 3559 val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE); 3560 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS); 3561 3562 if (swap) 3563 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT); 3564 3565 return val << PAGE_SHIFT; 3566} 3567 3568static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) 3569{ 3570 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 3571 u64 val; 3572 int type, name; 3573 3574 type = MEMFILE_TYPE(cft->private); 3575 name = MEMFILE_ATTR(cft->private); 3576 switch (type) { 3577 case _MEM: 3578 if (name == RES_USAGE) 3579 val = mem_cgroup_usage(mem, false); 3580 else 3581 val = res_counter_read_u64(&mem->res, name); 3582 break; 3583 case _MEMSWAP: 3584 if (name == RES_USAGE) 3585 val = mem_cgroup_usage(mem, true); 3586 else 3587 val = res_counter_read_u64(&mem->memsw, name); 3588 break; 3589 default: 3590 BUG(); 3591 break; 3592 } 3593 return val; 3594} 3595/* 3596 * The user of this function is... 3597 * RES_LIMIT. 3598 */ 3599static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft, 3600 const char *buffer) 3601{ 3602 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 3603 int type, name; 3604 unsigned long long val; 3605 int ret; 3606 3607 type = MEMFILE_TYPE(cft->private); 3608 name = MEMFILE_ATTR(cft->private); 3609 switch (name) { 3610 case RES_LIMIT: 3611 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3612 ret = -EINVAL; 3613 break; 3614 } 3615 /* This function does all necessary parse...reuse it */ 3616 ret = res_counter_memparse_write_strategy(buffer, &val); 3617 if (ret) 3618 break; 3619 if (type == _MEM) 3620 ret = mem_cgroup_resize_limit(memcg, val); 3621 else 3622 ret = mem_cgroup_resize_memsw_limit(memcg, val); 3623 break; 3624 case RES_SOFT_LIMIT: 3625 ret = res_counter_memparse_write_strategy(buffer, &val); 3626 if (ret) 3627 break; 3628 /* 3629 * For memsw, soft limits are hard to implement in terms 3630 * of semantics, for now, we support soft limits for 3631 * control without swap 3632 */ 3633 if (type == _MEM) 3634 ret = res_counter_set_soft_limit(&memcg->res, val); 3635 else 3636 ret = -EINVAL; 3637 break; 3638 default: 3639 ret = -EINVAL; /* should be BUG() ? 
 */
3640		break;
3641	}
3642	return ret;
3643}
3644
3645static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3646		unsigned long long *mem_limit, unsigned long long *memsw_limit)
3647{
3648	struct cgroup *cgroup;
3649	unsigned long long min_limit, min_memsw_limit, tmp;
3650
3651	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3652	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3653	cgroup = memcg->css.cgroup;
3654	if (!memcg->use_hierarchy)
3655		goto out;
3656
3657	while (cgroup->parent) {
3658		cgroup = cgroup->parent;
3659		memcg = mem_cgroup_from_cont(cgroup);
3660		if (!memcg->use_hierarchy)
3661			break;
3662		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3663		min_limit = min(min_limit, tmp);
3664		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3665		min_memsw_limit = min(min_memsw_limit, tmp);
3666	}
3667out:
3668	*mem_limit = min_limit;
3669	*memsw_limit = min_memsw_limit;
3670	return;
3671}
3672
3673static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3674{
3675	struct mem_cgroup *mem;
3676	int type, name;
3677
3678	mem = mem_cgroup_from_cont(cont);
3679	type = MEMFILE_TYPE(event);
3680	name = MEMFILE_ATTR(event);
3681	switch (name) {
3682	case RES_MAX_USAGE:
3683		if (type == _MEM)
3684			res_counter_reset_max(&mem->res);
3685		else
3686			res_counter_reset_max(&mem->memsw);
3687		break;
3688	case RES_FAILCNT:
3689		if (type == _MEM)
3690			res_counter_reset_failcnt(&mem->res);
3691		else
3692			res_counter_reset_failcnt(&mem->memsw);
3693		break;
3694	}
3695
3696	return 0;
3697}
3698
3699static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3700					struct cftype *cft)
3701{
3702	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3703}
3704
3705#ifdef CONFIG_MMU
3706static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3707					struct cftype *cft, u64 val)
3708{
3709	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3710
3711	if (val >= (1 << NR_MOVE_TYPE))
3712		return -EINVAL;
3713	/*
3714	 * We check this value several times in both can_attach() and
3715	 * attach(), so we need the cgroup lock to prevent this value from
3716	 * becoming inconsistent.
3717 */ 3718 cgroup_lock(); 3719 mem->move_charge_at_immigrate = val; 3720 cgroup_unlock(); 3721 3722 return 0; 3723} 3724#else 3725static int mem_cgroup_move_charge_write(struct cgroup *cgrp, 3726 struct cftype *cft, u64 val) 3727{ 3728 return -ENOSYS; 3729} 3730#endif 3731 3732 3733/* For read statistics */ 3734enum { 3735 MCS_CACHE, 3736 MCS_RSS, 3737 MCS_FILE_MAPPED, 3738 MCS_PGPGIN, 3739 MCS_PGPGOUT, 3740 MCS_SWAP, 3741 MCS_INACTIVE_ANON, 3742 MCS_ACTIVE_ANON, 3743 MCS_INACTIVE_FILE, 3744 MCS_ACTIVE_FILE, 3745 MCS_UNEVICTABLE, 3746 NR_MCS_STAT, 3747}; 3748 3749struct mcs_total_stat { 3750 s64 stat[NR_MCS_STAT]; 3751}; 3752 3753struct { 3754 char *local_name; 3755 char *total_name; 3756} memcg_stat_strings[NR_MCS_STAT] = { 3757 {"cache", "total_cache"}, 3758 {"rss", "total_rss"}, 3759 {"mapped_file", "total_mapped_file"}, 3760 {"pgpgin", "total_pgpgin"}, 3761 {"pgpgout", "total_pgpgout"}, 3762 {"swap", "total_swap"}, 3763 {"inactive_anon", "total_inactive_anon"}, 3764 {"active_anon", "total_active_anon"}, 3765 {"inactive_file", "total_inactive_file"}, 3766 {"active_file", "total_active_file"}, 3767 {"unevictable", "total_unevictable"} 3768}; 3769 3770 3771static void 3772mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) 3773{ 3774 s64 val; 3775 3776 /* per cpu stat */ 3777 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE); 3778 s->stat[MCS_CACHE] += val * PAGE_SIZE; 3779 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS); 3780 s->stat[MCS_RSS] += val * PAGE_SIZE; 3781 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED); 3782 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE; 3783 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN); 3784 s->stat[MCS_PGPGIN] += val; 3785 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT); 3786 s->stat[MCS_PGPGOUT] += val; 3787 if (do_swap_account) { 3788 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT); 3789 s->stat[MCS_SWAP] += val * PAGE_SIZE; 3790 } 3791 3792 /* per zone stat */ 3793 val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON); 3794 s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE; 3795 val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON); 3796 s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE; 3797 val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE); 3798 s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE; 3799 val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE); 3800 s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE; 3801 val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE); 3802 s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE; 3803} 3804 3805static void 3806mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) 3807{ 3808 struct mem_cgroup *iter; 3809 3810 for_each_mem_cgroup_tree(iter, mem) 3811 mem_cgroup_get_local_stat(iter, s); 3812} 3813 3814static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, 3815 struct cgroup_map_cb *cb) 3816{ 3817 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); 3818 struct mcs_total_stat mystat; 3819 int i; 3820 3821 memset(&mystat, 0, sizeof(mystat)); 3822 mem_cgroup_get_local_stat(mem_cont, &mystat); 3823 3824 for (i = 0; i < NR_MCS_STAT; i++) { 3825 if (i == MCS_SWAP && !do_swap_account) 3826 continue; 3827 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]); 3828 } 3829 3830 /* Hierarchical information */ 3831 { 3832 unsigned long long limit, memsw_limit; 3833 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit); 3834 cb->fill(cb, "hierarchical_memory_limit", limit); 3835 
if (do_swap_account) 3836 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit); 3837 } 3838 3839 memset(&mystat, 0, sizeof(mystat)); 3840 mem_cgroup_get_total_stat(mem_cont, &mystat); 3841 for (i = 0; i < NR_MCS_STAT; i++) { 3842 if (i == MCS_SWAP && !do_swap_account) 3843 continue; 3844 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]); 3845 } 3846 3847#ifdef CONFIG_DEBUG_VM 3848 cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL)); 3849 3850 { 3851 int nid, zid; 3852 struct mem_cgroup_per_zone *mz; 3853 unsigned long recent_rotated[2] = {0, 0}; 3854 unsigned long recent_scanned[2] = {0, 0}; 3855 3856 for_each_online_node(nid) 3857 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 3858 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); 3859 3860 recent_rotated[0] += 3861 mz->reclaim_stat.recent_rotated[0]; 3862 recent_rotated[1] += 3863 mz->reclaim_stat.recent_rotated[1]; 3864 recent_scanned[0] += 3865 mz->reclaim_stat.recent_scanned[0]; 3866 recent_scanned[1] += 3867 mz->reclaim_stat.recent_scanned[1]; 3868 } 3869 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]); 3870 cb->fill(cb, "recent_rotated_file", recent_rotated[1]); 3871 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]); 3872 cb->fill(cb, "recent_scanned_file", recent_scanned[1]); 3873 } 3874#endif 3875 3876 return 0; 3877} 3878 3879static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft) 3880{ 3881 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 3882 3883 return get_swappiness(memcg); 3884} 3885 3886static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft, 3887 u64 val) 3888{ 3889 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 3890 struct mem_cgroup *parent; 3891 3892 if (val > 100) 3893 return -EINVAL; 3894 3895 if (cgrp->parent == NULL) 3896 return -EINVAL; 3897 3898 parent = mem_cgroup_from_cont(cgrp->parent); 3899 3900 cgroup_lock(); 3901 3902 /* If under hierarchy, only empty-root can set this value */ 3903 if ((parent->use_hierarchy) || 3904 (memcg->use_hierarchy && !list_empty(&cgrp->children))) { 3905 cgroup_unlock(); 3906 return -EINVAL; 3907 } 3908 3909 memcg->swappiness = val; 3910 3911 cgroup_unlock(); 3912 3913 return 0; 3914} 3915 3916static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3917{ 3918 struct mem_cgroup_threshold_ary *t; 3919 u64 usage; 3920 int i; 3921 3922 rcu_read_lock(); 3923 if (!swap) 3924 t = rcu_dereference(memcg->thresholds.primary); 3925 else 3926 t = rcu_dereference(memcg->memsw_thresholds.primary); 3927 3928 if (!t) 3929 goto unlock; 3930 3931 usage = mem_cgroup_usage(memcg, swap); 3932 3933 /* 3934 * current_threshold points to threshold just below usage. 3935 * If it's not true, a threshold was crossed after last 3936 * call of __mem_cgroup_threshold(). 3937 */ 3938 i = t->current_threshold; 3939 3940 /* 3941 * Iterate backward over array of thresholds starting from 3942 * current_threshold and check if a threshold is crossed. 3943 * If none of thresholds below usage is crossed, we read 3944 * only one element of the array here. 3945 */ 3946 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3947 eventfd_signal(t->entries[i].eventfd, 1); 3948 3949 /* i = current_threshold + 1 */ 3950 i++; 3951 3952 /* 3953 * Iterate forward over array of thresholds starting from 3954 * current_threshold+1 and check if a threshold is crossed. 3955 * If none of thresholds above usage is crossed, we read 3956 * only one element of the array here. 
static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	u64 usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to the threshold just below usage.
	 * If it does not, a threshold was crossed after the last
	 * call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over the array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of the thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over the array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of the thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_swap_account)
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	/*
	 * Compare explicitly instead of returning the difference: the
	 * thresholds are u64, so subtraction can overflow the int result.
	 */
	if (_a->threshold > _b->threshold)
		return 1;
	if (_a->threshold < _b->threshold)
		return -1;
	return 0;
}

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
{
	struct mem_cgroup_eventfd_list *ev;

	list_for_each_entry(ev, &mem->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, mem)
		mem_cgroup_oom_notify_cb(iter);
}
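/*
 * Illustrative userspace registration of a usage threshold (a sketch of
 * the cgroup v1 event_control interface; paths and the 50M value are
 * examples, not defined in this file):
 *
 *   int efd = eventfd(0, 0);
 *   int ufd = open("/cgroup/memory/A/memory.usage_in_bytes", O_RDONLY);
 *   int cfd = open("/cgroup/memory/A/cgroup.event_control", O_WRONLY);
 *   char buf[64];
 *   uint64_t ticks;
 *
 *   snprintf(buf, sizeof(buf), "%d %d %s", efd, ufd, "50M");
 *   write(cfd, buf, strlen(buf));     /* register the 50M threshold */
 *   read(efd, &ticks, sizeof(ticks)); /* blocks until 50M is crossed */
 *
 * mem_cgroup_usage_register_event() below implements the kernel side of
 * that write; the threshold fires on crossings in both directions.
 */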
static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	int type = MEMFILE_TYPE(cft->private);
	u64 threshold, usage;
	int i, size, ret;

	ret = res_counter_memparse_write_strategy(args, &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before adding a new one */
	if (thresholds->primary)
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	size = thresholds->primary ? thresholds->primary->size + 1 : 1;

	/* Allocate memory for new array of thresholds */
	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
			GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}
	new->size = size;

	/* Copy thresholds (if any) to new array */
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
				sizeof(struct mem_cgroup_threshold));
	}

	/* Add new threshold */
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;

	/* Sort thresholds. Registering of new threshold isn't time-critical */
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
			compare_thresholds, NULL);

	/* Find current threshold */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold < usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
	}

	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}

static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	int type = MEMFILE_TYPE(cft->private);
	u64 usage;
	int i, j, size;

	mutex_lock(&memcg->thresholds_lock);
	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	/*
	 * Something went wrong if we are trying to unregister a threshold
	 * when we don't have any thresholds.
	 */
	BUG_ON(!thresholds);

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate the new number of thresholds */
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
	}

	new = thresholds->spare;

	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold < usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to
			 * increment it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;
	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

	mutex_unlock(&memcg->thresholds_lock);
}
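/*
 * Illustrative OOM-notification registration (assumed userspace usage,
 * analogous to the threshold example above): pass an eventfd plus the
 * fd of memory.oom_control -- with no extra argument -- through
 * cgroup.event_control. The eventfd is then signalled from
 * mem_cgroup_oom_notify() whenever the group enters OOM:
 *
 *   snprintf(buf, sizeof(buf), "%d %d", efd, oom_ctrl_fd);
 *   write(event_control_fd, buf, strlen(buf));
 */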
static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *event;
	int type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);
	event = kmalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	mutex_lock(&memcg_oom_mutex);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM? Then notify the new listener right away. */
	if (atomic_read(&memcg->oom_lock))
		eventfd_signal(eventfd, 1);
	mutex_unlock(&memcg_oom_mutex);

	return 0;
}

static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *ev, *tmp;
	int type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);

	mutex_lock(&memcg_oom_mutex);

	list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	mutex_unlock(&memcg_oom_mutex);
}

static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
	struct cftype *cft, struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);

	cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);

	if (atomic_read(&mem->oom_lock))
		cb->fill(cb, "under_oom", 1);
	else
		cb->fill(cb, "under_oom", 0);
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
	struct cftype *cft, u64 val)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup *parent;

	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
	if (!cgrp->parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	parent = mem_cgroup_from_cont(cgrp->parent);

	cgroup_lock();
	/* oom-kill-disable is a flag for the whole sub-hierarchy. */
	if ((parent->use_hierarchy) ||
	    (mem->use_hierarchy && !list_empty(&cgrp->children))) {
		cgroup_unlock();
		return -EINVAL;
	}
	mem->oom_kill_disable = val;
	if (!val)
		memcg_oom_recover(mem);
	cgroup_unlock();
	return 0;
}
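/*
 * Illustrative use of memory.oom_control (assumed userspace usage):
 *
 *   # echo 1 > /cgroup/memory/A/memory.oom_control   (disable OOM killer)
 *   # cat /cgroup/memory/A/memory.oom_control
 *   oom_kill_disable 1
 *   under_oom 0
 *
 * With the killer disabled, tasks that hit the limit block until memory
 * is freed or the limit is raised; under_oom reads 1 while the group is
 * in that state.
 */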
static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read_u64 = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
	{
		.name = "force_empty",
		.trigger = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
	{
		.name = "oom_control",
		.read_map = mem_cgroup_oom_control_read,
		.write_u64 = mem_cgroup_oom_control_write,
		.register_event = mem_cgroup_oom_register_event,
		.unregister_event = mem_cgroup_oom_unregister_event,
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
};

static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	if (!do_swap_account)
		return 0;
	return cgroup_add_files(cont, ss, memsw_cgroup_files,
				ARRAY_SIZE(memsw_cgroup_files));
}
#else
static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	return 0;
}
#endif
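/*
 * The memsw.* files above account memory and swap together. Illustrative
 * usage (assumed paths; requires CONFIG_CGROUP_MEM_RES_CTLR_SWAP and swap
 * accounting enabled at boot):
 *
 *   # echo 512M > /cgroup/memory/A/memory.limit_in_bytes
 *   # echo 768M > /cgroup/memory/A/memory.memsw.limit_in_bytes
 *
 * i.e. the group's memory+swap usage together may not exceed 768M, and
 * memsw.limit must always be >= memory.limit.
 */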
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it is a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It would be better to use a memory
	 *       hotplug callback function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		for_each_lru(l)
			INIT_LIST_HEAD(&mz->lists[l]);
		mz->usage_in_excess = 0;
		mz->on_tree = false;
		mz->mem = mem;
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}

static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;
	int size = sizeof(struct mem_cgroup);

	/* Can be very big if MAX_NUMNODES is very big */
	if (size < PAGE_SIZE)
		mem = kzalloc(size, GFP_KERNEL);
	else
		mem = vzalloc(size);

	if (!mem)
		return NULL;

	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!mem->stat)
		goto out_free;
	spin_lock_init(&mem->pcp_counter_lock);
	return mem;

out_free:
	if (size < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
	return NULL;
}

/*
 * When a mem_cgroup is destroyed, references from swap_cgroup can remain
 * (scanning everything at force_empty is too costly...).
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free the mem_cgroup when
 * it goes down to 0.
 *
 * Removal of the cgroup itself succeeds regardless of refs from swap.
 */

static void __mem_cgroup_free(struct mem_cgroup *mem)
{
	int node;

	mem_cgroup_remove_from_trees(mem);
	free_css_id(&mem_cgroup_subsys, &mem->css);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	free_percpu(mem->stat);
	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}

static void mem_cgroup_get(struct mem_cgroup *mem)
{
	atomic_inc(&mem->refcnt);
}

static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
{
	if (atomic_sub_and_test(count, &mem->refcnt)) {
		struct mem_cgroup *parent = parent_mem_cgroup(mem);
		__mem_cgroup_free(mem);
		if (parent)
			mem_cgroup_put(parent);
	}
}

static void mem_cgroup_put(struct mem_cgroup *mem)
{
	__mem_cgroup_put(mem, 1);
}

/*
 * Returns the parent mem_cgroup in the memcg hierarchy, or NULL if @mem
 * has none (a hierarchical parent only exists with use_hierarchy enabled).
 */
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
{
	if (!mem->res.parent)
		return NULL;
	return mem_cgroup_from_res_counter(mem->res.parent, res);
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static void __init enable_swap_cgroup(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account)
		do_swap_account = 1;
}
#else
static void __init enable_swap_cgroup(void)
{
}
#endif

static int mem_cgroup_soft_limit_tree_init(void)
{
	struct mem_cgroup_tree_per_node *rtpn;
	struct mem_cgroup_tree_per_zone *rtpz;
	int tmp, node, zone;

	for_each_node_state(node, N_POSSIBLE) {
		tmp = node;
		if (!node_state(node, N_NORMAL_MEMORY))
			tmp = -1;
		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
		if (!rtpn)
			return 1;

		soft_limit_tree.rb_tree_per_node[node] = rtpn;

		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			rtpz = &rtpn->rb_tree_per_zone[zone];
			rtpz->rb_root = RB_ROOT;
			spin_lock_init(&rtpz->lock);
		}
	}
	return 0;
}
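/*
 * Illustrative hierarchy setup (assumed userspace usage): with
 * use_hierarchy enabled on a parent, children created below it chain
 * their res_counters to the parent's in mem_cgroup_create(), so a
 * child's charge also counts against every ancestor's limit:
 *
 *   # echo 1 > /cgroup/memory/A/memory.use_hierarchy
 *   # mkdir /cgroup/memory/A/B
 *   # echo 256M > /cgroup/memory/A/B/memory.limit_in_bytes
 */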
static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem, *parent;
	long error = -ENOMEM;
	int node;

	mem = mem_cgroup_alloc();
	if (!mem)
		return ERR_PTR(error);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	/* root ? */
	if (cont->parent == NULL) {
		int cpu;
		enable_swap_cgroup();
		parent = NULL;
		root_mem_cgroup = mem;
		if (mem_cgroup_soft_limit_tree_init())
			goto free_out;
		for_each_possible_cpu(cpu) {
			struct memcg_stock_pcp *stock =
						&per_cpu(memcg_stock, cpu);
			INIT_WORK(&stock->work, drain_local_stock);
		}
		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
	} else {
		parent = mem_cgroup_from_cont(cont->parent);
		mem->use_hierarchy = parent->use_hierarchy;
		mem->oom_kill_disable = parent->oom_kill_disable;
	}

	if (parent && parent->use_hierarchy) {
		res_counter_init(&mem->res, &parent->res);
		res_counter_init(&mem->memsw, &parent->memsw);
		/*
		 * We increment the refcnt of the parent to ensure that we can
		 * safely access it on res_counter_charge/uncharge.
		 * This refcnt will be decremented when freeing this
		 * mem_cgroup (see mem_cgroup_put()).
		 */
		mem_cgroup_get(parent);
	} else {
		res_counter_init(&mem->res, NULL);
		res_counter_init(&mem->memsw, NULL);
	}
	mem->last_scanned_child = 0;
	INIT_LIST_HEAD(&mem->oom_notify);

	if (parent)
		mem->swappiness = get_swappiness(parent);
	atomic_set(&mem->refcnt, 1);
	mem->move_charge_at_immigrate = 0;
	mutex_init(&mem->thresholds_lock);
	return &mem->css;
free_out:
	__mem_cgroup_free(mem);
	root_mem_cgroup = NULL;
	return ERR_PTR(error);
}

static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	return mem_cgroup_force_empty(mem, false);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	mem_cgroup_put(mem);
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int ret;

	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
				ARRAY_SIZE(mem_cgroup_files));

	if (!ret)
		ret = register_memsw_files(cont, ss);
	return ret;
}

#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
#define PRECHARGE_COUNT_AT_ONCE	256
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret = 0;
	int batch_count = PRECHARGE_COUNT_AT_ONCE;
	struct mem_cgroup *mem = mc.to;

	if (mem_cgroup_is_root(mem)) {
		mc.precharge += count;
		/* we don't need css_get for root */
		return ret;
	}
	/* try to charge at once */
	if (count > 1) {
		struct res_counter *dummy;
		/*
		 * "mem" cannot be under rmdir() because we've already checked
		 * by cgroup_lock_live_cgroup() that it is not removed and we
		 * are still under the same cgroup_mutex. So we can postpone
		 * css_get().
		 */
		if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
			goto one_by_one;
		if (do_swap_account && res_counter_charge(&mem->memsw,
						PAGE_SIZE * count, &dummy)) {
			res_counter_uncharge(&mem->res, PAGE_SIZE * count);
			goto one_by_one;
		}
		mc.precharge += count;
		return ret;
	}
one_by_one:
	/* fall back to one by one charge */
	while (count--) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!batch_count--) {
			batch_count = PRECHARGE_COUNT_AT_ONCE;
			cond_resched();
		}
		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false);
		if (ret || !mem)
			/* mem_cgroup_clear_mc() will do uncharge later */
			return -ENOMEM;
		mc.precharge++;
	}
	return ret;
}
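/*
 * Illustrative use of the move-charge machinery below (assumed userspace
 * usage): memory.move_charge_at_immigrate is a bitmask -- bit 0 moves
 * charges of anonymous pages, bit 1 moves charges of file pages --
 * applied when a task is migrated into the cgroup:
 *
 *   # echo 3 > /cgroup/memory/B/memory.move_charge_at_immigrate
 *   # echo $PID > /cgroup/memory/B/tasks
 */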
/**
 * is_target_pte_for_mc - check whether a pte is a valid target for
 *	move charge
 * @vma: the vma the pte to be checked belongs to
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer where the target page or swap entry will be
 *	stored (can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target
 *	for move charge. If @target is not NULL, the page is stored in
 *	target->page with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *	target for charge migration. If @target is not NULL, the entry is
 *	stored in target->ent.
 *
 * Called with pte lock held.
 */
union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE,	/* not used */
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		/* we don't move shared anon */
		if (!move_anon() || page_mapcount(page) > 2)
			return NULL;
	} else if (!move_file())
		/* we ignore mapcount for file pages */
		return NULL;
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	int usage_count;
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!move_anon() || non_swap_entry(ent))
		return NULL;
	usage_count = mem_cgroup_count_swap_user(ent, &page);
	if (usage_count > 1) {	/* we don't move shared anon */
		if (page)
			put_page(page);
		return NULL;
	}
	if (do_swap_account)
		entry->val = ent.val;

	return page;
}

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct inode *inode;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!move_file())
		return NULL;

	inode = vma->vm_file->f_path.dentry->d_inode;
	mapping = vma->vm_file->f_mapping;
	if (pte_none(ptent))
		pgoff = linear_page_index(vma, addr);
	else /* pte_file(ptent) is true */
		pgoff = pte_to_pgoff(ptent);

	/* page is moved even if it's not RSS of this task (page-faulted). */
	if (!mapping_cap_swap_backed(mapping)) { /* normal file */
		page = find_get_page(mapping, pgoff);
	} else { /* shmem/tmpfs file. we should take account of swap too. */
		swp_entry_t ent;
		mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
		if (do_swap_account)
			entry->val = ent.val;
	}

	return page;
}
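/*
 * Summary of the dispatch implemented below: a present pte is resolved
 * via mc_handle_present_pte(), a swap pte via mc_handle_swap_pte(), and
 * a none/file pte via mc_handle_file_pte(); whatever page or swap entry
 * comes back is then checked against mc.from to decide between
 * MC_TARGET_PAGE and MC_TARGET_SWAP.
 */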
static int is_target_pte_for_mc(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	int ret = 0;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
	else if (pte_none(ptent) || pte_file(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return 0;
	if (page) {
		pc = lookup_page_cgroup(page);
		/*
		 * Do only a loose check here, without the page_cgroup lock;
		 * mem_cgroup_move_account() rechecks the pc under the lock.
		 */
		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
			css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	split_huge_page_pmd(walk->mm, pmd);

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		struct mm_walk mem_cgroup_count_precharge_walk = {
			.pmd_entry = mem_cgroup_count_precharge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		walk_page_range(vma->vm_start, vma->vm_end,
					&mem_cgroup_count_precharge_walk);
	}
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			res_counter_uncharge(&mc.from->memsw,
						PAGE_SIZE * mc.moved_swap);
		__mem_cgroup_put(mc.from, mc.moved_swap);

		if (!mem_cgroup_is_root(mc.to)) {
			/*
			 * we charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			res_counter_uncharge(&mc.to->res,
						PAGE_SIZE * mc.moved_swap);
		}
		/* we've already done mem_cgroup_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}
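/*
 * Overall flow of a charge move, tying together the handlers above and
 * below: can_attach() records mc.from/mc.to and precharges every movable
 * page found by the pagetable walk; attach() walks again and flips the
 * accounting page by page, consuming precharges; clear_mc() returns
 * whatever was left over and wakes up waiters.
 */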
static void mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	spin_unlock(&mc.lock);
	mem_cgroup_end_move(from);
}

static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p,
				bool threadgroup)
{
	int ret = 0;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);

	if (mem->move_charge_at_immigrate) {
		struct mm_struct *mm;
		struct mem_cgroup *from = mem_cgroup_from_task(p);

		VM_BUG_ON(from == mem);

		mm = get_task_mm(p);
		if (!mm)
			return 0;
		/* We move charges only when we move an owner of the mm */
		if (mm->owner == p) {
			VM_BUG_ON(mc.from);
			VM_BUG_ON(mc.to);
			VM_BUG_ON(mc.precharge);
			VM_BUG_ON(mc.moved_charge);
			VM_BUG_ON(mc.moved_swap);
			mem_cgroup_start_move(from);
			spin_lock(&mc.lock);
			mc.from = from;
			mc.to = mem;
			spin_unlock(&mc.lock);
			/* We set mc.moving_task later */

			ret = mem_cgroup_precharge_mc(mm);
			if (ret)
				mem_cgroup_clear_mc();
		}
		mmput(mm);
	}
	return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p,
				bool threadgroup)
{
	mem_cgroup_clear_mc();
}

static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	split_huge_page_pmd(walk->mm, pmd);
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		union mc_target target;
		int type;
		struct page *page;
		struct page_cgroup *pc;
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		type = is_target_pte_for_mc(vma, addr, ptent, &target);
		switch (type) {
		case MC_TARGET_PAGE:
			page = target.page;
			if (isolate_lru_page(page))
				goto put;
			pc = lookup_page_cgroup(page);
			if (!mem_cgroup_move_account(page, 1, pc,
						     mc.from, mc.to, false)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* is_target_pte_for_mc() gets the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent,
						mc.from, mc.to, false)) {
				mc.precharge--;
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try to charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charging once in
		 * the attach() phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}
static void mem_cgroup_move_charge(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	lru_add_drain_all();
retry:
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		/*
		 * Someone holding the mmap_sem may be waiting on our
		 * waitq. So we cancel all extra charges, wake up all
		 * waiters, and retry. Because we cancel precharges, we
		 * might not be able to move enough charges, but moving
		 * charge is a best-effort feature anyway, so it wouldn't
		 * be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		int ret;
		struct mm_walk mem_cgroup_move_charge_walk = {
			.pmd_entry = mem_cgroup_move_charge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		ret = walk_page_range(vma->vm_start, vma->vm_end,
						&mem_cgroup_move_charge_walk);
		if (ret)
			/*
			 * This means we have consumed all precharges and
			 * failed to do an additional charge. Just abandon
			 * the walk here.
			 */
			break;
	}
	up_read(&mm->mmap_sem);
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p,
				bool threadgroup)
{
	struct mm_struct *mm;

	if (!mc.to)
		/* no need to move charge */
		return;

	mm = get_task_mm(p);
	if (mm) {
		mem_cgroup_move_charge(mm);
		mmput(mm);
	}
	mem_cgroup_clear_mc();
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p,
				bool threadgroup)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p,
				bool threadgroup)
{
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p,
				bool threadgroup)
{
}
#endif

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
	.use_id = 1,
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static int __init enable_swap_account(char *s)
{
	/* consider swap accounting enabled if no parameter or "=1" is given */
	if (!(*s) || !strcmp(s, "=1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "=0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount", enable_swap_account);

static int __init disable_swap_account(char *s)
{
	printk_once("noswapaccount is deprecated and will be removed in 2.6.40. Use swapaccount=0 instead\n");
	enable_swap_account("=0");
	return 1;
}
__setup("noswapaccount", disable_swap_account);
#endif
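/*
 * Illustrative boot-command-line usage of the parameters above (assumed
 * invocation on a kernel built with CONFIG_CGROUP_MEM_RES_CTLR_SWAP):
 *
 *   swapaccount=1    enable swap accounting (also the bare "swapaccount")
 *   swapaccount=0    disable swap accounting
 *   noswapaccount    deprecated alias for swapaccount=0
 */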