memcontrol.c revision 1af8efe965676ab30d6c8a5b1fccc9229f339a3b
1/* memcontrol.c - Memory Controller 2 * 3 * Copyright IBM Corporation, 2007 4 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 5 * 6 * Copyright 2007 OpenVZ SWsoft Inc 7 * Author: Pavel Emelianov <xemul@openvz.org> 8 * 9 * Memory thresholds 10 * Copyright (C) 2009 Nokia Corporation 11 * Author: Kirill A. Shutemov 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * GNU General Public License for more details. 22 */ 23 24#include <linux/res_counter.h> 25#include <linux/memcontrol.h> 26#include <linux/cgroup.h> 27#include <linux/mm.h> 28#include <linux/hugetlb.h> 29#include <linux/pagemap.h> 30#include <linux/smp.h> 31#include <linux/page-flags.h> 32#include <linux/backing-dev.h> 33#include <linux/bit_spinlock.h> 34#include <linux/rcupdate.h> 35#include <linux/limits.h> 36#include <linux/mutex.h> 37#include <linux/rbtree.h> 38#include <linux/shmem_fs.h> 39#include <linux/slab.h> 40#include <linux/swap.h> 41#include <linux/swapops.h> 42#include <linux/spinlock.h> 43#include <linux/eventfd.h> 44#include <linux/sort.h> 45#include <linux/fs.h> 46#include <linux/seq_file.h> 47#include <linux/vmalloc.h> 48#include <linux/mm_inline.h> 49#include <linux/page_cgroup.h> 50#include <linux/cpu.h> 51#include <linux/oom.h> 52#include "internal.h" 53 54#include <asm/uaccess.h> 55 56#include <trace/events/vmscan.h> 57 58struct cgroup_subsys mem_cgroup_subsys __read_mostly; 59#define MEM_CGROUP_RECLAIM_RETRIES 5 60struct mem_cgroup *root_mem_cgroup __read_mostly; 61 62#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 63/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ 64int do_swap_account __read_mostly; 65 66/* for remember boot option*/ 67#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED 68static int really_do_swap_account __initdata = 1; 69#else 70static int really_do_swap_account __initdata = 0; 71#endif 72 73#else 74#define do_swap_account (0) 75#endif 76 77 78/* 79 * Statistics for memory cgroup. 80 */ 81enum mem_cgroup_stat_index { 82 /* 83 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss. 84 */ 85 MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */ 86 MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ 87 MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ 88 MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */ 89 MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */ 90 MEM_CGROUP_ON_MOVE, /* someone is moving account between groups */ 91 MEM_CGROUP_STAT_NSTATS, 92}; 93 94enum mem_cgroup_events_index { 95 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */ 96 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */ 97 MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */ 98 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */ 99 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */ 100 MEM_CGROUP_EVENTS_NSTATS, 101}; 102/* 103 * Per memcg event counter is incremented at every pagein/pageout. With THP, 104 * it will be incremated by the number of pages. This counter is used for 105 * for trigger some periodic events. This is straightforward and better 106 * than using jiffies etc. to handle periodic memcg event. 107 */ 108enum mem_cgroup_events_target { 109 MEM_CGROUP_TARGET_THRESH, 110 MEM_CGROUP_TARGET_SOFTLIMIT, 111 MEM_CGROUP_TARGET_NUMAINFO, 112 MEM_CGROUP_NTARGETS, 113}; 114#define THRESHOLDS_EVENTS_TARGET (128) 115#define SOFTLIMIT_EVENTS_TARGET (1024) 116#define NUMAINFO_EVENTS_TARGET (1024) 117 118struct mem_cgroup_stat_cpu { 119 long count[MEM_CGROUP_STAT_NSTATS]; 120 unsigned long events[MEM_CGROUP_EVENTS_NSTATS]; 121 unsigned long targets[MEM_CGROUP_NTARGETS]; 122}; 123 124/* 125 * per-zone information in memory controller. 126 */ 127struct mem_cgroup_per_zone { 128 /* 129 * spin_lock to protect the per cgroup LRU 130 */ 131 struct list_head lists[NR_LRU_LISTS]; 132 unsigned long count[NR_LRU_LISTS]; 133 134 struct zone_reclaim_stat reclaim_stat; 135 struct rb_node tree_node; /* RB tree node */ 136 unsigned long long usage_in_excess;/* Set to the value by which */ 137 /* the soft limit is exceeded*/ 138 bool on_tree; 139 struct mem_cgroup *mem; /* Back pointer, we cannot */ 140 /* use container_of */ 141}; 142/* Macro for accessing counter */ 143#define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)]) 144 145struct mem_cgroup_per_node { 146 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; 147}; 148 149struct mem_cgroup_lru_info { 150 struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES]; 151}; 152 153/* 154 * Cgroups above their limits are maintained in a RB-Tree, independent of 155 * their hierarchy representation 156 */ 157 158struct mem_cgroup_tree_per_zone { 159 struct rb_root rb_root; 160 spinlock_t lock; 161}; 162 163struct mem_cgroup_tree_per_node { 164 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES]; 165}; 166 167struct mem_cgroup_tree { 168 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 169}; 170 171static struct mem_cgroup_tree soft_limit_tree __read_mostly; 172 173struct mem_cgroup_threshold { 174 struct eventfd_ctx *eventfd; 175 u64 threshold; 176}; 177 178/* For threshold */ 179struct mem_cgroup_threshold_ary { 180 /* An array index points to threshold just below usage. */ 181 int current_threshold; 182 /* Size of entries[] */ 183 unsigned int size; 184 /* Array of thresholds */ 185 struct mem_cgroup_threshold entries[0]; 186}; 187 188struct mem_cgroup_thresholds { 189 /* Primary thresholds array */ 190 struct mem_cgroup_threshold_ary *primary; 191 /* 192 * Spare threshold array. 193 * This is needed to make mem_cgroup_unregister_event() "never fail". 194 * It must be able to store at least primary->size - 1 entries. 195 */ 196 struct mem_cgroup_threshold_ary *spare; 197}; 198 199/* for OOM */ 200struct mem_cgroup_eventfd_list { 201 struct list_head list; 202 struct eventfd_ctx *eventfd; 203}; 204 205static void mem_cgroup_threshold(struct mem_cgroup *mem); 206static void mem_cgroup_oom_notify(struct mem_cgroup *mem); 207 208/* 209 * The memory controller data structure. The memory controller controls both 210 * page cache and RSS per cgroup. We would eventually like to provide 211 * statistics based on the statistics developed by Rik Van Riel for clock-pro, 212 * to help the administrator determine what knobs to tune. 213 * 214 * TODO: Add a water mark for the memory controller. Reclaim will begin when 215 * we hit the water mark. May be even add a low water mark, such that 216 * no reclaim occurs from a cgroup at it's low water mark, this is 217 * a feature that will be implemented much later in the future. 218 */ 219struct mem_cgroup { 220 struct cgroup_subsys_state css; 221 /* 222 * the counter to account for memory usage 223 */ 224 struct res_counter res; 225 /* 226 * the counter to account for mem+swap usage. 227 */ 228 struct res_counter memsw; 229 /* 230 * Per cgroup active and inactive list, similar to the 231 * per zone LRU lists. 232 */ 233 struct mem_cgroup_lru_info info; 234 /* 235 * While reclaiming in a hierarchy, we cache the last child we 236 * reclaimed from. 237 */ 238 int last_scanned_child; 239 int last_scanned_node; 240#if MAX_NUMNODES > 1 241 nodemask_t scan_nodes; 242 atomic_t numainfo_events; 243 atomic_t numainfo_updating; 244#endif 245 /* 246 * Should the accounting and control be hierarchical, per subtree? 247 */ 248 bool use_hierarchy; 249 250 bool oom_lock; 251 atomic_t under_oom; 252 253 atomic_t refcnt; 254 255 int swappiness; 256 /* OOM-Killer disable */ 257 int oom_kill_disable; 258 259 /* set when res.limit == memsw.limit */ 260 bool memsw_is_minimum; 261 262 /* protect arrays of thresholds */ 263 struct mutex thresholds_lock; 264 265 /* thresholds for memory usage. RCU-protected */ 266 struct mem_cgroup_thresholds thresholds; 267 268 /* thresholds for mem+swap usage. RCU-protected */ 269 struct mem_cgroup_thresholds memsw_thresholds; 270 271 /* For oom notifier event fd */ 272 struct list_head oom_notify; 273 274 /* 275 * Should we move charges of a task when a task is moved into this 276 * mem_cgroup ? And what type of charges should we move ? 277 */ 278 unsigned long move_charge_at_immigrate; 279 /* 280 * percpu counter. 281 */ 282 struct mem_cgroup_stat_cpu *stat; 283 /* 284 * used when a cpu is offlined or other synchronizations 285 * See mem_cgroup_read_stat(). 286 */ 287 struct mem_cgroup_stat_cpu nocpu_base; 288 spinlock_t pcp_counter_lock; 289}; 290 291/* Stuffs for move charges at task migration. */ 292/* 293 * Types of charges to be moved. "move_charge_at_immitgrate" is treated as a 294 * left-shifted bitmap of these types. 295 */ 296enum move_type { 297 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */ 298 MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */ 299 NR_MOVE_TYPE, 300}; 301 302/* "mc" and its members are protected by cgroup_mutex */ 303static struct move_charge_struct { 304 spinlock_t lock; /* for from, to */ 305 struct mem_cgroup *from; 306 struct mem_cgroup *to; 307 unsigned long precharge; 308 unsigned long moved_charge; 309 unsigned long moved_swap; 310 struct task_struct *moving_task; /* a task moving charges */ 311 wait_queue_head_t waitq; /* a waitq for other context */ 312} mc = { 313 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 314 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 315}; 316 317static bool move_anon(void) 318{ 319 return test_bit(MOVE_CHARGE_TYPE_ANON, 320 &mc.to->move_charge_at_immigrate); 321} 322 323static bool move_file(void) 324{ 325 return test_bit(MOVE_CHARGE_TYPE_FILE, 326 &mc.to->move_charge_at_immigrate); 327} 328 329/* 330 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 331 * limit reclaim to prevent infinite loops, if they ever occur. 332 */ 333#define MEM_CGROUP_MAX_RECLAIM_LOOPS (100) 334#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2) 335 336enum charge_type { 337 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 338 MEM_CGROUP_CHARGE_TYPE_MAPPED, 339 MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */ 340 MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */ 341 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */ 342 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */ 343 NR_CHARGE_TYPE, 344}; 345 346/* for encoding cft->private value on file */ 347#define _MEM (0) 348#define _MEMSWAP (1) 349#define _OOM_TYPE (2) 350#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val)) 351#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff) 352#define MEMFILE_ATTR(val) ((val) & 0xffff) 353/* Used for OOM nofiier */ 354#define OOM_CONTROL (0) 355 356/* 357 * Reclaim flags for mem_cgroup_hierarchical_reclaim 358 */ 359#define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0 360#define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT) 361#define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1 362#define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT) 363#define MEM_CGROUP_RECLAIM_SOFT_BIT 0x2 364#define MEM_CGROUP_RECLAIM_SOFT (1 << MEM_CGROUP_RECLAIM_SOFT_BIT) 365 366static void mem_cgroup_get(struct mem_cgroup *mem); 367static void mem_cgroup_put(struct mem_cgroup *mem); 368static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem); 369static void drain_all_stock_async(struct mem_cgroup *mem); 370 371static struct mem_cgroup_per_zone * 372mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) 373{ 374 return &mem->info.nodeinfo[nid]->zoneinfo[zid]; 375} 376 377struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem) 378{ 379 return &mem->css; 380} 381 382static struct mem_cgroup_per_zone * 383page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page) 384{ 385 int nid = page_to_nid(page); 386 int zid = page_zonenum(page); 387 388 return mem_cgroup_zoneinfo(mem, nid, zid); 389} 390 391static struct mem_cgroup_tree_per_zone * 392soft_limit_tree_node_zone(int nid, int zid) 393{ 394 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 395} 396 397static struct mem_cgroup_tree_per_zone * 398soft_limit_tree_from_page(struct page *page) 399{ 400 int nid = page_to_nid(page); 401 int zid = page_zonenum(page); 402 403 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 404} 405 406static void 407__mem_cgroup_insert_exceeded(struct mem_cgroup *mem, 408 struct mem_cgroup_per_zone *mz, 409 struct mem_cgroup_tree_per_zone *mctz, 410 unsigned long long new_usage_in_excess) 411{ 412 struct rb_node **p = &mctz->rb_root.rb_node; 413 struct rb_node *parent = NULL; 414 struct mem_cgroup_per_zone *mz_node; 415 416 if (mz->on_tree) 417 return; 418 419 mz->usage_in_excess = new_usage_in_excess; 420 if (!mz->usage_in_excess) 421 return; 422 while (*p) { 423 parent = *p; 424 mz_node = rb_entry(parent, struct mem_cgroup_per_zone, 425 tree_node); 426 if (mz->usage_in_excess < mz_node->usage_in_excess) 427 p = &(*p)->rb_left; 428 /* 429 * We can't avoid mem cgroups that are over their soft 430 * limit by the same amount 431 */ 432 else if (mz->usage_in_excess >= mz_node->usage_in_excess) 433 p = &(*p)->rb_right; 434 } 435 rb_link_node(&mz->tree_node, parent, p); 436 rb_insert_color(&mz->tree_node, &mctz->rb_root); 437 mz->on_tree = true; 438} 439 440static void 441__mem_cgroup_remove_exceeded(struct mem_cgroup *mem, 442 struct mem_cgroup_per_zone *mz, 443 struct mem_cgroup_tree_per_zone *mctz) 444{ 445 if (!mz->on_tree) 446 return; 447 rb_erase(&mz->tree_node, &mctz->rb_root); 448 mz->on_tree = false; 449} 450 451static void 452mem_cgroup_remove_exceeded(struct mem_cgroup *mem, 453 struct mem_cgroup_per_zone *mz, 454 struct mem_cgroup_tree_per_zone *mctz) 455{ 456 spin_lock(&mctz->lock); 457 __mem_cgroup_remove_exceeded(mem, mz, mctz); 458 spin_unlock(&mctz->lock); 459} 460 461 462static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page) 463{ 464 unsigned long long excess; 465 struct mem_cgroup_per_zone *mz; 466 struct mem_cgroup_tree_per_zone *mctz; 467 int nid = page_to_nid(page); 468 int zid = page_zonenum(page); 469 mctz = soft_limit_tree_from_page(page); 470 471 /* 472 * Necessary to update all ancestors when hierarchy is used. 473 * because their event counter is not touched. 474 */ 475 for (; mem; mem = parent_mem_cgroup(mem)) { 476 mz = mem_cgroup_zoneinfo(mem, nid, zid); 477 excess = res_counter_soft_limit_excess(&mem->res); 478 /* 479 * We have to update the tree if mz is on RB-tree or 480 * mem is over its softlimit. 481 */ 482 if (excess || mz->on_tree) { 483 spin_lock(&mctz->lock); 484 /* if on-tree, remove it */ 485 if (mz->on_tree) 486 __mem_cgroup_remove_exceeded(mem, mz, mctz); 487 /* 488 * Insert again. mz->usage_in_excess will be updated. 489 * If excess is 0, no tree ops. 490 */ 491 __mem_cgroup_insert_exceeded(mem, mz, mctz, excess); 492 spin_unlock(&mctz->lock); 493 } 494 } 495} 496 497static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem) 498{ 499 int node, zone; 500 struct mem_cgroup_per_zone *mz; 501 struct mem_cgroup_tree_per_zone *mctz; 502 503 for_each_node_state(node, N_POSSIBLE) { 504 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 505 mz = mem_cgroup_zoneinfo(mem, node, zone); 506 mctz = soft_limit_tree_node_zone(node, zone); 507 mem_cgroup_remove_exceeded(mem, mz, mctz); 508 } 509 } 510} 511 512static struct mem_cgroup_per_zone * 513__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) 514{ 515 struct rb_node *rightmost = NULL; 516 struct mem_cgroup_per_zone *mz; 517 518retry: 519 mz = NULL; 520 rightmost = rb_last(&mctz->rb_root); 521 if (!rightmost) 522 goto done; /* Nothing to reclaim from */ 523 524 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node); 525 /* 526 * Remove the node now but someone else can add it back, 527 * we will to add it back at the end of reclaim to its correct 528 * position in the tree. 529 */ 530 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz); 531 if (!res_counter_soft_limit_excess(&mz->mem->res) || 532 !css_tryget(&mz->mem->css)) 533 goto retry; 534done: 535 return mz; 536} 537 538static struct mem_cgroup_per_zone * 539mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) 540{ 541 struct mem_cgroup_per_zone *mz; 542 543 spin_lock(&mctz->lock); 544 mz = __mem_cgroup_largest_soft_limit_node(mctz); 545 spin_unlock(&mctz->lock); 546 return mz; 547} 548 549/* 550 * Implementation Note: reading percpu statistics for memcg. 551 * 552 * Both of vmstat[] and percpu_counter has threshold and do periodic 553 * synchronization to implement "quick" read. There are trade-off between 554 * reading cost and precision of value. Then, we may have a chance to implement 555 * a periodic synchronizion of counter in memcg's counter. 556 * 557 * But this _read() function is used for user interface now. The user accounts 558 * memory usage by memory cgroup and he _always_ requires exact value because 559 * he accounts memory. Even if we provide quick-and-fuzzy read, we always 560 * have to visit all online cpus and make sum. So, for now, unnecessary 561 * synchronization is not implemented. (just implemented for cpu hotplug) 562 * 563 * If there are kernel internal actions which can make use of some not-exact 564 * value, and reading all cpu value can be performance bottleneck in some 565 * common workload, threashold and synchonization as vmstat[] should be 566 * implemented. 567 */ 568static long mem_cgroup_read_stat(struct mem_cgroup *mem, 569 enum mem_cgroup_stat_index idx) 570{ 571 long val = 0; 572 int cpu; 573 574 get_online_cpus(); 575 for_each_online_cpu(cpu) 576 val += per_cpu(mem->stat->count[idx], cpu); 577#ifdef CONFIG_HOTPLUG_CPU 578 spin_lock(&mem->pcp_counter_lock); 579 val += mem->nocpu_base.count[idx]; 580 spin_unlock(&mem->pcp_counter_lock); 581#endif 582 put_online_cpus(); 583 return val; 584} 585 586static void mem_cgroup_swap_statistics(struct mem_cgroup *mem, 587 bool charge) 588{ 589 int val = (charge) ? 1 : -1; 590 this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val); 591} 592 593void mem_cgroup_pgfault(struct mem_cgroup *mem, int val) 594{ 595 this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val); 596} 597 598void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val) 599{ 600 this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val); 601} 602 603static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem, 604 enum mem_cgroup_events_index idx) 605{ 606 unsigned long val = 0; 607 int cpu; 608 609 for_each_online_cpu(cpu) 610 val += per_cpu(mem->stat->events[idx], cpu); 611#ifdef CONFIG_HOTPLUG_CPU 612 spin_lock(&mem->pcp_counter_lock); 613 val += mem->nocpu_base.events[idx]; 614 spin_unlock(&mem->pcp_counter_lock); 615#endif 616 return val; 617} 618 619static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, 620 bool file, int nr_pages) 621{ 622 preempt_disable(); 623 624 if (file) 625 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages); 626 else 627 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages); 628 629 /* pagein of a big page is an event. So, ignore page size */ 630 if (nr_pages > 0) 631 __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); 632 else { 633 __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); 634 nr_pages = -nr_pages; /* for event */ 635 } 636 637 __this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages); 638 639 preempt_enable(); 640} 641 642unsigned long 643mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *mem, int nid, int zid, 644 unsigned int lru_mask) 645{ 646 struct mem_cgroup_per_zone *mz; 647 enum lru_list l; 648 unsigned long ret = 0; 649 650 mz = mem_cgroup_zoneinfo(mem, nid, zid); 651 652 for_each_lru(l) { 653 if (BIT(l) & lru_mask) 654 ret += MEM_CGROUP_ZSTAT(mz, l); 655 } 656 return ret; 657} 658 659static unsigned long 660mem_cgroup_node_nr_lru_pages(struct mem_cgroup *mem, 661 int nid, unsigned int lru_mask) 662{ 663 u64 total = 0; 664 int zid; 665 666 for (zid = 0; zid < MAX_NR_ZONES; zid++) 667 total += mem_cgroup_zone_nr_lru_pages(mem, nid, zid, lru_mask); 668 669 return total; 670} 671 672static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *mem, 673 unsigned int lru_mask) 674{ 675 int nid; 676 u64 total = 0; 677 678 for_each_node_state(nid, N_HIGH_MEMORY) 679 total += mem_cgroup_node_nr_lru_pages(mem, nid, lru_mask); 680 return total; 681} 682 683static bool __memcg_event_check(struct mem_cgroup *mem, int target) 684{ 685 unsigned long val, next; 686 687 val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]); 688 next = this_cpu_read(mem->stat->targets[target]); 689 /* from time_after() in jiffies.h */ 690 return ((long)next - (long)val < 0); 691} 692 693static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target) 694{ 695 unsigned long val, next; 696 697 val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]); 698 699 switch (target) { 700 case MEM_CGROUP_TARGET_THRESH: 701 next = val + THRESHOLDS_EVENTS_TARGET; 702 break; 703 case MEM_CGROUP_TARGET_SOFTLIMIT: 704 next = val + SOFTLIMIT_EVENTS_TARGET; 705 break; 706 case MEM_CGROUP_TARGET_NUMAINFO: 707 next = val + NUMAINFO_EVENTS_TARGET; 708 break; 709 default: 710 return; 711 } 712 713 this_cpu_write(mem->stat->targets[target], next); 714} 715 716/* 717 * Check events in order. 718 * 719 */ 720static void memcg_check_events(struct mem_cgroup *mem, struct page *page) 721{ 722 /* threshold event is triggered in finer grain than soft limit */ 723 if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) { 724 mem_cgroup_threshold(mem); 725 __mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH); 726 if (unlikely(__memcg_event_check(mem, 727 MEM_CGROUP_TARGET_SOFTLIMIT))) { 728 mem_cgroup_update_tree(mem, page); 729 __mem_cgroup_target_update(mem, 730 MEM_CGROUP_TARGET_SOFTLIMIT); 731 } 732#if MAX_NUMNODES > 1 733 if (unlikely(__memcg_event_check(mem, 734 MEM_CGROUP_TARGET_NUMAINFO))) { 735 atomic_inc(&mem->numainfo_events); 736 __mem_cgroup_target_update(mem, 737 MEM_CGROUP_TARGET_NUMAINFO); 738 } 739#endif 740 } 741} 742 743static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) 744{ 745 return container_of(cgroup_subsys_state(cont, 746 mem_cgroup_subsys_id), struct mem_cgroup, 747 css); 748} 749 750struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 751{ 752 /* 753 * mm_update_next_owner() may clear mm->owner to NULL 754 * if it races with swapoff, page migration, etc. 755 * So this can be called with p == NULL. 756 */ 757 if (unlikely(!p)) 758 return NULL; 759 760 return container_of(task_subsys_state(p, mem_cgroup_subsys_id), 761 struct mem_cgroup, css); 762} 763 764struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) 765{ 766 struct mem_cgroup *mem = NULL; 767 768 if (!mm) 769 return NULL; 770 /* 771 * Because we have no locks, mm->owner's may be being moved to other 772 * cgroup. We use css_tryget() here even if this looks 773 * pessimistic (rather than adding locks here). 774 */ 775 rcu_read_lock(); 776 do { 777 mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); 778 if (unlikely(!mem)) 779 break; 780 } while (!css_tryget(&mem->css)); 781 rcu_read_unlock(); 782 return mem; 783} 784 785/* The caller has to guarantee "mem" exists before calling this */ 786static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem) 787{ 788 struct cgroup_subsys_state *css; 789 int found; 790 791 if (!mem) /* ROOT cgroup has the smallest ID */ 792 return root_mem_cgroup; /*css_put/get against root is ignored*/ 793 if (!mem->use_hierarchy) { 794 if (css_tryget(&mem->css)) 795 return mem; 796 return NULL; 797 } 798 rcu_read_lock(); 799 /* 800 * searching a memory cgroup which has the smallest ID under given 801 * ROOT cgroup. (ID >= 1) 802 */ 803 css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found); 804 if (css && css_tryget(css)) 805 mem = container_of(css, struct mem_cgroup, css); 806 else 807 mem = NULL; 808 rcu_read_unlock(); 809 return mem; 810} 811 812static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter, 813 struct mem_cgroup *root, 814 bool cond) 815{ 816 int nextid = css_id(&iter->css) + 1; 817 int found; 818 int hierarchy_used; 819 struct cgroup_subsys_state *css; 820 821 hierarchy_used = iter->use_hierarchy; 822 823 css_put(&iter->css); 824 /* If no ROOT, walk all, ignore hierarchy */ 825 if (!cond || (root && !hierarchy_used)) 826 return NULL; 827 828 if (!root) 829 root = root_mem_cgroup; 830 831 do { 832 iter = NULL; 833 rcu_read_lock(); 834 835 css = css_get_next(&mem_cgroup_subsys, nextid, 836 &root->css, &found); 837 if (css && css_tryget(css)) 838 iter = container_of(css, struct mem_cgroup, css); 839 rcu_read_unlock(); 840 /* If css is NULL, no more cgroups will be found */ 841 nextid = found + 1; 842 } while (css && !iter); 843 844 return iter; 845} 846/* 847 * for_eacn_mem_cgroup_tree() for visiting all cgroup under tree. Please 848 * be careful that "break" loop is not allowed. We have reference count. 849 * Instead of that modify "cond" to be false and "continue" to exit the loop. 850 */ 851#define for_each_mem_cgroup_tree_cond(iter, root, cond) \ 852 for (iter = mem_cgroup_start_loop(root);\ 853 iter != NULL;\ 854 iter = mem_cgroup_get_next(iter, root, cond)) 855 856#define for_each_mem_cgroup_tree(iter, root) \ 857 for_each_mem_cgroup_tree_cond(iter, root, true) 858 859#define for_each_mem_cgroup_all(iter) \ 860 for_each_mem_cgroup_tree_cond(iter, NULL, true) 861 862 863static inline bool mem_cgroup_is_root(struct mem_cgroup *mem) 864{ 865 return (mem == root_mem_cgroup); 866} 867 868void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) 869{ 870 struct mem_cgroup *mem; 871 872 if (!mm) 873 return; 874 875 rcu_read_lock(); 876 mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); 877 if (unlikely(!mem)) 878 goto out; 879 880 switch (idx) { 881 case PGMAJFAULT: 882 mem_cgroup_pgmajfault(mem, 1); 883 break; 884 case PGFAULT: 885 mem_cgroup_pgfault(mem, 1); 886 break; 887 default: 888 BUG(); 889 } 890out: 891 rcu_read_unlock(); 892} 893EXPORT_SYMBOL(mem_cgroup_count_vm_event); 894 895/* 896 * Following LRU functions are allowed to be used without PCG_LOCK. 897 * Operations are called by routine of global LRU independently from memcg. 898 * What we have to take care of here is validness of pc->mem_cgroup. 899 * 900 * Changes to pc->mem_cgroup happens when 901 * 1. charge 902 * 2. moving account 903 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache. 904 * It is added to LRU before charge. 905 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU. 906 * When moving account, the page is not on LRU. It's isolated. 907 */ 908 909void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru) 910{ 911 struct page_cgroup *pc; 912 struct mem_cgroup_per_zone *mz; 913 914 if (mem_cgroup_disabled()) 915 return; 916 pc = lookup_page_cgroup(page); 917 /* can happen while we handle swapcache. */ 918 if (!TestClearPageCgroupAcctLRU(pc)) 919 return; 920 VM_BUG_ON(!pc->mem_cgroup); 921 /* 922 * We don't check PCG_USED bit. It's cleared when the "page" is finally 923 * removed from global LRU. 924 */ 925 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page); 926 /* huge page split is done under lru_lock. so, we have no races. */ 927 MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page); 928 if (mem_cgroup_is_root(pc->mem_cgroup)) 929 return; 930 VM_BUG_ON(list_empty(&pc->lru)); 931 list_del_init(&pc->lru); 932} 933 934void mem_cgroup_del_lru(struct page *page) 935{ 936 mem_cgroup_del_lru_list(page, page_lru(page)); 937} 938 939/* 940 * Writeback is about to end against a page which has been marked for immediate 941 * reclaim. If it still appears to be reclaimable, move it to the tail of the 942 * inactive list. 943 */ 944void mem_cgroup_rotate_reclaimable_page(struct page *page) 945{ 946 struct mem_cgroup_per_zone *mz; 947 struct page_cgroup *pc; 948 enum lru_list lru = page_lru(page); 949 950 if (mem_cgroup_disabled()) 951 return; 952 953 pc = lookup_page_cgroup(page); 954 /* unused or root page is not rotated. */ 955 if (!PageCgroupUsed(pc)) 956 return; 957 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */ 958 smp_rmb(); 959 if (mem_cgroup_is_root(pc->mem_cgroup)) 960 return; 961 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page); 962 list_move_tail(&pc->lru, &mz->lists[lru]); 963} 964 965void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru) 966{ 967 struct mem_cgroup_per_zone *mz; 968 struct page_cgroup *pc; 969 970 if (mem_cgroup_disabled()) 971 return; 972 973 pc = lookup_page_cgroup(page); 974 /* unused or root page is not rotated. */ 975 if (!PageCgroupUsed(pc)) 976 return; 977 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */ 978 smp_rmb(); 979 if (mem_cgroup_is_root(pc->mem_cgroup)) 980 return; 981 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page); 982 list_move(&pc->lru, &mz->lists[lru]); 983} 984 985void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru) 986{ 987 struct page_cgroup *pc; 988 struct mem_cgroup_per_zone *mz; 989 990 if (mem_cgroup_disabled()) 991 return; 992 pc = lookup_page_cgroup(page); 993 VM_BUG_ON(PageCgroupAcctLRU(pc)); 994 if (!PageCgroupUsed(pc)) 995 return; 996 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */ 997 smp_rmb(); 998 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page); 999 /* huge page split is done under lru_lock. so, we have no races. */ 1000 MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page); 1001 SetPageCgroupAcctLRU(pc); 1002 if (mem_cgroup_is_root(pc->mem_cgroup)) 1003 return; 1004 list_add(&pc->lru, &mz->lists[lru]); 1005} 1006 1007/* 1008 * At handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed 1009 * while it's linked to lru because the page may be reused after it's fully 1010 * uncharged. To handle that, unlink page_cgroup from LRU when charge it again. 1011 * It's done under lock_page and expected that zone->lru_lock isnever held. 1012 */ 1013static void mem_cgroup_lru_del_before_commit(struct page *page) 1014{ 1015 unsigned long flags; 1016 struct zone *zone = page_zone(page); 1017 struct page_cgroup *pc = lookup_page_cgroup(page); 1018 1019 /* 1020 * Doing this check without taking ->lru_lock seems wrong but this 1021 * is safe. Because if page_cgroup's USED bit is unset, the page 1022 * will not be added to any memcg's LRU. If page_cgroup's USED bit is 1023 * set, the commit after this will fail, anyway. 1024 * This all charge/uncharge is done under some mutual execustion. 1025 * So, we don't need to taking care of changes in USED bit. 1026 */ 1027 if (likely(!PageLRU(page))) 1028 return; 1029 1030 spin_lock_irqsave(&zone->lru_lock, flags); 1031 /* 1032 * Forget old LRU when this page_cgroup is *not* used. This Used bit 1033 * is guarded by lock_page() because the page is SwapCache. 1034 */ 1035 if (!PageCgroupUsed(pc)) 1036 mem_cgroup_del_lru_list(page, page_lru(page)); 1037 spin_unlock_irqrestore(&zone->lru_lock, flags); 1038} 1039 1040static void mem_cgroup_lru_add_after_commit(struct page *page) 1041{ 1042 unsigned long flags; 1043 struct zone *zone = page_zone(page); 1044 struct page_cgroup *pc = lookup_page_cgroup(page); 1045 1046 /* taking care of that the page is added to LRU while we commit it */ 1047 if (likely(!PageLRU(page))) 1048 return; 1049 spin_lock_irqsave(&zone->lru_lock, flags); 1050 /* link when the page is linked to LRU but page_cgroup isn't */ 1051 if (PageLRU(page) && !PageCgroupAcctLRU(pc)) 1052 mem_cgroup_add_lru_list(page, page_lru(page)); 1053 spin_unlock_irqrestore(&zone->lru_lock, flags); 1054} 1055 1056 1057void mem_cgroup_move_lists(struct page *page, 1058 enum lru_list from, enum lru_list to) 1059{ 1060 if (mem_cgroup_disabled()) 1061 return; 1062 mem_cgroup_del_lru_list(page, from); 1063 mem_cgroup_add_lru_list(page, to); 1064} 1065 1066int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem) 1067{ 1068 int ret; 1069 struct mem_cgroup *curr = NULL; 1070 struct task_struct *p; 1071 1072 p = find_lock_task_mm(task); 1073 if (!p) 1074 return 0; 1075 curr = try_get_mem_cgroup_from_mm(p->mm); 1076 task_unlock(p); 1077 if (!curr) 1078 return 0; 1079 /* 1080 * We should check use_hierarchy of "mem" not "curr". Because checking 1081 * use_hierarchy of "curr" here make this function true if hierarchy is 1082 * enabled in "curr" and "curr" is a child of "mem" in *cgroup* 1083 * hierarchy(even if use_hierarchy is disabled in "mem"). 1084 */ 1085 if (mem->use_hierarchy) 1086 ret = css_is_ancestor(&curr->css, &mem->css); 1087 else 1088 ret = (curr == mem); 1089 css_put(&curr->css); 1090 return ret; 1091} 1092 1093static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages) 1094{ 1095 unsigned long active; 1096 unsigned long inactive; 1097 unsigned long gb; 1098 unsigned long inactive_ratio; 1099 1100 inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON)); 1101 active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON)); 1102 1103 gb = (inactive + active) >> (30 - PAGE_SHIFT); 1104 if (gb) 1105 inactive_ratio = int_sqrt(10 * gb); 1106 else 1107 inactive_ratio = 1; 1108 1109 if (present_pages) { 1110 present_pages[0] = inactive; 1111 present_pages[1] = active; 1112 } 1113 1114 return inactive_ratio; 1115} 1116 1117int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg) 1118{ 1119 unsigned long active; 1120 unsigned long inactive; 1121 unsigned long present_pages[2]; 1122 unsigned long inactive_ratio; 1123 1124 inactive_ratio = calc_inactive_ratio(memcg, present_pages); 1125 1126 inactive = present_pages[0]; 1127 active = present_pages[1]; 1128 1129 if (inactive * inactive_ratio < active) 1130 return 1; 1131 1132 return 0; 1133} 1134 1135int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg) 1136{ 1137 unsigned long active; 1138 unsigned long inactive; 1139 1140 inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE)); 1141 active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE)); 1142 1143 return (active > inactive); 1144} 1145 1146struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, 1147 struct zone *zone) 1148{ 1149 int nid = zone_to_nid(zone); 1150 int zid = zone_idx(zone); 1151 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid); 1152 1153 return &mz->reclaim_stat; 1154} 1155 1156struct zone_reclaim_stat * 1157mem_cgroup_get_reclaim_stat_from_page(struct page *page) 1158{ 1159 struct page_cgroup *pc; 1160 struct mem_cgroup_per_zone *mz; 1161 1162 if (mem_cgroup_disabled()) 1163 return NULL; 1164 1165 pc = lookup_page_cgroup(page); 1166 if (!PageCgroupUsed(pc)) 1167 return NULL; 1168 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */ 1169 smp_rmb(); 1170 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page); 1171 return &mz->reclaim_stat; 1172} 1173 1174unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, 1175 struct list_head *dst, 1176 unsigned long *scanned, int order, 1177 int mode, struct zone *z, 1178 struct mem_cgroup *mem_cont, 1179 int active, int file) 1180{ 1181 unsigned long nr_taken = 0; 1182 struct page *page; 1183 unsigned long scan; 1184 LIST_HEAD(pc_list); 1185 struct list_head *src; 1186 struct page_cgroup *pc, *tmp; 1187 int nid = zone_to_nid(z); 1188 int zid = zone_idx(z); 1189 struct mem_cgroup_per_zone *mz; 1190 int lru = LRU_FILE * file + active; 1191 int ret; 1192 1193 BUG_ON(!mem_cont); 1194 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); 1195 src = &mz->lists[lru]; 1196 1197 scan = 0; 1198 list_for_each_entry_safe_reverse(pc, tmp, src, lru) { 1199 if (scan >= nr_to_scan) 1200 break; 1201 1202 if (unlikely(!PageCgroupUsed(pc))) 1203 continue; 1204 1205 page = lookup_cgroup_page(pc); 1206 1207 if (unlikely(!PageLRU(page))) 1208 continue; 1209 1210 scan++; 1211 ret = __isolate_lru_page(page, mode, file); 1212 switch (ret) { 1213 case 0: 1214 list_move(&page->lru, dst); 1215 mem_cgroup_del_lru(page); 1216 nr_taken += hpage_nr_pages(page); 1217 break; 1218 case -EBUSY: 1219 /* we don't affect global LRU but rotate in our LRU */ 1220 mem_cgroup_rotate_lru_list(page, page_lru(page)); 1221 break; 1222 default: 1223 break; 1224 } 1225 } 1226 1227 *scanned = scan; 1228 1229 trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken, 1230 0, 0, 0, mode); 1231 1232 return nr_taken; 1233} 1234 1235#define mem_cgroup_from_res_counter(counter, member) \ 1236 container_of(counter, struct mem_cgroup, member) 1237 1238/** 1239 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1240 * @mem: the memory cgroup 1241 * 1242 * Returns the maximum amount of memory @mem can be charged with, in 1243 * pages. 1244 */ 1245static unsigned long mem_cgroup_margin(struct mem_cgroup *mem) 1246{ 1247 unsigned long long margin; 1248 1249 margin = res_counter_margin(&mem->res); 1250 if (do_swap_account) 1251 margin = min(margin, res_counter_margin(&mem->memsw)); 1252 return margin >> PAGE_SHIFT; 1253} 1254 1255int mem_cgroup_swappiness(struct mem_cgroup *memcg) 1256{ 1257 struct cgroup *cgrp = memcg->css.cgroup; 1258 1259 /* root ? */ 1260 if (cgrp->parent == NULL) 1261 return vm_swappiness; 1262 1263 return memcg->swappiness; 1264} 1265 1266static void mem_cgroup_start_move(struct mem_cgroup *mem) 1267{ 1268 int cpu; 1269 1270 get_online_cpus(); 1271 spin_lock(&mem->pcp_counter_lock); 1272 for_each_online_cpu(cpu) 1273 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1; 1274 mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1; 1275 spin_unlock(&mem->pcp_counter_lock); 1276 put_online_cpus(); 1277 1278 synchronize_rcu(); 1279} 1280 1281static void mem_cgroup_end_move(struct mem_cgroup *mem) 1282{ 1283 int cpu; 1284 1285 if (!mem) 1286 return; 1287 get_online_cpus(); 1288 spin_lock(&mem->pcp_counter_lock); 1289 for_each_online_cpu(cpu) 1290 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1; 1291 mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1; 1292 spin_unlock(&mem->pcp_counter_lock); 1293 put_online_cpus(); 1294} 1295/* 1296 * 2 routines for checking "mem" is under move_account() or not. 1297 * 1298 * mem_cgroup_stealed() - checking a cgroup is mc.from or not. This is used 1299 * for avoiding race in accounting. If true, 1300 * pc->mem_cgroup may be overwritten. 1301 * 1302 * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or 1303 * under hierarchy of moving cgroups. This is for 1304 * waiting at hith-memory prressure caused by "move". 1305 */ 1306 1307static bool mem_cgroup_stealed(struct mem_cgroup *mem) 1308{ 1309 VM_BUG_ON(!rcu_read_lock_held()); 1310 return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0; 1311} 1312 1313static bool mem_cgroup_under_move(struct mem_cgroup *mem) 1314{ 1315 struct mem_cgroup *from; 1316 struct mem_cgroup *to; 1317 bool ret = false; 1318 /* 1319 * Unlike task_move routines, we access mc.to, mc.from not under 1320 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1321 */ 1322 spin_lock(&mc.lock); 1323 from = mc.from; 1324 to = mc.to; 1325 if (!from) 1326 goto unlock; 1327 if (from == mem || to == mem 1328 || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css)) 1329 || (mem->use_hierarchy && css_is_ancestor(&to->css, &mem->css))) 1330 ret = true; 1331unlock: 1332 spin_unlock(&mc.lock); 1333 return ret; 1334} 1335 1336static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem) 1337{ 1338 if (mc.moving_task && current != mc.moving_task) { 1339 if (mem_cgroup_under_move(mem)) { 1340 DEFINE_WAIT(wait); 1341 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1342 /* moving charge context might have finished. */ 1343 if (mc.moving_task) 1344 schedule(); 1345 finish_wait(&mc.waitq, &wait); 1346 return true; 1347 } 1348 } 1349 return false; 1350} 1351 1352/** 1353 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode. 1354 * @memcg: The memory cgroup that went over limit 1355 * @p: Task that is going to be killed 1356 * 1357 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1358 * enabled 1359 */ 1360void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 1361{ 1362 struct cgroup *task_cgrp; 1363 struct cgroup *mem_cgrp; 1364 /* 1365 * Need a buffer in BSS, can't rely on allocations. The code relies 1366 * on the assumption that OOM is serialized for memory controller. 1367 * If this assumption is broken, revisit this code. 1368 */ 1369 static char memcg_name[PATH_MAX]; 1370 int ret; 1371 1372 if (!memcg || !p) 1373 return; 1374 1375 1376 rcu_read_lock(); 1377 1378 mem_cgrp = memcg->css.cgroup; 1379 task_cgrp = task_cgroup(p, mem_cgroup_subsys_id); 1380 1381 ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX); 1382 if (ret < 0) { 1383 /* 1384 * Unfortunately, we are unable to convert to a useful name 1385 * But we'll still print out the usage information 1386 */ 1387 rcu_read_unlock(); 1388 goto done; 1389 } 1390 rcu_read_unlock(); 1391 1392 printk(KERN_INFO "Task in %s killed", memcg_name); 1393 1394 rcu_read_lock(); 1395 ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX); 1396 if (ret < 0) { 1397 rcu_read_unlock(); 1398 goto done; 1399 } 1400 rcu_read_unlock(); 1401 1402 /* 1403 * Continues from above, so we don't need an KERN_ level 1404 */ 1405 printk(KERN_CONT " as a result of limit of %s\n", memcg_name); 1406done: 1407 1408 printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n", 1409 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10, 1410 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10, 1411 res_counter_read_u64(&memcg->res, RES_FAILCNT)); 1412 printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, " 1413 "failcnt %llu\n", 1414 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10, 1415 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10, 1416 res_counter_read_u64(&memcg->memsw, RES_FAILCNT)); 1417} 1418 1419/* 1420 * This function returns the number of memcg under hierarchy tree. Returns 1421 * 1(self count) if no children. 1422 */ 1423static int mem_cgroup_count_children(struct mem_cgroup *mem) 1424{ 1425 int num = 0; 1426 struct mem_cgroup *iter; 1427 1428 for_each_mem_cgroup_tree(iter, mem) 1429 num++; 1430 return num; 1431} 1432 1433/* 1434 * Return the memory (and swap, if configured) limit for a memcg. 1435 */ 1436u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) 1437{ 1438 u64 limit; 1439 u64 memsw; 1440 1441 limit = res_counter_read_u64(&memcg->res, RES_LIMIT); 1442 limit += total_swap_pages << PAGE_SHIFT; 1443 1444 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 1445 /* 1446 * If memsw is finite and limits the amount of swap space available 1447 * to this memcg, return that limit. 1448 */ 1449 return min(limit, memsw); 1450} 1451 1452/* 1453 * Visit the first child (need not be the first child as per the ordering 1454 * of the cgroup list, since we track last_scanned_child) of @mem and use 1455 * that to reclaim free pages from. 1456 */ 1457static struct mem_cgroup * 1458mem_cgroup_select_victim(struct mem_cgroup *root_mem) 1459{ 1460 struct mem_cgroup *ret = NULL; 1461 struct cgroup_subsys_state *css; 1462 int nextid, found; 1463 1464 if (!root_mem->use_hierarchy) { 1465 css_get(&root_mem->css); 1466 ret = root_mem; 1467 } 1468 1469 while (!ret) { 1470 rcu_read_lock(); 1471 nextid = root_mem->last_scanned_child + 1; 1472 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css, 1473 &found); 1474 if (css && css_tryget(css)) 1475 ret = container_of(css, struct mem_cgroup, css); 1476 1477 rcu_read_unlock(); 1478 /* Updates scanning parameter */ 1479 if (!css) { 1480 /* this means start scan from ID:1 */ 1481 root_mem->last_scanned_child = 0; 1482 } else 1483 root_mem->last_scanned_child = found; 1484 } 1485 1486 return ret; 1487} 1488 1489/** 1490 * test_mem_cgroup_node_reclaimable 1491 * @mem: the target memcg 1492 * @nid: the node ID to be checked. 1493 * @noswap : specify true here if the user wants flle only information. 1494 * 1495 * This function returns whether the specified memcg contains any 1496 * reclaimable pages on a node. Returns true if there are any reclaimable 1497 * pages in the node. 1498 */ 1499static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem, 1500 int nid, bool noswap) 1501{ 1502 if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_FILE)) 1503 return true; 1504 if (noswap || !total_swap_pages) 1505 return false; 1506 if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_ANON)) 1507 return true; 1508 return false; 1509 1510} 1511#if MAX_NUMNODES > 1 1512 1513/* 1514 * Always updating the nodemask is not very good - even if we have an empty 1515 * list or the wrong list here, we can start from some node and traverse all 1516 * nodes based on the zonelist. So update the list loosely once per 10 secs. 1517 * 1518 */ 1519static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem) 1520{ 1521 int nid; 1522 /* 1523 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET 1524 * pagein/pageout changes since the last update. 1525 */ 1526 if (!atomic_read(&mem->numainfo_events)) 1527 return; 1528 if (atomic_inc_return(&mem->numainfo_updating) > 1) 1529 return; 1530 1531 /* make a nodemask where this memcg uses memory from */ 1532 mem->scan_nodes = node_states[N_HIGH_MEMORY]; 1533 1534 for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) { 1535 1536 if (!test_mem_cgroup_node_reclaimable(mem, nid, false)) 1537 node_clear(nid, mem->scan_nodes); 1538 } 1539 1540 atomic_set(&mem->numainfo_events, 0); 1541 atomic_set(&mem->numainfo_updating, 0); 1542} 1543 1544/* 1545 * Selecting a node where we start reclaim from. Because what we need is just 1546 * reducing usage counter, start from anywhere is O,K. Considering 1547 * memory reclaim from current node, there are pros. and cons. 1548 * 1549 * Freeing memory from current node means freeing memory from a node which 1550 * we'll use or we've used. So, it may make LRU bad. And if several threads 1551 * hit limits, it will see a contention on a node. But freeing from remote 1552 * node means more costs for memory reclaim because of memory latency. 1553 * 1554 * Now, we use round-robin. Better algorithm is welcomed. 1555 */ 1556int mem_cgroup_select_victim_node(struct mem_cgroup *mem) 1557{ 1558 int node; 1559 1560 mem_cgroup_may_update_nodemask(mem); 1561 node = mem->last_scanned_node; 1562 1563 node = next_node(node, mem->scan_nodes); 1564 if (node == MAX_NUMNODES) 1565 node = first_node(mem->scan_nodes); 1566 /* 1567 * We call this when we hit limit, not when pages are added to LRU. 1568 * No LRU may hold pages because all pages are UNEVICTABLE or 1569 * memcg is too small and all pages are not on LRU. In that case, 1570 * we use curret node. 1571 */ 1572 if (unlikely(node == MAX_NUMNODES)) 1573 node = numa_node_id(); 1574 1575 mem->last_scanned_node = node; 1576 return node; 1577} 1578 1579/* 1580 * Check all nodes whether it contains reclaimable pages or not. 1581 * For quick scan, we make use of scan_nodes. This will allow us to skip 1582 * unused nodes. But scan_nodes is lazily updated and may not cotain 1583 * enough new information. We need to do double check. 1584 */ 1585bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap) 1586{ 1587 int nid; 1588 1589 /* 1590 * quick check...making use of scan_node. 1591 * We can skip unused nodes. 1592 */ 1593 if (!nodes_empty(mem->scan_nodes)) { 1594 for (nid = first_node(mem->scan_nodes); 1595 nid < MAX_NUMNODES; 1596 nid = next_node(nid, mem->scan_nodes)) { 1597 1598 if (test_mem_cgroup_node_reclaimable(mem, nid, noswap)) 1599 return true; 1600 } 1601 } 1602 /* 1603 * Check rest of nodes. 1604 */ 1605 for_each_node_state(nid, N_HIGH_MEMORY) { 1606 if (node_isset(nid, mem->scan_nodes)) 1607 continue; 1608 if (test_mem_cgroup_node_reclaimable(mem, nid, noswap)) 1609 return true; 1610 } 1611 return false; 1612} 1613 1614#else 1615int mem_cgroup_select_victim_node(struct mem_cgroup *mem) 1616{ 1617 return 0; 1618} 1619 1620bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap) 1621{ 1622 return test_mem_cgroup_node_reclaimable(mem, 0, noswap); 1623} 1624#endif 1625 1626/* 1627 * Scan the hierarchy if needed to reclaim memory. We remember the last child 1628 * we reclaimed from, so that we don't end up penalizing one child extensively 1629 * based on its position in the children list. 1630 * 1631 * root_mem is the original ancestor that we've been reclaim from. 1632 * 1633 * We give up and return to the caller when we visit root_mem twice. 1634 * (other groups can be removed while we're walking....) 1635 * 1636 * If shrink==true, for avoiding to free too much, this returns immedieately. 1637 */ 1638static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, 1639 struct zone *zone, 1640 gfp_t gfp_mask, 1641 unsigned long reclaim_options, 1642 unsigned long *total_scanned) 1643{ 1644 struct mem_cgroup *victim; 1645 int ret, total = 0; 1646 int loop = 0; 1647 bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP; 1648 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; 1649 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; 1650 unsigned long excess; 1651 unsigned long nr_scanned; 1652 1653 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; 1654 1655 /* If memsw_is_minimum==1, swap-out is of-no-use. */ 1656 if (!check_soft && root_mem->memsw_is_minimum) 1657 noswap = true; 1658 1659 while (1) { 1660 victim = mem_cgroup_select_victim(root_mem); 1661 if (victim == root_mem) { 1662 loop++; 1663 /* 1664 * We are not draining per cpu cached charges during 1665 * soft limit reclaim because global reclaim doesn't 1666 * care about charges. It tries to free some memory and 1667 * charges will not give any. 1668 */ 1669 if (!check_soft && loop >= 1) 1670 drain_all_stock_async(root_mem); 1671 if (loop >= 2) { 1672 /* 1673 * If we have not been able to reclaim 1674 * anything, it might because there are 1675 * no reclaimable pages under this hierarchy 1676 */ 1677 if (!check_soft || !total) { 1678 css_put(&victim->css); 1679 break; 1680 } 1681 /* 1682 * We want to do more targeted reclaim. 1683 * excess >> 2 is not to excessive so as to 1684 * reclaim too much, nor too less that we keep 1685 * coming back to reclaim from this cgroup 1686 */ 1687 if (total >= (excess >> 2) || 1688 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) { 1689 css_put(&victim->css); 1690 break; 1691 } 1692 } 1693 } 1694 if (!mem_cgroup_reclaimable(victim, noswap)) { 1695 /* this cgroup's local usage == 0 */ 1696 css_put(&victim->css); 1697 continue; 1698 } 1699 /* we use swappiness of local cgroup */ 1700 if (check_soft) { 1701 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, 1702 noswap, zone, &nr_scanned); 1703 *total_scanned += nr_scanned; 1704 } else 1705 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, 1706 noswap); 1707 css_put(&victim->css); 1708 /* 1709 * At shrinking usage, we can't check we should stop here or 1710 * reclaim more. It's depends on callers. last_scanned_child 1711 * will work enough for keeping fairness under tree. 1712 */ 1713 if (shrink) 1714 return ret; 1715 total += ret; 1716 if (check_soft) { 1717 if (!res_counter_soft_limit_excess(&root_mem->res)) 1718 return total; 1719 } else if (mem_cgroup_margin(root_mem)) 1720 return total; 1721 } 1722 return total; 1723} 1724 1725/* 1726 * Check OOM-Killer is already running under our hierarchy. 1727 * If someone is running, return false. 1728 * Has to be called with memcg_oom_lock 1729 */ 1730static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) 1731{ 1732 int lock_count = -1; 1733 struct mem_cgroup *iter, *failed = NULL; 1734 bool cond = true; 1735 1736 for_each_mem_cgroup_tree_cond(iter, mem, cond) { 1737 bool locked = iter->oom_lock; 1738 1739 iter->oom_lock = true; 1740 if (lock_count == -1) 1741 lock_count = iter->oom_lock; 1742 else if (lock_count != locked) { 1743 /* 1744 * this subtree of our hierarchy is already locked 1745 * so we cannot give a lock. 1746 */ 1747 lock_count = 0; 1748 failed = iter; 1749 cond = false; 1750 } 1751 } 1752 1753 if (!failed) 1754 goto done; 1755 1756 /* 1757 * OK, we failed to lock the whole subtree so we have to clean up 1758 * what we set up to the failing subtree 1759 */ 1760 cond = true; 1761 for_each_mem_cgroup_tree_cond(iter, mem, cond) { 1762 if (iter == failed) { 1763 cond = false; 1764 continue; 1765 } 1766 iter->oom_lock = false; 1767 } 1768done: 1769 return lock_count; 1770} 1771 1772/* 1773 * Has to be called with memcg_oom_lock 1774 */ 1775static int mem_cgroup_oom_unlock(struct mem_cgroup *mem) 1776{ 1777 struct mem_cgroup *iter; 1778 1779 for_each_mem_cgroup_tree(iter, mem) 1780 iter->oom_lock = false; 1781 return 0; 1782} 1783 1784static void mem_cgroup_mark_under_oom(struct mem_cgroup *mem) 1785{ 1786 struct mem_cgroup *iter; 1787 1788 for_each_mem_cgroup_tree(iter, mem) 1789 atomic_inc(&iter->under_oom); 1790} 1791 1792static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem) 1793{ 1794 struct mem_cgroup *iter; 1795 1796 /* 1797 * When a new child is created while the hierarchy is under oom, 1798 * mem_cgroup_oom_lock() may not be called. We have to use 1799 * atomic_add_unless() here. 1800 */ 1801 for_each_mem_cgroup_tree(iter, mem) 1802 atomic_add_unless(&iter->under_oom, -1, 0); 1803} 1804 1805static DEFINE_SPINLOCK(memcg_oom_lock); 1806static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1807 1808struct oom_wait_info { 1809 struct mem_cgroup *mem; 1810 wait_queue_t wait; 1811}; 1812 1813static int memcg_oom_wake_function(wait_queue_t *wait, 1814 unsigned mode, int sync, void *arg) 1815{ 1816 struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg; 1817 struct oom_wait_info *oom_wait_info; 1818 1819 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1820 1821 if (oom_wait_info->mem == wake_mem) 1822 goto wakeup; 1823 /* if no hierarchy, no match */ 1824 if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy) 1825 return 0; 1826 /* 1827 * Both of oom_wait_info->mem and wake_mem are stable under us. 1828 * Then we can use css_is_ancestor without taking care of RCU. 1829 */ 1830 if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) && 1831 !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css)) 1832 return 0; 1833 1834wakeup: 1835 return autoremove_wake_function(wait, mode, sync, arg); 1836} 1837 1838static void memcg_wakeup_oom(struct mem_cgroup *mem) 1839{ 1840 /* for filtering, pass "mem" as argument. */ 1841 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem); 1842} 1843 1844static void memcg_oom_recover(struct mem_cgroup *mem) 1845{ 1846 if (mem && atomic_read(&mem->under_oom)) 1847 memcg_wakeup_oom(mem); 1848} 1849 1850/* 1851 * try to call OOM killer. returns false if we should exit memory-reclaim loop. 1852 */ 1853bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) 1854{ 1855 struct oom_wait_info owait; 1856 bool locked, need_to_kill; 1857 1858 owait.mem = mem; 1859 owait.wait.flags = 0; 1860 owait.wait.func = memcg_oom_wake_function; 1861 owait.wait.private = current; 1862 INIT_LIST_HEAD(&owait.wait.task_list); 1863 need_to_kill = true; 1864 mem_cgroup_mark_under_oom(mem); 1865 1866 /* At first, try to OOM lock hierarchy under mem.*/ 1867 spin_lock(&memcg_oom_lock); 1868 locked = mem_cgroup_oom_lock(mem); 1869 /* 1870 * Even if signal_pending(), we can't quit charge() loop without 1871 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL 1872 * under OOM is always welcomed, use TASK_KILLABLE here. 1873 */ 1874 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1875 if (!locked || mem->oom_kill_disable) 1876 need_to_kill = false; 1877 if (locked) 1878 mem_cgroup_oom_notify(mem); 1879 spin_unlock(&memcg_oom_lock); 1880 1881 if (need_to_kill) { 1882 finish_wait(&memcg_oom_waitq, &owait.wait); 1883 mem_cgroup_out_of_memory(mem, mask); 1884 } else { 1885 schedule(); 1886 finish_wait(&memcg_oom_waitq, &owait.wait); 1887 } 1888 spin_lock(&memcg_oom_lock); 1889 if (locked) 1890 mem_cgroup_oom_unlock(mem); 1891 memcg_wakeup_oom(mem); 1892 spin_unlock(&memcg_oom_lock); 1893 1894 mem_cgroup_unmark_under_oom(mem); 1895 1896 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current)) 1897 return false; 1898 /* Give chance to dying process */ 1899 schedule_timeout(1); 1900 return true; 1901} 1902 1903/* 1904 * Currently used to update mapped file statistics, but the routine can be 1905 * generalized to update other statistics as well. 1906 * 1907 * Notes: Race condition 1908 * 1909 * We usually use page_cgroup_lock() for accessing page_cgroup member but 1910 * it tends to be costly. But considering some conditions, we doesn't need 1911 * to do so _always_. 1912 * 1913 * Considering "charge", lock_page_cgroup() is not required because all 1914 * file-stat operations happen after a page is attached to radix-tree. There 1915 * are no race with "charge". 1916 * 1917 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup 1918 * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even 1919 * if there are race with "uncharge". Statistics itself is properly handled 1920 * by flags. 1921 * 1922 * Considering "move", this is an only case we see a race. To make the race 1923 * small, we check MEM_CGROUP_ON_MOVE percpu value and detect there are 1924 * possibility of race condition. If there is, we take a lock. 1925 */ 1926 1927void mem_cgroup_update_page_stat(struct page *page, 1928 enum mem_cgroup_page_stat_item idx, int val) 1929{ 1930 struct mem_cgroup *mem; 1931 struct page_cgroup *pc = lookup_page_cgroup(page); 1932 bool need_unlock = false; 1933 unsigned long uninitialized_var(flags); 1934 1935 if (unlikely(!pc)) 1936 return; 1937 1938 rcu_read_lock(); 1939 mem = pc->mem_cgroup; 1940 if (unlikely(!mem || !PageCgroupUsed(pc))) 1941 goto out; 1942 /* pc->mem_cgroup is unstable ? */ 1943 if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) { 1944 /* take a lock against to access pc->mem_cgroup */ 1945 move_lock_page_cgroup(pc, &flags); 1946 need_unlock = true; 1947 mem = pc->mem_cgroup; 1948 if (!mem || !PageCgroupUsed(pc)) 1949 goto out; 1950 } 1951 1952 switch (idx) { 1953 case MEMCG_NR_FILE_MAPPED: 1954 if (val > 0) 1955 SetPageCgroupFileMapped(pc); 1956 else if (!page_mapped(page)) 1957 ClearPageCgroupFileMapped(pc); 1958 idx = MEM_CGROUP_STAT_FILE_MAPPED; 1959 break; 1960 default: 1961 BUG(); 1962 } 1963 1964 this_cpu_add(mem->stat->count[idx], val); 1965 1966out: 1967 if (unlikely(need_unlock)) 1968 move_unlock_page_cgroup(pc, &flags); 1969 rcu_read_unlock(); 1970 return; 1971} 1972EXPORT_SYMBOL(mem_cgroup_update_page_stat); 1973 1974/* 1975 * size of first charge trial. "32" comes from vmscan.c's magic value. 1976 * TODO: maybe necessary to use big numbers in big irons. 1977 */ 1978#define CHARGE_BATCH 32U 1979struct memcg_stock_pcp { 1980 struct mem_cgroup *cached; /* this never be root cgroup */ 1981 unsigned int nr_pages; 1982 struct work_struct work; 1983 unsigned long flags; 1984#define FLUSHING_CACHED_CHARGE (0) 1985}; 1986static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 1987static DEFINE_MUTEX(percpu_charge_mutex); 1988 1989/* 1990 * Try to consume stocked charge on this cpu. If success, one page is consumed 1991 * from local stock and true is returned. If the stock is 0 or charges from a 1992 * cgroup which is not current target, returns false. This stock will be 1993 * refilled. 1994 */ 1995static bool consume_stock(struct mem_cgroup *mem) 1996{ 1997 struct memcg_stock_pcp *stock; 1998 bool ret = true; 1999 2000 stock = &get_cpu_var(memcg_stock); 2001 if (mem == stock->cached && stock->nr_pages) 2002 stock->nr_pages--; 2003 else /* need to call res_counter_charge */ 2004 ret = false; 2005 put_cpu_var(memcg_stock); 2006 return ret; 2007} 2008 2009/* 2010 * Returns stocks cached in percpu to res_counter and reset cached information. 2011 */ 2012static void drain_stock(struct memcg_stock_pcp *stock) 2013{ 2014 struct mem_cgroup *old = stock->cached; 2015 2016 if (stock->nr_pages) { 2017 unsigned long bytes = stock->nr_pages * PAGE_SIZE; 2018 2019 res_counter_uncharge(&old->res, bytes); 2020 if (do_swap_account) 2021 res_counter_uncharge(&old->memsw, bytes); 2022 stock->nr_pages = 0; 2023 } 2024 stock->cached = NULL; 2025} 2026 2027/* 2028 * This must be called under preempt disabled or must be called by 2029 * a thread which is pinned to local cpu. 2030 */ 2031static void drain_local_stock(struct work_struct *dummy) 2032{ 2033 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock); 2034 drain_stock(stock); 2035 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2036} 2037 2038/* 2039 * Cache charges(val) which is from res_counter, to local per_cpu area. 2040 * This will be consumed by consume_stock() function, later. 2041 */ 2042static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages) 2043{ 2044 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); 2045 2046 if (stock->cached != mem) { /* reset if necessary */ 2047 drain_stock(stock); 2048 stock->cached = mem; 2049 } 2050 stock->nr_pages += nr_pages; 2051 put_cpu_var(memcg_stock); 2052} 2053 2054/* 2055 * Tries to drain stocked charges in other cpus. This function is asynchronous 2056 * and just put a work per cpu for draining localy on each cpu. Caller can 2057 * expects some charges will be back to res_counter later but cannot wait for 2058 * it. 2059 */ 2060static void drain_all_stock_async(struct mem_cgroup *root_mem) 2061{ 2062 int cpu, curcpu; 2063 /* 2064 * If someone calls draining, avoid adding more kworker runs. 2065 */ 2066 if (!mutex_trylock(&percpu_charge_mutex)) 2067 return; 2068 /* Notify other cpus that system-wide "drain" is running */ 2069 get_online_cpus(); 2070 /* 2071 * Get a hint for avoiding draining charges on the current cpu, 2072 * which must be exhausted by our charging. It is not required that 2073 * this be a precise check, so we use raw_smp_processor_id() instead of 2074 * getcpu()/putcpu(). 2075 */ 2076 curcpu = raw_smp_processor_id(); 2077 for_each_online_cpu(cpu) { 2078 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2079 struct mem_cgroup *mem; 2080 2081 if (cpu == curcpu) 2082 continue; 2083 2084 mem = stock->cached; 2085 if (!mem) 2086 continue; 2087 if (mem != root_mem) { 2088 if (!root_mem->use_hierarchy) 2089 continue; 2090 /* check whether "mem" is under tree of "root_mem" */ 2091 if (!css_is_ancestor(&mem->css, &root_mem->css)) 2092 continue; 2093 } 2094 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) 2095 schedule_work_on(cpu, &stock->work); 2096 } 2097 put_online_cpus(); 2098 mutex_unlock(&percpu_charge_mutex); 2099 /* We don't wait for flush_work */ 2100} 2101 2102/* This is a synchronous drain interface. */ 2103static void drain_all_stock_sync(void) 2104{ 2105 /* called when force_empty is called */ 2106 mutex_lock(&percpu_charge_mutex); 2107 schedule_on_each_cpu(drain_local_stock); 2108 mutex_unlock(&percpu_charge_mutex); 2109} 2110 2111/* 2112 * This function drains percpu counter value from DEAD cpu and 2113 * move it to local cpu. Note that this function can be preempted. 2114 */ 2115static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu) 2116{ 2117 int i; 2118 2119 spin_lock(&mem->pcp_counter_lock); 2120 for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) { 2121 long x = per_cpu(mem->stat->count[i], cpu); 2122 2123 per_cpu(mem->stat->count[i], cpu) = 0; 2124 mem->nocpu_base.count[i] += x; 2125 } 2126 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 2127 unsigned long x = per_cpu(mem->stat->events[i], cpu); 2128 2129 per_cpu(mem->stat->events[i], cpu) = 0; 2130 mem->nocpu_base.events[i] += x; 2131 } 2132 /* need to clear ON_MOVE value, works as a kind of lock. */ 2133 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0; 2134 spin_unlock(&mem->pcp_counter_lock); 2135} 2136 2137static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu) 2138{ 2139 int idx = MEM_CGROUP_ON_MOVE; 2140 2141 spin_lock(&mem->pcp_counter_lock); 2142 per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx]; 2143 spin_unlock(&mem->pcp_counter_lock); 2144} 2145 2146static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb, 2147 unsigned long action, 2148 void *hcpu) 2149{ 2150 int cpu = (unsigned long)hcpu; 2151 struct memcg_stock_pcp *stock; 2152 struct mem_cgroup *iter; 2153 2154 if ((action == CPU_ONLINE)) { 2155 for_each_mem_cgroup_all(iter) 2156 synchronize_mem_cgroup_on_move(iter, cpu); 2157 return NOTIFY_OK; 2158 } 2159 2160 if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN) 2161 return NOTIFY_OK; 2162 2163 for_each_mem_cgroup_all(iter) 2164 mem_cgroup_drain_pcp_counter(iter, cpu); 2165 2166 stock = &per_cpu(memcg_stock, cpu); 2167 drain_stock(stock); 2168 return NOTIFY_OK; 2169} 2170 2171 2172/* See __mem_cgroup_try_charge() for details */ 2173enum { 2174 CHARGE_OK, /* success */ 2175 CHARGE_RETRY, /* need to retry but retry is not bad */ 2176 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */ 2177 CHARGE_WOULDBLOCK, /* GFP_WAIT wasn't set and no enough res. */ 2178 CHARGE_OOM_DIE, /* the current is killed because of OOM */ 2179}; 2180 2181static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask, 2182 unsigned int nr_pages, bool oom_check) 2183{ 2184 unsigned long csize = nr_pages * PAGE_SIZE; 2185 struct mem_cgroup *mem_over_limit; 2186 struct res_counter *fail_res; 2187 unsigned long flags = 0; 2188 int ret; 2189 2190 ret = res_counter_charge(&mem->res, csize, &fail_res); 2191 2192 if (likely(!ret)) { 2193 if (!do_swap_account) 2194 return CHARGE_OK; 2195 ret = res_counter_charge(&mem->memsw, csize, &fail_res); 2196 if (likely(!ret)) 2197 return CHARGE_OK; 2198 2199 res_counter_uncharge(&mem->res, csize); 2200 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); 2201 flags |= MEM_CGROUP_RECLAIM_NOSWAP; 2202 } else 2203 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); 2204 /* 2205 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch 2206 * of regular pages (CHARGE_BATCH), or a single regular page (1). 2207 * 2208 * Never reclaim on behalf of optional batching, retry with a 2209 * single page instead. 2210 */ 2211 if (nr_pages == CHARGE_BATCH) 2212 return CHARGE_RETRY; 2213 2214 if (!(gfp_mask & __GFP_WAIT)) 2215 return CHARGE_WOULDBLOCK; 2216 2217 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL, 2218 gfp_mask, flags, NULL); 2219 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2220 return CHARGE_RETRY; 2221 /* 2222 * Even though the limit is exceeded at this point, reclaim 2223 * may have been able to free some pages. Retry the charge 2224 * before killing the task. 2225 * 2226 * Only for regular pages, though: huge pages are rather 2227 * unlikely to succeed so close to the limit, and we fall back 2228 * to regular pages anyway in case of failure. 2229 */ 2230 if (nr_pages == 1 && ret) 2231 return CHARGE_RETRY; 2232 2233 /* 2234 * At task move, charge accounts can be doubly counted. So, it's 2235 * better to wait until the end of task_move if something is going on. 2236 */ 2237 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2238 return CHARGE_RETRY; 2239 2240 /* If we don't need to call oom-killer at el, return immediately */ 2241 if (!oom_check) 2242 return CHARGE_NOMEM; 2243 /* check OOM */ 2244 if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) 2245 return CHARGE_OOM_DIE; 2246 2247 return CHARGE_RETRY; 2248} 2249 2250/* 2251 * Unlike exported interface, "oom" parameter is added. if oom==true, 2252 * oom-killer can be invoked. 2253 */ 2254static int __mem_cgroup_try_charge(struct mm_struct *mm, 2255 gfp_t gfp_mask, 2256 unsigned int nr_pages, 2257 struct mem_cgroup **memcg, 2258 bool oom) 2259{ 2260 unsigned int batch = max(CHARGE_BATCH, nr_pages); 2261 int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; 2262 struct mem_cgroup *mem = NULL; 2263 int ret; 2264 2265 /* 2266 * Unlike gloval-vm's OOM-kill, we're not in memory shortage 2267 * in system level. So, allow to go ahead dying process in addition to 2268 * MEMDIE process. 2269 */ 2270 if (unlikely(test_thread_flag(TIF_MEMDIE) 2271 || fatal_signal_pending(current))) 2272 goto bypass; 2273 2274 /* 2275 * We always charge the cgroup the mm_struct belongs to. 2276 * The mm_struct's mem_cgroup changes on task migration if the 2277 * thread group leader migrates. It's possible that mm is not 2278 * set, if so charge the init_mm (happens for pagecache usage). 2279 */ 2280 if (!*memcg && !mm) 2281 goto bypass; 2282again: 2283 if (*memcg) { /* css should be a valid one */ 2284 mem = *memcg; 2285 VM_BUG_ON(css_is_removed(&mem->css)); 2286 if (mem_cgroup_is_root(mem)) 2287 goto done; 2288 if (nr_pages == 1 && consume_stock(mem)) 2289 goto done; 2290 css_get(&mem->css); 2291 } else { 2292 struct task_struct *p; 2293 2294 rcu_read_lock(); 2295 p = rcu_dereference(mm->owner); 2296 /* 2297 * Because we don't have task_lock(), "p" can exit. 2298 * In that case, "mem" can point to root or p can be NULL with 2299 * race with swapoff. Then, we have small risk of mis-accouning. 2300 * But such kind of mis-account by race always happens because 2301 * we don't have cgroup_mutex(). It's overkill and we allo that 2302 * small race, here. 2303 * (*) swapoff at el will charge against mm-struct not against 2304 * task-struct. So, mm->owner can be NULL. 2305 */ 2306 mem = mem_cgroup_from_task(p); 2307 if (!mem || mem_cgroup_is_root(mem)) { 2308 rcu_read_unlock(); 2309 goto done; 2310 } 2311 if (nr_pages == 1 && consume_stock(mem)) { 2312 /* 2313 * It seems dagerous to access memcg without css_get(). 2314 * But considering how consume_stok works, it's not 2315 * necessary. If consume_stock success, some charges 2316 * from this memcg are cached on this cpu. So, we 2317 * don't need to call css_get()/css_tryget() before 2318 * calling consume_stock(). 2319 */ 2320 rcu_read_unlock(); 2321 goto done; 2322 } 2323 /* after here, we may be blocked. we need to get refcnt */ 2324 if (!css_tryget(&mem->css)) { 2325 rcu_read_unlock(); 2326 goto again; 2327 } 2328 rcu_read_unlock(); 2329 } 2330 2331 do { 2332 bool oom_check; 2333 2334 /* If killed, bypass charge */ 2335 if (fatal_signal_pending(current)) { 2336 css_put(&mem->css); 2337 goto bypass; 2338 } 2339 2340 oom_check = false; 2341 if (oom && !nr_oom_retries) { 2342 oom_check = true; 2343 nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; 2344 } 2345 2346 ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check); 2347 switch (ret) { 2348 case CHARGE_OK: 2349 break; 2350 case CHARGE_RETRY: /* not in OOM situation but retry */ 2351 batch = nr_pages; 2352 css_put(&mem->css); 2353 mem = NULL; 2354 goto again; 2355 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */ 2356 css_put(&mem->css); 2357 goto nomem; 2358 case CHARGE_NOMEM: /* OOM routine works */ 2359 if (!oom) { 2360 css_put(&mem->css); 2361 goto nomem; 2362 } 2363 /* If oom, we never return -ENOMEM */ 2364 nr_oom_retries--; 2365 break; 2366 case CHARGE_OOM_DIE: /* Killed by OOM Killer */ 2367 css_put(&mem->css); 2368 goto bypass; 2369 } 2370 } while (ret != CHARGE_OK); 2371 2372 if (batch > nr_pages) 2373 refill_stock(mem, batch - nr_pages); 2374 css_put(&mem->css); 2375done: 2376 *memcg = mem; 2377 return 0; 2378nomem: 2379 *memcg = NULL; 2380 return -ENOMEM; 2381bypass: 2382 *memcg = NULL; 2383 return 0; 2384} 2385 2386/* 2387 * Somemtimes we have to undo a charge we got by try_charge(). 2388 * This function is for that and do uncharge, put css's refcnt. 2389 * gotten by try_charge(). 2390 */ 2391static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem, 2392 unsigned int nr_pages) 2393{ 2394 if (!mem_cgroup_is_root(mem)) { 2395 unsigned long bytes = nr_pages * PAGE_SIZE; 2396 2397 res_counter_uncharge(&mem->res, bytes); 2398 if (do_swap_account) 2399 res_counter_uncharge(&mem->memsw, bytes); 2400 } 2401} 2402 2403/* 2404 * A helper function to get mem_cgroup from ID. must be called under 2405 * rcu_read_lock(). The caller must check css_is_removed() or some if 2406 * it's concern. (dropping refcnt from swap can be called against removed 2407 * memcg.) 2408 */ 2409static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) 2410{ 2411 struct cgroup_subsys_state *css; 2412 2413 /* ID 0 is unused ID */ 2414 if (!id) 2415 return NULL; 2416 css = css_lookup(&mem_cgroup_subsys, id); 2417 if (!css) 2418 return NULL; 2419 return container_of(css, struct mem_cgroup, css); 2420} 2421 2422struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) 2423{ 2424 struct mem_cgroup *mem = NULL; 2425 struct page_cgroup *pc; 2426 unsigned short id; 2427 swp_entry_t ent; 2428 2429 VM_BUG_ON(!PageLocked(page)); 2430 2431 pc = lookup_page_cgroup(page); 2432 lock_page_cgroup(pc); 2433 if (PageCgroupUsed(pc)) { 2434 mem = pc->mem_cgroup; 2435 if (mem && !css_tryget(&mem->css)) 2436 mem = NULL; 2437 } else if (PageSwapCache(page)) { 2438 ent.val = page_private(page); 2439 id = lookup_swap_cgroup(ent); 2440 rcu_read_lock(); 2441 mem = mem_cgroup_lookup(id); 2442 if (mem && !css_tryget(&mem->css)) 2443 mem = NULL; 2444 rcu_read_unlock(); 2445 } 2446 unlock_page_cgroup(pc); 2447 return mem; 2448} 2449 2450static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, 2451 struct page *page, 2452 unsigned int nr_pages, 2453 struct page_cgroup *pc, 2454 enum charge_type ctype) 2455{ 2456 lock_page_cgroup(pc); 2457 if (unlikely(PageCgroupUsed(pc))) { 2458 unlock_page_cgroup(pc); 2459 __mem_cgroup_cancel_charge(mem, nr_pages); 2460 return; 2461 } 2462 /* 2463 * we don't need page_cgroup_lock about tail pages, becase they are not 2464 * accessed by any other context at this point. 2465 */ 2466 pc->mem_cgroup = mem; 2467 /* 2468 * We access a page_cgroup asynchronously without lock_page_cgroup(). 2469 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup 2470 * is accessed after testing USED bit. To make pc->mem_cgroup visible 2471 * before USED bit, we need memory barrier here. 2472 * See mem_cgroup_add_lru_list(), etc. 2473 */ 2474 smp_wmb(); 2475 switch (ctype) { 2476 case MEM_CGROUP_CHARGE_TYPE_CACHE: 2477 case MEM_CGROUP_CHARGE_TYPE_SHMEM: 2478 SetPageCgroupCache(pc); 2479 SetPageCgroupUsed(pc); 2480 break; 2481 case MEM_CGROUP_CHARGE_TYPE_MAPPED: 2482 ClearPageCgroupCache(pc); 2483 SetPageCgroupUsed(pc); 2484 break; 2485 default: 2486 break; 2487 } 2488 2489 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages); 2490 unlock_page_cgroup(pc); 2491 /* 2492 * "charge_statistics" updated event counter. Then, check it. 2493 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. 2494 * if they exceeds softlimit. 2495 */ 2496 memcg_check_events(mem, page); 2497} 2498 2499#ifdef CONFIG_TRANSPARENT_HUGEPAGE 2500 2501#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\ 2502 (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION)) 2503/* 2504 * Because tail pages are not marked as "used", set it. We're under 2505 * zone->lru_lock, 'splitting on pmd' and compund_lock. 2506 */ 2507void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail) 2508{ 2509 struct page_cgroup *head_pc = lookup_page_cgroup(head); 2510 struct page_cgroup *tail_pc = lookup_page_cgroup(tail); 2511 unsigned long flags; 2512 2513 if (mem_cgroup_disabled()) 2514 return; 2515 /* 2516 * We have no races with charge/uncharge but will have races with 2517 * page state accounting. 2518 */ 2519 move_lock_page_cgroup(head_pc, &flags); 2520 2521 tail_pc->mem_cgroup = head_pc->mem_cgroup; 2522 smp_wmb(); /* see __commit_charge() */ 2523 if (PageCgroupAcctLRU(head_pc)) { 2524 enum lru_list lru; 2525 struct mem_cgroup_per_zone *mz; 2526 2527 /* 2528 * LRU flags cannot be copied because we need to add tail 2529 *.page to LRU by generic call and our hook will be called. 2530 * We hold lru_lock, then, reduce counter directly. 2531 */ 2532 lru = page_lru(head); 2533 mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head); 2534 MEM_CGROUP_ZSTAT(mz, lru) -= 1; 2535 } 2536 tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT; 2537 move_unlock_page_cgroup(head_pc, &flags); 2538} 2539#endif 2540 2541/** 2542 * mem_cgroup_move_account - move account of the page 2543 * @page: the page 2544 * @nr_pages: number of regular pages (>1 for huge pages) 2545 * @pc: page_cgroup of the page. 2546 * @from: mem_cgroup which the page is moved from. 2547 * @to: mem_cgroup which the page is moved to. @from != @to. 2548 * @uncharge: whether we should call uncharge and css_put against @from. 2549 * 2550 * The caller must confirm following. 2551 * - page is not on LRU (isolate_page() is useful.) 2552 * - compound_lock is held when nr_pages > 1 2553 * 2554 * This function doesn't do "charge" nor css_get to new cgroup. It should be 2555 * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is 2556 * true, this function does "uncharge" from old cgroup, but it doesn't if 2557 * @uncharge is false, so a caller should do "uncharge". 2558 */ 2559static int mem_cgroup_move_account(struct page *page, 2560 unsigned int nr_pages, 2561 struct page_cgroup *pc, 2562 struct mem_cgroup *from, 2563 struct mem_cgroup *to, 2564 bool uncharge) 2565{ 2566 unsigned long flags; 2567 int ret; 2568 2569 VM_BUG_ON(from == to); 2570 VM_BUG_ON(PageLRU(page)); 2571 /* 2572 * The page is isolated from LRU. So, collapse function 2573 * will not handle this page. But page splitting can happen. 2574 * Do this check under compound_page_lock(). The caller should 2575 * hold it. 2576 */ 2577 ret = -EBUSY; 2578 if (nr_pages > 1 && !PageTransHuge(page)) 2579 goto out; 2580 2581 lock_page_cgroup(pc); 2582 2583 ret = -EINVAL; 2584 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from) 2585 goto unlock; 2586 2587 move_lock_page_cgroup(pc, &flags); 2588 2589 if (PageCgroupFileMapped(pc)) { 2590 /* Update mapped_file data for mem_cgroup */ 2591 preempt_disable(); 2592 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); 2593 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); 2594 preempt_enable(); 2595 } 2596 mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages); 2597 if (uncharge) 2598 /* This is not "cancel", but cancel_charge does all we need. */ 2599 __mem_cgroup_cancel_charge(from, nr_pages); 2600 2601 /* caller should have done css_get */ 2602 pc->mem_cgroup = to; 2603 mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages); 2604 /* 2605 * We charges against "to" which may not have any tasks. Then, "to" 2606 * can be under rmdir(). But in current implementation, caller of 2607 * this function is just force_empty() and move charge, so it's 2608 * guaranteed that "to" is never removed. So, we don't check rmdir 2609 * status here. 2610 */ 2611 move_unlock_page_cgroup(pc, &flags); 2612 ret = 0; 2613unlock: 2614 unlock_page_cgroup(pc); 2615 /* 2616 * check events 2617 */ 2618 memcg_check_events(to, page); 2619 memcg_check_events(from, page); 2620out: 2621 return ret; 2622} 2623 2624/* 2625 * move charges to its parent. 2626 */ 2627 2628static int mem_cgroup_move_parent(struct page *page, 2629 struct page_cgroup *pc, 2630 struct mem_cgroup *child, 2631 gfp_t gfp_mask) 2632{ 2633 struct cgroup *cg = child->css.cgroup; 2634 struct cgroup *pcg = cg->parent; 2635 struct mem_cgroup *parent; 2636 unsigned int nr_pages; 2637 unsigned long uninitialized_var(flags); 2638 int ret; 2639 2640 /* Is ROOT ? */ 2641 if (!pcg) 2642 return -EINVAL; 2643 2644 ret = -EBUSY; 2645 if (!get_page_unless_zero(page)) 2646 goto out; 2647 if (isolate_lru_page(page)) 2648 goto put; 2649 2650 nr_pages = hpage_nr_pages(page); 2651 2652 parent = mem_cgroup_from_cont(pcg); 2653 ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false); 2654 if (ret || !parent) 2655 goto put_back; 2656 2657 if (nr_pages > 1) 2658 flags = compound_lock_irqsave(page); 2659 2660 ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true); 2661 if (ret) 2662 __mem_cgroup_cancel_charge(parent, nr_pages); 2663 2664 if (nr_pages > 1) 2665 compound_unlock_irqrestore(page, flags); 2666put_back: 2667 putback_lru_page(page); 2668put: 2669 put_page(page); 2670out: 2671 return ret; 2672} 2673 2674/* 2675 * Charge the memory controller for page usage. 2676 * Return 2677 * 0 if the charge was successful 2678 * < 0 if the cgroup is over its limit 2679 */ 2680static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, 2681 gfp_t gfp_mask, enum charge_type ctype) 2682{ 2683 struct mem_cgroup *mem = NULL; 2684 unsigned int nr_pages = 1; 2685 struct page_cgroup *pc; 2686 bool oom = true; 2687 int ret; 2688 2689 if (PageTransHuge(page)) { 2690 nr_pages <<= compound_order(page); 2691 VM_BUG_ON(!PageTransHuge(page)); 2692 /* 2693 * Never OOM-kill a process for a huge page. The 2694 * fault handler will fall back to regular pages. 2695 */ 2696 oom = false; 2697 } 2698 2699 pc = lookup_page_cgroup(page); 2700 BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */ 2701 2702 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom); 2703 if (ret || !mem) 2704 return ret; 2705 2706 __mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype); 2707 return 0; 2708} 2709 2710int mem_cgroup_newpage_charge(struct page *page, 2711 struct mm_struct *mm, gfp_t gfp_mask) 2712{ 2713 if (mem_cgroup_disabled()) 2714 return 0; 2715 /* 2716 * If already mapped, we don't have to account. 2717 * If page cache, page->mapping has address_space. 2718 * But page->mapping may have out-of-use anon_vma pointer, 2719 * detecit it by PageAnon() check. newly-mapped-anon's page->mapping 2720 * is NULL. 2721 */ 2722 if (page_mapped(page) || (page->mapping && !PageAnon(page))) 2723 return 0; 2724 if (unlikely(!mm)) 2725 mm = &init_mm; 2726 return mem_cgroup_charge_common(page, mm, gfp_mask, 2727 MEM_CGROUP_CHARGE_TYPE_MAPPED); 2728} 2729 2730static void 2731__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, 2732 enum charge_type ctype); 2733 2734static void 2735__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem, 2736 enum charge_type ctype) 2737{ 2738 struct page_cgroup *pc = lookup_page_cgroup(page); 2739 /* 2740 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page 2741 * is already on LRU. It means the page may on some other page_cgroup's 2742 * LRU. Take care of it. 2743 */ 2744 mem_cgroup_lru_del_before_commit(page); 2745 __mem_cgroup_commit_charge(mem, page, 1, pc, ctype); 2746 mem_cgroup_lru_add_after_commit(page); 2747 return; 2748} 2749 2750int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 2751 gfp_t gfp_mask) 2752{ 2753 struct mem_cgroup *mem = NULL; 2754 int ret; 2755 2756 if (mem_cgroup_disabled()) 2757 return 0; 2758 if (PageCompound(page)) 2759 return 0; 2760 /* 2761 * Corner case handling. This is called from add_to_page_cache() 2762 * in usual. But some FS (shmem) precharges this page before calling it 2763 * and call add_to_page_cache() with GFP_NOWAIT. 2764 * 2765 * For GFP_NOWAIT case, the page may be pre-charged before calling 2766 * add_to_page_cache(). (See shmem.c) check it here and avoid to call 2767 * charge twice. (It works but has to pay a bit larger cost.) 2768 * And when the page is SwapCache, it should take swap information 2769 * into account. This is under lock_page() now. 2770 */ 2771 if (!(gfp_mask & __GFP_WAIT)) { 2772 struct page_cgroup *pc; 2773 2774 pc = lookup_page_cgroup(page); 2775 if (!pc) 2776 return 0; 2777 lock_page_cgroup(pc); 2778 if (PageCgroupUsed(pc)) { 2779 unlock_page_cgroup(pc); 2780 return 0; 2781 } 2782 unlock_page_cgroup(pc); 2783 } 2784 2785 if (unlikely(!mm)) 2786 mm = &init_mm; 2787 2788 if (page_is_file_cache(page)) { 2789 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true); 2790 if (ret || !mem) 2791 return ret; 2792 2793 /* 2794 * FUSE reuses pages without going through the final 2795 * put that would remove them from the LRU list, make 2796 * sure that they get relinked properly. 2797 */ 2798 __mem_cgroup_commit_charge_lrucare(page, mem, 2799 MEM_CGROUP_CHARGE_TYPE_CACHE); 2800 return ret; 2801 } 2802 /* shmem */ 2803 if (PageSwapCache(page)) { 2804 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem); 2805 if (!ret) 2806 __mem_cgroup_commit_charge_swapin(page, mem, 2807 MEM_CGROUP_CHARGE_TYPE_SHMEM); 2808 } else 2809 ret = mem_cgroup_charge_common(page, mm, gfp_mask, 2810 MEM_CGROUP_CHARGE_TYPE_SHMEM); 2811 2812 return ret; 2813} 2814 2815/* 2816 * While swap-in, try_charge -> commit or cancel, the page is locked. 2817 * And when try_charge() successfully returns, one refcnt to memcg without 2818 * struct page_cgroup is acquired. This refcnt will be consumed by 2819 * "commit()" or removed by "cancel()" 2820 */ 2821int mem_cgroup_try_charge_swapin(struct mm_struct *mm, 2822 struct page *page, 2823 gfp_t mask, struct mem_cgroup **ptr) 2824{ 2825 struct mem_cgroup *mem; 2826 int ret; 2827 2828 *ptr = NULL; 2829 2830 if (mem_cgroup_disabled()) 2831 return 0; 2832 2833 if (!do_swap_account) 2834 goto charge_cur_mm; 2835 /* 2836 * A racing thread's fault, or swapoff, may have already updated 2837 * the pte, and even removed page from swap cache: in those cases 2838 * do_swap_page()'s pte_same() test will fail; but there's also a 2839 * KSM case which does need to charge the page. 2840 */ 2841 if (!PageSwapCache(page)) 2842 goto charge_cur_mm; 2843 mem = try_get_mem_cgroup_from_page(page); 2844 if (!mem) 2845 goto charge_cur_mm; 2846 *ptr = mem; 2847 ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true); 2848 css_put(&mem->css); 2849 return ret; 2850charge_cur_mm: 2851 if (unlikely(!mm)) 2852 mm = &init_mm; 2853 return __mem_cgroup_try_charge(mm, mask, 1, ptr, true); 2854} 2855 2856static void 2857__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, 2858 enum charge_type ctype) 2859{ 2860 if (mem_cgroup_disabled()) 2861 return; 2862 if (!ptr) 2863 return; 2864 cgroup_exclude_rmdir(&ptr->css); 2865 2866 __mem_cgroup_commit_charge_lrucare(page, ptr, ctype); 2867 /* 2868 * Now swap is on-memory. This means this page may be 2869 * counted both as mem and swap....double count. 2870 * Fix it by uncharging from memsw. Basically, this SwapCache is stable 2871 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page() 2872 * may call delete_from_swap_cache() before reach here. 2873 */ 2874 if (do_swap_account && PageSwapCache(page)) { 2875 swp_entry_t ent = {.val = page_private(page)}; 2876 unsigned short id; 2877 struct mem_cgroup *memcg; 2878 2879 id = swap_cgroup_record(ent, 0); 2880 rcu_read_lock(); 2881 memcg = mem_cgroup_lookup(id); 2882 if (memcg) { 2883 /* 2884 * This recorded memcg can be obsolete one. So, avoid 2885 * calling css_tryget 2886 */ 2887 if (!mem_cgroup_is_root(memcg)) 2888 res_counter_uncharge(&memcg->memsw, PAGE_SIZE); 2889 mem_cgroup_swap_statistics(memcg, false); 2890 mem_cgroup_put(memcg); 2891 } 2892 rcu_read_unlock(); 2893 } 2894 /* 2895 * At swapin, we may charge account against cgroup which has no tasks. 2896 * So, rmdir()->pre_destroy() can be called while we do this charge. 2897 * In that case, we need to call pre_destroy() again. check it here. 2898 */ 2899 cgroup_release_and_wakeup_rmdir(&ptr->css); 2900} 2901 2902void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) 2903{ 2904 __mem_cgroup_commit_charge_swapin(page, ptr, 2905 MEM_CGROUP_CHARGE_TYPE_MAPPED); 2906} 2907 2908void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) 2909{ 2910 if (mem_cgroup_disabled()) 2911 return; 2912 if (!mem) 2913 return; 2914 __mem_cgroup_cancel_charge(mem, 1); 2915} 2916 2917static void mem_cgroup_do_uncharge(struct mem_cgroup *mem, 2918 unsigned int nr_pages, 2919 const enum charge_type ctype) 2920{ 2921 struct memcg_batch_info *batch = NULL; 2922 bool uncharge_memsw = true; 2923 2924 /* If swapout, usage of swap doesn't decrease */ 2925 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) 2926 uncharge_memsw = false; 2927 2928 batch = ¤t->memcg_batch; 2929 /* 2930 * In usual, we do css_get() when we remember memcg pointer. 2931 * But in this case, we keep res->usage until end of a series of 2932 * uncharges. Then, it's ok to ignore memcg's refcnt. 2933 */ 2934 if (!batch->memcg) 2935 batch->memcg = mem; 2936 /* 2937 * do_batch > 0 when unmapping pages or inode invalidate/truncate. 2938 * In those cases, all pages freed continuously can be expected to be in 2939 * the same cgroup and we have chance to coalesce uncharges. 2940 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE) 2941 * because we want to do uncharge as soon as possible. 2942 */ 2943 2944 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE)) 2945 goto direct_uncharge; 2946 2947 if (nr_pages > 1) 2948 goto direct_uncharge; 2949 2950 /* 2951 * In typical case, batch->memcg == mem. This means we can 2952 * merge a series of uncharges to an uncharge of res_counter. 2953 * If not, we uncharge res_counter ony by one. 2954 */ 2955 if (batch->memcg != mem) 2956 goto direct_uncharge; 2957 /* remember freed charge and uncharge it later */ 2958 batch->nr_pages++; 2959 if (uncharge_memsw) 2960 batch->memsw_nr_pages++; 2961 return; 2962direct_uncharge: 2963 res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE); 2964 if (uncharge_memsw) 2965 res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE); 2966 if (unlikely(batch->memcg != mem)) 2967 memcg_oom_recover(mem); 2968 return; 2969} 2970 2971/* 2972 * uncharge if !page_mapped(page) 2973 */ 2974static struct mem_cgroup * 2975__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) 2976{ 2977 struct mem_cgroup *mem = NULL; 2978 unsigned int nr_pages = 1; 2979 struct page_cgroup *pc; 2980 2981 if (mem_cgroup_disabled()) 2982 return NULL; 2983 2984 if (PageSwapCache(page)) 2985 return NULL; 2986 2987 if (PageTransHuge(page)) { 2988 nr_pages <<= compound_order(page); 2989 VM_BUG_ON(!PageTransHuge(page)); 2990 } 2991 /* 2992 * Check if our page_cgroup is valid 2993 */ 2994 pc = lookup_page_cgroup(page); 2995 if (unlikely(!pc || !PageCgroupUsed(pc))) 2996 return NULL; 2997 2998 lock_page_cgroup(pc); 2999 3000 mem = pc->mem_cgroup; 3001 3002 if (!PageCgroupUsed(pc)) 3003 goto unlock_out; 3004 3005 switch (ctype) { 3006 case MEM_CGROUP_CHARGE_TYPE_MAPPED: 3007 case MEM_CGROUP_CHARGE_TYPE_DROP: 3008 /* See mem_cgroup_prepare_migration() */ 3009 if (page_mapped(page) || PageCgroupMigration(pc)) 3010 goto unlock_out; 3011 break; 3012 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT: 3013 if (!PageAnon(page)) { /* Shared memory */ 3014 if (page->mapping && !page_is_file_cache(page)) 3015 goto unlock_out; 3016 } else if (page_mapped(page)) /* Anon */ 3017 goto unlock_out; 3018 break; 3019 default: 3020 break; 3021 } 3022 3023 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages); 3024 3025 ClearPageCgroupUsed(pc); 3026 /* 3027 * pc->mem_cgroup is not cleared here. It will be accessed when it's 3028 * freed from LRU. This is safe because uncharged page is expected not 3029 * to be reused (freed soon). Exception is SwapCache, it's handled by 3030 * special functions. 3031 */ 3032 3033 unlock_page_cgroup(pc); 3034 /* 3035 * even after unlock, we have mem->res.usage here and this memcg 3036 * will never be freed. 3037 */ 3038 memcg_check_events(mem, page); 3039 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { 3040 mem_cgroup_swap_statistics(mem, true); 3041 mem_cgroup_get(mem); 3042 } 3043 if (!mem_cgroup_is_root(mem)) 3044 mem_cgroup_do_uncharge(mem, nr_pages, ctype); 3045 3046 return mem; 3047 3048unlock_out: 3049 unlock_page_cgroup(pc); 3050 return NULL; 3051} 3052 3053void mem_cgroup_uncharge_page(struct page *page) 3054{ 3055 /* early check. */ 3056 if (page_mapped(page)) 3057 return; 3058 if (page->mapping && !PageAnon(page)) 3059 return; 3060 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED); 3061} 3062 3063void mem_cgroup_uncharge_cache_page(struct page *page) 3064{ 3065 VM_BUG_ON(page_mapped(page)); 3066 VM_BUG_ON(page->mapping); 3067 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE); 3068} 3069 3070/* 3071 * Batch_start/batch_end is called in unmap_page_range/invlidate/trucate. 3072 * In that cases, pages are freed continuously and we can expect pages 3073 * are in the same memcg. All these calls itself limits the number of 3074 * pages freed at once, then uncharge_start/end() is called properly. 3075 * This may be called prural(2) times in a context, 3076 */ 3077 3078void mem_cgroup_uncharge_start(void) 3079{ 3080 current->memcg_batch.do_batch++; 3081 /* We can do nest. */ 3082 if (current->memcg_batch.do_batch == 1) { 3083 current->memcg_batch.memcg = NULL; 3084 current->memcg_batch.nr_pages = 0; 3085 current->memcg_batch.memsw_nr_pages = 0; 3086 } 3087} 3088 3089void mem_cgroup_uncharge_end(void) 3090{ 3091 struct memcg_batch_info *batch = ¤t->memcg_batch; 3092 3093 if (!batch->do_batch) 3094 return; 3095 3096 batch->do_batch--; 3097 if (batch->do_batch) /* If stacked, do nothing. */ 3098 return; 3099 3100 if (!batch->memcg) 3101 return; 3102 /* 3103 * This "batch->memcg" is valid without any css_get/put etc... 3104 * bacause we hide charges behind us. 3105 */ 3106 if (batch->nr_pages) 3107 res_counter_uncharge(&batch->memcg->res, 3108 batch->nr_pages * PAGE_SIZE); 3109 if (batch->memsw_nr_pages) 3110 res_counter_uncharge(&batch->memcg->memsw, 3111 batch->memsw_nr_pages * PAGE_SIZE); 3112 memcg_oom_recover(batch->memcg); 3113 /* forget this pointer (for sanity check) */ 3114 batch->memcg = NULL; 3115} 3116 3117#ifdef CONFIG_SWAP 3118/* 3119 * called after __delete_from_swap_cache() and drop "page" account. 3120 * memcg information is recorded to swap_cgroup of "ent" 3121 */ 3122void 3123mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) 3124{ 3125 struct mem_cgroup *memcg; 3126 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT; 3127 3128 if (!swapout) /* this was a swap cache but the swap is unused ! */ 3129 ctype = MEM_CGROUP_CHARGE_TYPE_DROP; 3130 3131 memcg = __mem_cgroup_uncharge_common(page, ctype); 3132 3133 /* 3134 * record memcg information, if swapout && memcg != NULL, 3135 * mem_cgroup_get() was called in uncharge(). 3136 */ 3137 if (do_swap_account && swapout && memcg) 3138 swap_cgroup_record(ent, css_id(&memcg->css)); 3139} 3140#endif 3141 3142#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 3143/* 3144 * called from swap_entry_free(). remove record in swap_cgroup and 3145 * uncharge "memsw" account. 3146 */ 3147void mem_cgroup_uncharge_swap(swp_entry_t ent) 3148{ 3149 struct mem_cgroup *memcg; 3150 unsigned short id; 3151 3152 if (!do_swap_account) 3153 return; 3154 3155 id = swap_cgroup_record(ent, 0); 3156 rcu_read_lock(); 3157 memcg = mem_cgroup_lookup(id); 3158 if (memcg) { 3159 /* 3160 * We uncharge this because swap is freed. 3161 * This memcg can be obsolete one. We avoid calling css_tryget 3162 */ 3163 if (!mem_cgroup_is_root(memcg)) 3164 res_counter_uncharge(&memcg->memsw, PAGE_SIZE); 3165 mem_cgroup_swap_statistics(memcg, false); 3166 mem_cgroup_put(memcg); 3167 } 3168 rcu_read_unlock(); 3169} 3170 3171/** 3172 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3173 * @entry: swap entry to be moved 3174 * @from: mem_cgroup which the entry is moved from 3175 * @to: mem_cgroup which the entry is moved to 3176 * @need_fixup: whether we should fixup res_counters and refcounts. 3177 * 3178 * It succeeds only when the swap_cgroup's record for this entry is the same 3179 * as the mem_cgroup's id of @from. 3180 * 3181 * Returns 0 on success, -EINVAL on failure. 3182 * 3183 * The caller must have charged to @to, IOW, called res_counter_charge() about 3184 * both res and memsw, and called css_get(). 3185 */ 3186static int mem_cgroup_move_swap_account(swp_entry_t entry, 3187 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) 3188{ 3189 unsigned short old_id, new_id; 3190 3191 old_id = css_id(&from->css); 3192 new_id = css_id(&to->css); 3193 3194 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3195 mem_cgroup_swap_statistics(from, false); 3196 mem_cgroup_swap_statistics(to, true); 3197 /* 3198 * This function is only called from task migration context now. 3199 * It postpones res_counter and refcount handling till the end 3200 * of task migration(mem_cgroup_clear_mc()) for performance 3201 * improvement. But we cannot postpone mem_cgroup_get(to) 3202 * because if the process that has been moved to @to does 3203 * swap-in, the refcount of @to might be decreased to 0. 3204 */ 3205 mem_cgroup_get(to); 3206 if (need_fixup) { 3207 if (!mem_cgroup_is_root(from)) 3208 res_counter_uncharge(&from->memsw, PAGE_SIZE); 3209 mem_cgroup_put(from); 3210 /* 3211 * we charged both to->res and to->memsw, so we should 3212 * uncharge to->res. 3213 */ 3214 if (!mem_cgroup_is_root(to)) 3215 res_counter_uncharge(&to->res, PAGE_SIZE); 3216 } 3217 return 0; 3218 } 3219 return -EINVAL; 3220} 3221#else 3222static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3223 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) 3224{ 3225 return -EINVAL; 3226} 3227#endif 3228 3229/* 3230 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old 3231 * page belongs to. 3232 */ 3233int mem_cgroup_prepare_migration(struct page *page, 3234 struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask) 3235{ 3236 struct mem_cgroup *mem = NULL; 3237 struct page_cgroup *pc; 3238 enum charge_type ctype; 3239 int ret = 0; 3240 3241 *ptr = NULL; 3242 3243 VM_BUG_ON(PageTransHuge(page)); 3244 if (mem_cgroup_disabled()) 3245 return 0; 3246 3247 pc = lookup_page_cgroup(page); 3248 lock_page_cgroup(pc); 3249 if (PageCgroupUsed(pc)) { 3250 mem = pc->mem_cgroup; 3251 css_get(&mem->css); 3252 /* 3253 * At migrating an anonymous page, its mapcount goes down 3254 * to 0 and uncharge() will be called. But, even if it's fully 3255 * unmapped, migration may fail and this page has to be 3256 * charged again. We set MIGRATION flag here and delay uncharge 3257 * until end_migration() is called 3258 * 3259 * Corner Case Thinking 3260 * A) 3261 * When the old page was mapped as Anon and it's unmap-and-freed 3262 * while migration was ongoing. 3263 * If unmap finds the old page, uncharge() of it will be delayed 3264 * until end_migration(). If unmap finds a new page, it's 3265 * uncharged when it make mapcount to be 1->0. If unmap code 3266 * finds swap_migration_entry, the new page will not be mapped 3267 * and end_migration() will find it(mapcount==0). 3268 * 3269 * B) 3270 * When the old page was mapped but migraion fails, the kernel 3271 * remaps it. A charge for it is kept by MIGRATION flag even 3272 * if mapcount goes down to 0. We can do remap successfully 3273 * without charging it again. 3274 * 3275 * C) 3276 * The "old" page is under lock_page() until the end of 3277 * migration, so, the old page itself will not be swapped-out. 3278 * If the new page is swapped out before end_migraton, our 3279 * hook to usual swap-out path will catch the event. 3280 */ 3281 if (PageAnon(page)) 3282 SetPageCgroupMigration(pc); 3283 } 3284 unlock_page_cgroup(pc); 3285 /* 3286 * If the page is not charged at this point, 3287 * we return here. 3288 */ 3289 if (!mem) 3290 return 0; 3291 3292 *ptr = mem; 3293 ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false); 3294 css_put(&mem->css);/* drop extra refcnt */ 3295 if (ret || *ptr == NULL) { 3296 if (PageAnon(page)) { 3297 lock_page_cgroup(pc); 3298 ClearPageCgroupMigration(pc); 3299 unlock_page_cgroup(pc); 3300 /* 3301 * The old page may be fully unmapped while we kept it. 3302 */ 3303 mem_cgroup_uncharge_page(page); 3304 } 3305 return -ENOMEM; 3306 } 3307 /* 3308 * We charge new page before it's used/mapped. So, even if unlock_page() 3309 * is called before end_migration, we can catch all events on this new 3310 * page. In the case new page is migrated but not remapped, new page's 3311 * mapcount will be finally 0 and we call uncharge in end_migration(). 3312 */ 3313 pc = lookup_page_cgroup(newpage); 3314 if (PageAnon(page)) 3315 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED; 3316 else if (page_is_file_cache(page)) 3317 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; 3318 else 3319 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; 3320 __mem_cgroup_commit_charge(mem, page, 1, pc, ctype); 3321 return ret; 3322} 3323 3324/* remove redundant charge if migration failed*/ 3325void mem_cgroup_end_migration(struct mem_cgroup *mem, 3326 struct page *oldpage, struct page *newpage, bool migration_ok) 3327{ 3328 struct page *used, *unused; 3329 struct page_cgroup *pc; 3330 3331 if (!mem) 3332 return; 3333 /* blocks rmdir() */ 3334 cgroup_exclude_rmdir(&mem->css); 3335 if (!migration_ok) { 3336 used = oldpage; 3337 unused = newpage; 3338 } else { 3339 used = newpage; 3340 unused = oldpage; 3341 } 3342 /* 3343 * We disallowed uncharge of pages under migration because mapcount 3344 * of the page goes down to zero, temporarly. 3345 * Clear the flag and check the page should be charged. 3346 */ 3347 pc = lookup_page_cgroup(oldpage); 3348 lock_page_cgroup(pc); 3349 ClearPageCgroupMigration(pc); 3350 unlock_page_cgroup(pc); 3351 3352 __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE); 3353 3354 /* 3355 * If a page is a file cache, radix-tree replacement is very atomic 3356 * and we can skip this check. When it was an Anon page, its mapcount 3357 * goes down to 0. But because we added MIGRATION flage, it's not 3358 * uncharged yet. There are several case but page->mapcount check 3359 * and USED bit check in mem_cgroup_uncharge_page() will do enough 3360 * check. (see prepare_charge() also) 3361 */ 3362 if (PageAnon(used)) 3363 mem_cgroup_uncharge_page(used); 3364 /* 3365 * At migration, we may charge account against cgroup which has no 3366 * tasks. 3367 * So, rmdir()->pre_destroy() can be called while we do this charge. 3368 * In that case, we need to call pre_destroy() again. check it here. 3369 */ 3370 cgroup_release_and_wakeup_rmdir(&mem->css); 3371} 3372 3373/* 3374 * A call to try to shrink memory usage on charge failure at shmem's swapin. 3375 * Calling hierarchical_reclaim is not enough because we should update 3376 * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM. 3377 * Moreover considering hierarchy, we should reclaim from the mem_over_limit, 3378 * not from the memcg which this page would be charged to. 3379 * try_charge_swapin does all of these works properly. 3380 */ 3381int mem_cgroup_shmem_charge_fallback(struct page *page, 3382 struct mm_struct *mm, 3383 gfp_t gfp_mask) 3384{ 3385 struct mem_cgroup *mem; 3386 int ret; 3387 3388 if (mem_cgroup_disabled()) 3389 return 0; 3390 3391 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem); 3392 if (!ret) 3393 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */ 3394 3395 return ret; 3396} 3397 3398#ifdef CONFIG_DEBUG_VM 3399static struct page_cgroup *lookup_page_cgroup_used(struct page *page) 3400{ 3401 struct page_cgroup *pc; 3402 3403 pc = lookup_page_cgroup(page); 3404 if (likely(pc) && PageCgroupUsed(pc)) 3405 return pc; 3406 return NULL; 3407} 3408 3409bool mem_cgroup_bad_page_check(struct page *page) 3410{ 3411 if (mem_cgroup_disabled()) 3412 return false; 3413 3414 return lookup_page_cgroup_used(page) != NULL; 3415} 3416 3417void mem_cgroup_print_bad_page(struct page *page) 3418{ 3419 struct page_cgroup *pc; 3420 3421 pc = lookup_page_cgroup_used(page); 3422 if (pc) { 3423 int ret = -1; 3424 char *path; 3425 3426 printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p", 3427 pc, pc->flags, pc->mem_cgroup); 3428 3429 path = kmalloc(PATH_MAX, GFP_KERNEL); 3430 if (path) { 3431 rcu_read_lock(); 3432 ret = cgroup_path(pc->mem_cgroup->css.cgroup, 3433 path, PATH_MAX); 3434 rcu_read_unlock(); 3435 } 3436 3437 printk(KERN_CONT "(%s)\n", 3438 (ret < 0) ? "cannot get the path" : path); 3439 kfree(path); 3440 } 3441} 3442#endif 3443 3444static DEFINE_MUTEX(set_limit_mutex); 3445 3446static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 3447 unsigned long long val) 3448{ 3449 int retry_count; 3450 u64 memswlimit, memlimit; 3451 int ret = 0; 3452 int children = mem_cgroup_count_children(memcg); 3453 u64 curusage, oldusage; 3454 int enlarge; 3455 3456 /* 3457 * For keeping hierarchical_reclaim simple, how long we should retry 3458 * is depends on callers. We set our retry-count to be function 3459 * of # of children which we should visit in this loop. 3460 */ 3461 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children; 3462 3463 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE); 3464 3465 enlarge = 0; 3466 while (retry_count) { 3467 if (signal_pending(current)) { 3468 ret = -EINTR; 3469 break; 3470 } 3471 /* 3472 * Rather than hide all in some function, I do this in 3473 * open coded manner. You see what this really does. 3474 * We have to guarantee mem->res.limit < mem->memsw.limit. 3475 */ 3476 mutex_lock(&set_limit_mutex); 3477 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3478 if (memswlimit < val) { 3479 ret = -EINVAL; 3480 mutex_unlock(&set_limit_mutex); 3481 break; 3482 } 3483 3484 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 3485 if (memlimit < val) 3486 enlarge = 1; 3487 3488 ret = res_counter_set_limit(&memcg->res, val); 3489 if (!ret) { 3490 if (memswlimit == val) 3491 memcg->memsw_is_minimum = true; 3492 else 3493 memcg->memsw_is_minimum = false; 3494 } 3495 mutex_unlock(&set_limit_mutex); 3496 3497 if (!ret) 3498 break; 3499 3500 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, 3501 MEM_CGROUP_RECLAIM_SHRINK, 3502 NULL); 3503 curusage = res_counter_read_u64(&memcg->res, RES_USAGE); 3504 /* Usage is reduced ? */ 3505 if (curusage >= oldusage) 3506 retry_count--; 3507 else 3508 oldusage = curusage; 3509 } 3510 if (!ret && enlarge) 3511 memcg_oom_recover(memcg); 3512 3513 return ret; 3514} 3515 3516static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 3517 unsigned long long val) 3518{ 3519 int retry_count; 3520 u64 memlimit, memswlimit, oldusage, curusage; 3521 int children = mem_cgroup_count_children(memcg); 3522 int ret = -EBUSY; 3523 int enlarge = 0; 3524 3525 /* see mem_cgroup_resize_res_limit */ 3526 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; 3527 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 3528 while (retry_count) { 3529 if (signal_pending(current)) { 3530 ret = -EINTR; 3531 break; 3532 } 3533 /* 3534 * Rather than hide all in some function, I do this in 3535 * open coded manner. You see what this really does. 3536 * We have to guarantee mem->res.limit < mem->memsw.limit. 3537 */ 3538 mutex_lock(&set_limit_mutex); 3539 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 3540 if (memlimit > val) { 3541 ret = -EINVAL; 3542 mutex_unlock(&set_limit_mutex); 3543 break; 3544 } 3545 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3546 if (memswlimit < val) 3547 enlarge = 1; 3548 ret = res_counter_set_limit(&memcg->memsw, val); 3549 if (!ret) { 3550 if (memlimit == val) 3551 memcg->memsw_is_minimum = true; 3552 else 3553 memcg->memsw_is_minimum = false; 3554 } 3555 mutex_unlock(&set_limit_mutex); 3556 3557 if (!ret) 3558 break; 3559 3560 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, 3561 MEM_CGROUP_RECLAIM_NOSWAP | 3562 MEM_CGROUP_RECLAIM_SHRINK, 3563 NULL); 3564 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 3565 /* Usage is reduced ? */ 3566 if (curusage >= oldusage) 3567 retry_count--; 3568 else 3569 oldusage = curusage; 3570 } 3571 if (!ret && enlarge) 3572 memcg_oom_recover(memcg); 3573 return ret; 3574} 3575 3576unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 3577 gfp_t gfp_mask, 3578 unsigned long *total_scanned) 3579{ 3580 unsigned long nr_reclaimed = 0; 3581 struct mem_cgroup_per_zone *mz, *next_mz = NULL; 3582 unsigned long reclaimed; 3583 int loop = 0; 3584 struct mem_cgroup_tree_per_zone *mctz; 3585 unsigned long long excess; 3586 unsigned long nr_scanned; 3587 3588 if (order > 0) 3589 return 0; 3590 3591 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone)); 3592 /* 3593 * This loop can run a while, specially if mem_cgroup's continuously 3594 * keep exceeding their soft limit and putting the system under 3595 * pressure 3596 */ 3597 do { 3598 if (next_mz) 3599 mz = next_mz; 3600 else 3601 mz = mem_cgroup_largest_soft_limit_node(mctz); 3602 if (!mz) 3603 break; 3604 3605 nr_scanned = 0; 3606 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone, 3607 gfp_mask, 3608 MEM_CGROUP_RECLAIM_SOFT, 3609 &nr_scanned); 3610 nr_reclaimed += reclaimed; 3611 *total_scanned += nr_scanned; 3612 spin_lock(&mctz->lock); 3613 3614 /* 3615 * If we failed to reclaim anything from this memory cgroup 3616 * it is time to move on to the next cgroup 3617 */ 3618 next_mz = NULL; 3619 if (!reclaimed) { 3620 do { 3621 /* 3622 * Loop until we find yet another one. 3623 * 3624 * By the time we get the soft_limit lock 3625 * again, someone might have aded the 3626 * group back on the RB tree. Iterate to 3627 * make sure we get a different mem. 3628 * mem_cgroup_largest_soft_limit_node returns 3629 * NULL if no other cgroup is present on 3630 * the tree 3631 */ 3632 next_mz = 3633 __mem_cgroup_largest_soft_limit_node(mctz); 3634 if (next_mz == mz) 3635 css_put(&next_mz->mem->css); 3636 else /* next_mz == NULL or other memcg */ 3637 break; 3638 } while (1); 3639 } 3640 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz); 3641 excess = res_counter_soft_limit_excess(&mz->mem->res); 3642 /* 3643 * One school of thought says that we should not add 3644 * back the node to the tree if reclaim returns 0. 3645 * But our reclaim could return 0, simply because due 3646 * to priority we are exposing a smaller subset of 3647 * memory to reclaim from. Consider this as a longer 3648 * term TODO. 3649 */ 3650 /* If excess == 0, no tree ops */ 3651 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess); 3652 spin_unlock(&mctz->lock); 3653 css_put(&mz->mem->css); 3654 loop++; 3655 /* 3656 * Could not reclaim anything and there are no more 3657 * mem cgroups to try or we seem to be looping without 3658 * reclaiming anything. 3659 */ 3660 if (!nr_reclaimed && 3661 (next_mz == NULL || 3662 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3663 break; 3664 } while (!nr_reclaimed); 3665 if (next_mz) 3666 css_put(&next_mz->mem->css); 3667 return nr_reclaimed; 3668} 3669 3670/* 3671 * This routine traverse page_cgroup in given list and drop them all. 3672 * *And* this routine doesn't reclaim page itself, just removes page_cgroup. 3673 */ 3674static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, 3675 int node, int zid, enum lru_list lru) 3676{ 3677 struct zone *zone; 3678 struct mem_cgroup_per_zone *mz; 3679 struct page_cgroup *pc, *busy; 3680 unsigned long flags, loop; 3681 struct list_head *list; 3682 int ret = 0; 3683 3684 zone = &NODE_DATA(node)->node_zones[zid]; 3685 mz = mem_cgroup_zoneinfo(mem, node, zid); 3686 list = &mz->lists[lru]; 3687 3688 loop = MEM_CGROUP_ZSTAT(mz, lru); 3689 /* give some margin against EBUSY etc...*/ 3690 loop += 256; 3691 busy = NULL; 3692 while (loop--) { 3693 struct page *page; 3694 3695 ret = 0; 3696 spin_lock_irqsave(&zone->lru_lock, flags); 3697 if (list_empty(list)) { 3698 spin_unlock_irqrestore(&zone->lru_lock, flags); 3699 break; 3700 } 3701 pc = list_entry(list->prev, struct page_cgroup, lru); 3702 if (busy == pc) { 3703 list_move(&pc->lru, list); 3704 busy = NULL; 3705 spin_unlock_irqrestore(&zone->lru_lock, flags); 3706 continue; 3707 } 3708 spin_unlock_irqrestore(&zone->lru_lock, flags); 3709 3710 page = lookup_cgroup_page(pc); 3711 3712 ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL); 3713 if (ret == -ENOMEM) 3714 break; 3715 3716 if (ret == -EBUSY || ret == -EINVAL) { 3717 /* found lock contention or "pc" is obsolete. */ 3718 busy = pc; 3719 cond_resched(); 3720 } else 3721 busy = NULL; 3722 } 3723 3724 if (!ret && !list_empty(list)) 3725 return -EBUSY; 3726 return ret; 3727} 3728 3729/* 3730 * make mem_cgroup's charge to be 0 if there is no task. 3731 * This enables deleting this mem_cgroup. 3732 */ 3733static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all) 3734{ 3735 int ret; 3736 int node, zid, shrink; 3737 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 3738 struct cgroup *cgrp = mem->css.cgroup; 3739 3740 css_get(&mem->css); 3741 3742 shrink = 0; 3743 /* should free all ? */ 3744 if (free_all) 3745 goto try_to_free; 3746move_account: 3747 do { 3748 ret = -EBUSY; 3749 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children)) 3750 goto out; 3751 ret = -EINTR; 3752 if (signal_pending(current)) 3753 goto out; 3754 /* This is for making all *used* pages to be on LRU. */ 3755 lru_add_drain_all(); 3756 drain_all_stock_sync(); 3757 ret = 0; 3758 mem_cgroup_start_move(mem); 3759 for_each_node_state(node, N_HIGH_MEMORY) { 3760 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { 3761 enum lru_list l; 3762 for_each_lru(l) { 3763 ret = mem_cgroup_force_empty_list(mem, 3764 node, zid, l); 3765 if (ret) 3766 break; 3767 } 3768 } 3769 if (ret) 3770 break; 3771 } 3772 mem_cgroup_end_move(mem); 3773 memcg_oom_recover(mem); 3774 /* it seems parent cgroup doesn't have enough mem */ 3775 if (ret == -ENOMEM) 3776 goto try_to_free; 3777 cond_resched(); 3778 /* "ret" should also be checked to ensure all lists are empty. */ 3779 } while (mem->res.usage > 0 || ret); 3780out: 3781 css_put(&mem->css); 3782 return ret; 3783 3784try_to_free: 3785 /* returns EBUSY if there is a task or if we come here twice. */ 3786 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) { 3787 ret = -EBUSY; 3788 goto out; 3789 } 3790 /* we call try-to-free pages for make this cgroup empty */ 3791 lru_add_drain_all(); 3792 /* try to free all pages in this cgroup */ 3793 shrink = 1; 3794 while (nr_retries && mem->res.usage > 0) { 3795 int progress; 3796 3797 if (signal_pending(current)) { 3798 ret = -EINTR; 3799 goto out; 3800 } 3801 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, 3802 false); 3803 if (!progress) { 3804 nr_retries--; 3805 /* maybe some writeback is necessary */ 3806 congestion_wait(BLK_RW_ASYNC, HZ/10); 3807 } 3808 3809 } 3810 lru_add_drain(); 3811 /* try move_account...there may be some *locked* pages. */ 3812 goto move_account; 3813} 3814 3815int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event) 3816{ 3817 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true); 3818} 3819 3820 3821static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft) 3822{ 3823 return mem_cgroup_from_cont(cont)->use_hierarchy; 3824} 3825 3826static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft, 3827 u64 val) 3828{ 3829 int retval = 0; 3830 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 3831 struct cgroup *parent = cont->parent; 3832 struct mem_cgroup *parent_mem = NULL; 3833 3834 if (parent) 3835 parent_mem = mem_cgroup_from_cont(parent); 3836 3837 cgroup_lock(); 3838 /* 3839 * If parent's use_hierarchy is set, we can't make any modifications 3840 * in the child subtrees. If it is unset, then the change can 3841 * occur, provided the current cgroup has no children. 3842 * 3843 * For the root cgroup, parent_mem is NULL, we allow value to be 3844 * set if there are no children. 3845 */ 3846 if ((!parent_mem || !parent_mem->use_hierarchy) && 3847 (val == 1 || val == 0)) { 3848 if (list_empty(&cont->children)) 3849 mem->use_hierarchy = val; 3850 else 3851 retval = -EBUSY; 3852 } else 3853 retval = -EINVAL; 3854 cgroup_unlock(); 3855 3856 return retval; 3857} 3858 3859 3860static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem, 3861 enum mem_cgroup_stat_index idx) 3862{ 3863 struct mem_cgroup *iter; 3864 long val = 0; 3865 3866 /* Per-cpu values can be negative, use a signed accumulator */ 3867 for_each_mem_cgroup_tree(iter, mem) 3868 val += mem_cgroup_read_stat(iter, idx); 3869 3870 if (val < 0) /* race ? */ 3871 val = 0; 3872 return val; 3873} 3874 3875static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap) 3876{ 3877 u64 val; 3878 3879 if (!mem_cgroup_is_root(mem)) { 3880 if (!swap) 3881 return res_counter_read_u64(&mem->res, RES_USAGE); 3882 else 3883 return res_counter_read_u64(&mem->memsw, RES_USAGE); 3884 } 3885 3886 val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE); 3887 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS); 3888 3889 if (swap) 3890 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT); 3891 3892 return val << PAGE_SHIFT; 3893} 3894 3895static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) 3896{ 3897 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 3898 u64 val; 3899 int type, name; 3900 3901 type = MEMFILE_TYPE(cft->private); 3902 name = MEMFILE_ATTR(cft->private); 3903 switch (type) { 3904 case _MEM: 3905 if (name == RES_USAGE) 3906 val = mem_cgroup_usage(mem, false); 3907 else 3908 val = res_counter_read_u64(&mem->res, name); 3909 break; 3910 case _MEMSWAP: 3911 if (name == RES_USAGE) 3912 val = mem_cgroup_usage(mem, true); 3913 else 3914 val = res_counter_read_u64(&mem->memsw, name); 3915 break; 3916 default: 3917 BUG(); 3918 break; 3919 } 3920 return val; 3921} 3922/* 3923 * The user of this function is... 3924 * RES_LIMIT. 3925 */ 3926static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft, 3927 const char *buffer) 3928{ 3929 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 3930 int type, name; 3931 unsigned long long val; 3932 int ret; 3933 3934 type = MEMFILE_TYPE(cft->private); 3935 name = MEMFILE_ATTR(cft->private); 3936 switch (name) { 3937 case RES_LIMIT: 3938 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3939 ret = -EINVAL; 3940 break; 3941 } 3942 /* This function does all necessary parse...reuse it */ 3943 ret = res_counter_memparse_write_strategy(buffer, &val); 3944 if (ret) 3945 break; 3946 if (type == _MEM) 3947 ret = mem_cgroup_resize_limit(memcg, val); 3948 else 3949 ret = mem_cgroup_resize_memsw_limit(memcg, val); 3950 break; 3951 case RES_SOFT_LIMIT: 3952 ret = res_counter_memparse_write_strategy(buffer, &val); 3953 if (ret) 3954 break; 3955 /* 3956 * For memsw, soft limits are hard to implement in terms 3957 * of semantics, for now, we support soft limits for 3958 * control without swap 3959 */ 3960 if (type == _MEM) 3961 ret = res_counter_set_soft_limit(&memcg->res, val); 3962 else 3963 ret = -EINVAL; 3964 break; 3965 default: 3966 ret = -EINVAL; /* should be BUG() ? */ 3967 break; 3968 } 3969 return ret; 3970} 3971 3972static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg, 3973 unsigned long long *mem_limit, unsigned long long *memsw_limit) 3974{ 3975 struct cgroup *cgroup; 3976 unsigned long long min_limit, min_memsw_limit, tmp; 3977 3978 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT); 3979 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3980 cgroup = memcg->css.cgroup; 3981 if (!memcg->use_hierarchy) 3982 goto out; 3983 3984 while (cgroup->parent) { 3985 cgroup = cgroup->parent; 3986 memcg = mem_cgroup_from_cont(cgroup); 3987 if (!memcg->use_hierarchy) 3988 break; 3989 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT); 3990 min_limit = min(min_limit, tmp); 3991 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3992 min_memsw_limit = min(min_memsw_limit, tmp); 3993 } 3994out: 3995 *mem_limit = min_limit; 3996 *memsw_limit = min_memsw_limit; 3997 return; 3998} 3999 4000static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) 4001{ 4002 struct mem_cgroup *mem; 4003 int type, name; 4004 4005 mem = mem_cgroup_from_cont(cont); 4006 type = MEMFILE_TYPE(event); 4007 name = MEMFILE_ATTR(event); 4008 switch (name) { 4009 case RES_MAX_USAGE: 4010 if (type == _MEM) 4011 res_counter_reset_max(&mem->res); 4012 else 4013 res_counter_reset_max(&mem->memsw); 4014 break; 4015 case RES_FAILCNT: 4016 if (type == _MEM) 4017 res_counter_reset_failcnt(&mem->res); 4018 else 4019 res_counter_reset_failcnt(&mem->memsw); 4020 break; 4021 } 4022 4023 return 0; 4024} 4025 4026static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp, 4027 struct cftype *cft) 4028{ 4029 return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate; 4030} 4031 4032#ifdef CONFIG_MMU 4033static int mem_cgroup_move_charge_write(struct cgroup *cgrp, 4034 struct cftype *cft, u64 val) 4035{ 4036 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4037 4038 if (val >= (1 << NR_MOVE_TYPE)) 4039 return -EINVAL; 4040 /* 4041 * We check this value several times in both in can_attach() and 4042 * attach(), so we need cgroup lock to prevent this value from being 4043 * inconsistent. 4044 */ 4045 cgroup_lock(); 4046 mem->move_charge_at_immigrate = val; 4047 cgroup_unlock(); 4048 4049 return 0; 4050} 4051#else 4052static int mem_cgroup_move_charge_write(struct cgroup *cgrp, 4053 struct cftype *cft, u64 val) 4054{ 4055 return -ENOSYS; 4056} 4057#endif 4058 4059 4060/* For read statistics */ 4061enum { 4062 MCS_CACHE, 4063 MCS_RSS, 4064 MCS_FILE_MAPPED, 4065 MCS_PGPGIN, 4066 MCS_PGPGOUT, 4067 MCS_SWAP, 4068 MCS_PGFAULT, 4069 MCS_PGMAJFAULT, 4070 MCS_INACTIVE_ANON, 4071 MCS_ACTIVE_ANON, 4072 MCS_INACTIVE_FILE, 4073 MCS_ACTIVE_FILE, 4074 MCS_UNEVICTABLE, 4075 NR_MCS_STAT, 4076}; 4077 4078struct mcs_total_stat { 4079 s64 stat[NR_MCS_STAT]; 4080}; 4081 4082struct { 4083 char *local_name; 4084 char *total_name; 4085} memcg_stat_strings[NR_MCS_STAT] = { 4086 {"cache", "total_cache"}, 4087 {"rss", "total_rss"}, 4088 {"mapped_file", "total_mapped_file"}, 4089 {"pgpgin", "total_pgpgin"}, 4090 {"pgpgout", "total_pgpgout"}, 4091 {"swap", "total_swap"}, 4092 {"pgfault", "total_pgfault"}, 4093 {"pgmajfault", "total_pgmajfault"}, 4094 {"inactive_anon", "total_inactive_anon"}, 4095 {"active_anon", "total_active_anon"}, 4096 {"inactive_file", "total_inactive_file"}, 4097 {"active_file", "total_active_file"}, 4098 {"unevictable", "total_unevictable"} 4099}; 4100 4101 4102static void 4103mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) 4104{ 4105 s64 val; 4106 4107 /* per cpu stat */ 4108 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE); 4109 s->stat[MCS_CACHE] += val * PAGE_SIZE; 4110 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS); 4111 s->stat[MCS_RSS] += val * PAGE_SIZE; 4112 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED); 4113 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE; 4114 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN); 4115 s->stat[MCS_PGPGIN] += val; 4116 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT); 4117 s->stat[MCS_PGPGOUT] += val; 4118 if (do_swap_account) { 4119 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT); 4120 s->stat[MCS_SWAP] += val * PAGE_SIZE; 4121 } 4122 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT); 4123 s->stat[MCS_PGFAULT] += val; 4124 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT); 4125 s->stat[MCS_PGMAJFAULT] += val; 4126 4127 /* per zone stat */ 4128 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_ANON)); 4129 s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE; 4130 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_ANON)); 4131 s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE; 4132 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_FILE)); 4133 s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE; 4134 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_FILE)); 4135 s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE; 4136 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_UNEVICTABLE)); 4137 s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE; 4138} 4139 4140static void 4141mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) 4142{ 4143 struct mem_cgroup *iter; 4144 4145 for_each_mem_cgroup_tree(iter, mem) 4146 mem_cgroup_get_local_stat(iter, s); 4147} 4148 4149#ifdef CONFIG_NUMA 4150static int mem_control_numa_stat_show(struct seq_file *m, void *arg) 4151{ 4152 int nid; 4153 unsigned long total_nr, file_nr, anon_nr, unevictable_nr; 4154 unsigned long node_nr; 4155 struct cgroup *cont = m->private; 4156 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); 4157 4158 total_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL); 4159 seq_printf(m, "total=%lu", total_nr); 4160 for_each_node_state(nid, N_HIGH_MEMORY) { 4161 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL); 4162 seq_printf(m, " N%d=%lu", nid, node_nr); 4163 } 4164 seq_putc(m, '\n'); 4165 4166 file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE); 4167 seq_printf(m, "file=%lu", file_nr); 4168 for_each_node_state(nid, N_HIGH_MEMORY) { 4169 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, 4170 LRU_ALL_FILE); 4171 seq_printf(m, " N%d=%lu", nid, node_nr); 4172 } 4173 seq_putc(m, '\n'); 4174 4175 anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON); 4176 seq_printf(m, "anon=%lu", anon_nr); 4177 for_each_node_state(nid, N_HIGH_MEMORY) { 4178 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, 4179 LRU_ALL_ANON); 4180 seq_printf(m, " N%d=%lu", nid, node_nr); 4181 } 4182 seq_putc(m, '\n'); 4183 4184 unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE)); 4185 seq_printf(m, "unevictable=%lu", unevictable_nr); 4186 for_each_node_state(nid, N_HIGH_MEMORY) { 4187 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, 4188 BIT(LRU_UNEVICTABLE)); 4189 seq_printf(m, " N%d=%lu", nid, node_nr); 4190 } 4191 seq_putc(m, '\n'); 4192 return 0; 4193} 4194#endif /* CONFIG_NUMA */ 4195 4196static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, 4197 struct cgroup_map_cb *cb) 4198{ 4199 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); 4200 struct mcs_total_stat mystat; 4201 int i; 4202 4203 memset(&mystat, 0, sizeof(mystat)); 4204 mem_cgroup_get_local_stat(mem_cont, &mystat); 4205 4206 4207 for (i = 0; i < NR_MCS_STAT; i++) { 4208 if (i == MCS_SWAP && !do_swap_account) 4209 continue; 4210 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]); 4211 } 4212 4213 /* Hierarchical information */ 4214 { 4215 unsigned long long limit, memsw_limit; 4216 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit); 4217 cb->fill(cb, "hierarchical_memory_limit", limit); 4218 if (do_swap_account) 4219 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit); 4220 } 4221 4222 memset(&mystat, 0, sizeof(mystat)); 4223 mem_cgroup_get_total_stat(mem_cont, &mystat); 4224 for (i = 0; i < NR_MCS_STAT; i++) { 4225 if (i == MCS_SWAP && !do_swap_account) 4226 continue; 4227 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]); 4228 } 4229 4230#ifdef CONFIG_DEBUG_VM 4231 cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL)); 4232 4233 { 4234 int nid, zid; 4235 struct mem_cgroup_per_zone *mz; 4236 unsigned long recent_rotated[2] = {0, 0}; 4237 unsigned long recent_scanned[2] = {0, 0}; 4238 4239 for_each_online_node(nid) 4240 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 4241 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); 4242 4243 recent_rotated[0] += 4244 mz->reclaim_stat.recent_rotated[0]; 4245 recent_rotated[1] += 4246 mz->reclaim_stat.recent_rotated[1]; 4247 recent_scanned[0] += 4248 mz->reclaim_stat.recent_scanned[0]; 4249 recent_scanned[1] += 4250 mz->reclaim_stat.recent_scanned[1]; 4251 } 4252 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]); 4253 cb->fill(cb, "recent_rotated_file", recent_rotated[1]); 4254 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]); 4255 cb->fill(cb, "recent_scanned_file", recent_scanned[1]); 4256 } 4257#endif 4258 4259 return 0; 4260} 4261 4262static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft) 4263{ 4264 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4265 4266 return mem_cgroup_swappiness(memcg); 4267} 4268 4269static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft, 4270 u64 val) 4271{ 4272 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4273 struct mem_cgroup *parent; 4274 4275 if (val > 100) 4276 return -EINVAL; 4277 4278 if (cgrp->parent == NULL) 4279 return -EINVAL; 4280 4281 parent = mem_cgroup_from_cont(cgrp->parent); 4282 4283 cgroup_lock(); 4284 4285 /* If under hierarchy, only empty-root can set this value */ 4286 if ((parent->use_hierarchy) || 4287 (memcg->use_hierarchy && !list_empty(&cgrp->children))) { 4288 cgroup_unlock(); 4289 return -EINVAL; 4290 } 4291 4292 memcg->swappiness = val; 4293 4294 cgroup_unlock(); 4295 4296 return 0; 4297} 4298 4299static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4300{ 4301 struct mem_cgroup_threshold_ary *t; 4302 u64 usage; 4303 int i; 4304 4305 rcu_read_lock(); 4306 if (!swap) 4307 t = rcu_dereference(memcg->thresholds.primary); 4308 else 4309 t = rcu_dereference(memcg->memsw_thresholds.primary); 4310 4311 if (!t) 4312 goto unlock; 4313 4314 usage = mem_cgroup_usage(memcg, swap); 4315 4316 /* 4317 * current_threshold points to threshold just below usage. 4318 * If it's not true, a threshold was crossed after last 4319 * call of __mem_cgroup_threshold(). 4320 */ 4321 i = t->current_threshold; 4322 4323 /* 4324 * Iterate backward over array of thresholds starting from 4325 * current_threshold and check if a threshold is crossed. 4326 * If none of thresholds below usage is crossed, we read 4327 * only one element of the array here. 4328 */ 4329 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4330 eventfd_signal(t->entries[i].eventfd, 1); 4331 4332 /* i = current_threshold + 1 */ 4333 i++; 4334 4335 /* 4336 * Iterate forward over array of thresholds starting from 4337 * current_threshold+1 and check if a threshold is crossed. 4338 * If none of thresholds above usage is crossed, we read 4339 * only one element of the array here. 4340 */ 4341 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4342 eventfd_signal(t->entries[i].eventfd, 1); 4343 4344 /* Update current_threshold */ 4345 t->current_threshold = i - 1; 4346unlock: 4347 rcu_read_unlock(); 4348} 4349 4350static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4351{ 4352 while (memcg) { 4353 __mem_cgroup_threshold(memcg, false); 4354 if (do_swap_account) 4355 __mem_cgroup_threshold(memcg, true); 4356 4357 memcg = parent_mem_cgroup(memcg); 4358 } 4359} 4360 4361static int compare_thresholds(const void *a, const void *b) 4362{ 4363 const struct mem_cgroup_threshold *_a = a; 4364 const struct mem_cgroup_threshold *_b = b; 4365 4366 return _a->threshold - _b->threshold; 4367} 4368 4369static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem) 4370{ 4371 struct mem_cgroup_eventfd_list *ev; 4372 4373 list_for_each_entry(ev, &mem->oom_notify, list) 4374 eventfd_signal(ev->eventfd, 1); 4375 return 0; 4376} 4377 4378static void mem_cgroup_oom_notify(struct mem_cgroup *mem) 4379{ 4380 struct mem_cgroup *iter; 4381 4382 for_each_mem_cgroup_tree(iter, mem) 4383 mem_cgroup_oom_notify_cb(iter); 4384} 4385 4386static int mem_cgroup_usage_register_event(struct cgroup *cgrp, 4387 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) 4388{ 4389 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4390 struct mem_cgroup_thresholds *thresholds; 4391 struct mem_cgroup_threshold_ary *new; 4392 int type = MEMFILE_TYPE(cft->private); 4393 u64 threshold, usage; 4394 int i, size, ret; 4395 4396 ret = res_counter_memparse_write_strategy(args, &threshold); 4397 if (ret) 4398 return ret; 4399 4400 mutex_lock(&memcg->thresholds_lock); 4401 4402 if (type == _MEM) 4403 thresholds = &memcg->thresholds; 4404 else if (type == _MEMSWAP) 4405 thresholds = &memcg->memsw_thresholds; 4406 else 4407 BUG(); 4408 4409 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 4410 4411 /* Check if a threshold crossed before adding a new one */ 4412 if (thresholds->primary) 4413 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4414 4415 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 4416 4417 /* Allocate memory for new array of thresholds */ 4418 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 4419 GFP_KERNEL); 4420 if (!new) { 4421 ret = -ENOMEM; 4422 goto unlock; 4423 } 4424 new->size = size; 4425 4426 /* Copy thresholds (if any) to new array */ 4427 if (thresholds->primary) { 4428 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 4429 sizeof(struct mem_cgroup_threshold)); 4430 } 4431 4432 /* Add new threshold */ 4433 new->entries[size - 1].eventfd = eventfd; 4434 new->entries[size - 1].threshold = threshold; 4435 4436 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4437 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 4438 compare_thresholds, NULL); 4439 4440 /* Find current threshold */ 4441 new->current_threshold = -1; 4442 for (i = 0; i < size; i++) { 4443 if (new->entries[i].threshold < usage) { 4444 /* 4445 * new->current_threshold will not be used until 4446 * rcu_assign_pointer(), so it's safe to increment 4447 * it here. 4448 */ 4449 ++new->current_threshold; 4450 } 4451 } 4452 4453 /* Free old spare buffer and save old primary buffer as spare */ 4454 kfree(thresholds->spare); 4455 thresholds->spare = thresholds->primary; 4456 4457 rcu_assign_pointer(thresholds->primary, new); 4458 4459 /* To be sure that nobody uses thresholds */ 4460 synchronize_rcu(); 4461 4462unlock: 4463 mutex_unlock(&memcg->thresholds_lock); 4464 4465 return ret; 4466} 4467 4468static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp, 4469 struct cftype *cft, struct eventfd_ctx *eventfd) 4470{ 4471 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4472 struct mem_cgroup_thresholds *thresholds; 4473 struct mem_cgroup_threshold_ary *new; 4474 int type = MEMFILE_TYPE(cft->private); 4475 u64 usage; 4476 int i, j, size; 4477 4478 mutex_lock(&memcg->thresholds_lock); 4479 if (type == _MEM) 4480 thresholds = &memcg->thresholds; 4481 else if (type == _MEMSWAP) 4482 thresholds = &memcg->memsw_thresholds; 4483 else 4484 BUG(); 4485 4486 /* 4487 * Something went wrong if we trying to unregister a threshold 4488 * if we don't have thresholds 4489 */ 4490 BUG_ON(!thresholds); 4491 4492 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 4493 4494 /* Check if a threshold crossed before removing */ 4495 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4496 4497 /* Calculate new number of threshold */ 4498 size = 0; 4499 for (i = 0; i < thresholds->primary->size; i++) { 4500 if (thresholds->primary->entries[i].eventfd != eventfd) 4501 size++; 4502 } 4503 4504 new = thresholds->spare; 4505 4506 /* Set thresholds array to NULL if we don't have thresholds */ 4507 if (!size) { 4508 kfree(new); 4509 new = NULL; 4510 goto swap_buffers; 4511 } 4512 4513 new->size = size; 4514 4515 /* Copy thresholds and find current threshold */ 4516 new->current_threshold = -1; 4517 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4518 if (thresholds->primary->entries[i].eventfd == eventfd) 4519 continue; 4520 4521 new->entries[j] = thresholds->primary->entries[i]; 4522 if (new->entries[j].threshold < usage) { 4523 /* 4524 * new->current_threshold will not be used 4525 * until rcu_assign_pointer(), so it's safe to increment 4526 * it here. 4527 */ 4528 ++new->current_threshold; 4529 } 4530 j++; 4531 } 4532 4533swap_buffers: 4534 /* Swap primary and spare array */ 4535 thresholds->spare = thresholds->primary; 4536 rcu_assign_pointer(thresholds->primary, new); 4537 4538 /* To be sure that nobody uses thresholds */ 4539 synchronize_rcu(); 4540 4541 mutex_unlock(&memcg->thresholds_lock); 4542} 4543 4544static int mem_cgroup_oom_register_event(struct cgroup *cgrp, 4545 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) 4546{ 4547 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4548 struct mem_cgroup_eventfd_list *event; 4549 int type = MEMFILE_TYPE(cft->private); 4550 4551 BUG_ON(type != _OOM_TYPE); 4552 event = kmalloc(sizeof(*event), GFP_KERNEL); 4553 if (!event) 4554 return -ENOMEM; 4555 4556 spin_lock(&memcg_oom_lock); 4557 4558 event->eventfd = eventfd; 4559 list_add(&event->list, &memcg->oom_notify); 4560 4561 /* already in OOM ? */ 4562 if (atomic_read(&memcg->under_oom)) 4563 eventfd_signal(eventfd, 1); 4564 spin_unlock(&memcg_oom_lock); 4565 4566 return 0; 4567} 4568 4569static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, 4570 struct cftype *cft, struct eventfd_ctx *eventfd) 4571{ 4572 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4573 struct mem_cgroup_eventfd_list *ev, *tmp; 4574 int type = MEMFILE_TYPE(cft->private); 4575 4576 BUG_ON(type != _OOM_TYPE); 4577 4578 spin_lock(&memcg_oom_lock); 4579 4580 list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) { 4581 if (ev->eventfd == eventfd) { 4582 list_del(&ev->list); 4583 kfree(ev); 4584 } 4585 } 4586 4587 spin_unlock(&memcg_oom_lock); 4588} 4589 4590static int mem_cgroup_oom_control_read(struct cgroup *cgrp, 4591 struct cftype *cft, struct cgroup_map_cb *cb) 4592{ 4593 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4594 4595 cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable); 4596 4597 if (atomic_read(&mem->under_oom)) 4598 cb->fill(cb, "under_oom", 1); 4599 else 4600 cb->fill(cb, "under_oom", 0); 4601 return 0; 4602} 4603 4604static int mem_cgroup_oom_control_write(struct cgroup *cgrp, 4605 struct cftype *cft, u64 val) 4606{ 4607 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4608 struct mem_cgroup *parent; 4609 4610 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4611 if (!cgrp->parent || !((val == 0) || (val == 1))) 4612 return -EINVAL; 4613 4614 parent = mem_cgroup_from_cont(cgrp->parent); 4615 4616 cgroup_lock(); 4617 /* oom-kill-disable is a flag for subhierarchy. */ 4618 if ((parent->use_hierarchy) || 4619 (mem->use_hierarchy && !list_empty(&cgrp->children))) { 4620 cgroup_unlock(); 4621 return -EINVAL; 4622 } 4623 mem->oom_kill_disable = val; 4624 if (!val) 4625 memcg_oom_recover(mem); 4626 cgroup_unlock(); 4627 return 0; 4628} 4629 4630#ifdef CONFIG_NUMA 4631static const struct file_operations mem_control_numa_stat_file_operations = { 4632 .read = seq_read, 4633 .llseek = seq_lseek, 4634 .release = single_release, 4635}; 4636 4637static int mem_control_numa_stat_open(struct inode *unused, struct file *file) 4638{ 4639 struct cgroup *cont = file->f_dentry->d_parent->d_fsdata; 4640 4641 file->f_op = &mem_control_numa_stat_file_operations; 4642 return single_open(file, mem_control_numa_stat_show, cont); 4643} 4644#endif /* CONFIG_NUMA */ 4645 4646static struct cftype mem_cgroup_files[] = { 4647 { 4648 .name = "usage_in_bytes", 4649 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4650 .read_u64 = mem_cgroup_read, 4651 .register_event = mem_cgroup_usage_register_event, 4652 .unregister_event = mem_cgroup_usage_unregister_event, 4653 }, 4654 { 4655 .name = "max_usage_in_bytes", 4656 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4657 .trigger = mem_cgroup_reset, 4658 .read_u64 = mem_cgroup_read, 4659 }, 4660 { 4661 .name = "limit_in_bytes", 4662 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4663 .write_string = mem_cgroup_write, 4664 .read_u64 = mem_cgroup_read, 4665 }, 4666 { 4667 .name = "soft_limit_in_bytes", 4668 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4669 .write_string = mem_cgroup_write, 4670 .read_u64 = mem_cgroup_read, 4671 }, 4672 { 4673 .name = "failcnt", 4674 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4675 .trigger = mem_cgroup_reset, 4676 .read_u64 = mem_cgroup_read, 4677 }, 4678 { 4679 .name = "stat", 4680 .read_map = mem_control_stat_show, 4681 }, 4682 { 4683 .name = "force_empty", 4684 .trigger = mem_cgroup_force_empty_write, 4685 }, 4686 { 4687 .name = "use_hierarchy", 4688 .write_u64 = mem_cgroup_hierarchy_write, 4689 .read_u64 = mem_cgroup_hierarchy_read, 4690 }, 4691 { 4692 .name = "swappiness", 4693 .read_u64 = mem_cgroup_swappiness_read, 4694 .write_u64 = mem_cgroup_swappiness_write, 4695 }, 4696 { 4697 .name = "move_charge_at_immigrate", 4698 .read_u64 = mem_cgroup_move_charge_read, 4699 .write_u64 = mem_cgroup_move_charge_write, 4700 }, 4701 { 4702 .name = "oom_control", 4703 .read_map = mem_cgroup_oom_control_read, 4704 .write_u64 = mem_cgroup_oom_control_write, 4705 .register_event = mem_cgroup_oom_register_event, 4706 .unregister_event = mem_cgroup_oom_unregister_event, 4707 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4708 }, 4709#ifdef CONFIG_NUMA 4710 { 4711 .name = "numa_stat", 4712 .open = mem_control_numa_stat_open, 4713 .mode = S_IRUGO, 4714 }, 4715#endif 4716}; 4717 4718#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4719static struct cftype memsw_cgroup_files[] = { 4720 { 4721 .name = "memsw.usage_in_bytes", 4722 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 4723 .read_u64 = mem_cgroup_read, 4724 .register_event = mem_cgroup_usage_register_event, 4725 .unregister_event = mem_cgroup_usage_unregister_event, 4726 }, 4727 { 4728 .name = "memsw.max_usage_in_bytes", 4729 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 4730 .trigger = mem_cgroup_reset, 4731 .read_u64 = mem_cgroup_read, 4732 }, 4733 { 4734 .name = "memsw.limit_in_bytes", 4735 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 4736 .write_string = mem_cgroup_write, 4737 .read_u64 = mem_cgroup_read, 4738 }, 4739 { 4740 .name = "memsw.failcnt", 4741 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 4742 .trigger = mem_cgroup_reset, 4743 .read_u64 = mem_cgroup_read, 4744 }, 4745}; 4746 4747static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) 4748{ 4749 if (!do_swap_account) 4750 return 0; 4751 return cgroup_add_files(cont, ss, memsw_cgroup_files, 4752 ARRAY_SIZE(memsw_cgroup_files)); 4753}; 4754#else 4755static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) 4756{ 4757 return 0; 4758} 4759#endif 4760 4761static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) 4762{ 4763 struct mem_cgroup_per_node *pn; 4764 struct mem_cgroup_per_zone *mz; 4765 enum lru_list l; 4766 int zone, tmp = node; 4767 /* 4768 * This routine is called against possible nodes. 4769 * But it's BUG to call kmalloc() against offline node. 4770 * 4771 * TODO: this routine can waste much memory for nodes which will 4772 * never be onlined. It's better to use memory hotplug callback 4773 * function. 4774 */ 4775 if (!node_state(node, N_NORMAL_MEMORY)) 4776 tmp = -1; 4777 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4778 if (!pn) 4779 return 1; 4780 4781 mem->info.nodeinfo[node] = pn; 4782 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4783 mz = &pn->zoneinfo[zone]; 4784 for_each_lru(l) 4785 INIT_LIST_HEAD(&mz->lists[l]); 4786 mz->usage_in_excess = 0; 4787 mz->on_tree = false; 4788 mz->mem = mem; 4789 } 4790 return 0; 4791} 4792 4793static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) 4794{ 4795 kfree(mem->info.nodeinfo[node]); 4796} 4797 4798static struct mem_cgroup *mem_cgroup_alloc(void) 4799{ 4800 struct mem_cgroup *mem; 4801 int size = sizeof(struct mem_cgroup); 4802 4803 /* Can be very big if MAX_NUMNODES is very big */ 4804 if (size < PAGE_SIZE) 4805 mem = kzalloc(size, GFP_KERNEL); 4806 else 4807 mem = vzalloc(size); 4808 4809 if (!mem) 4810 return NULL; 4811 4812 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4813 if (!mem->stat) 4814 goto out_free; 4815 spin_lock_init(&mem->pcp_counter_lock); 4816 return mem; 4817 4818out_free: 4819 if (size < PAGE_SIZE) 4820 kfree(mem); 4821 else 4822 vfree(mem); 4823 return NULL; 4824} 4825 4826/* 4827 * At destroying mem_cgroup, references from swap_cgroup can remain. 4828 * (scanning all at force_empty is too costly...) 4829 * 4830 * Instead of clearing all references at force_empty, we remember 4831 * the number of reference from swap_cgroup and free mem_cgroup when 4832 * it goes down to 0. 4833 * 4834 * Removal of cgroup itself succeeds regardless of refs from swap. 4835 */ 4836 4837static void __mem_cgroup_free(struct mem_cgroup *mem) 4838{ 4839 int node; 4840 4841 mem_cgroup_remove_from_trees(mem); 4842 free_css_id(&mem_cgroup_subsys, &mem->css); 4843 4844 for_each_node_state(node, N_POSSIBLE) 4845 free_mem_cgroup_per_zone_info(mem, node); 4846 4847 free_percpu(mem->stat); 4848 if (sizeof(struct mem_cgroup) < PAGE_SIZE) 4849 kfree(mem); 4850 else 4851 vfree(mem); 4852} 4853 4854static void mem_cgroup_get(struct mem_cgroup *mem) 4855{ 4856 atomic_inc(&mem->refcnt); 4857} 4858 4859static void __mem_cgroup_put(struct mem_cgroup *mem, int count) 4860{ 4861 if (atomic_sub_and_test(count, &mem->refcnt)) { 4862 struct mem_cgroup *parent = parent_mem_cgroup(mem); 4863 __mem_cgroup_free(mem); 4864 if (parent) 4865 mem_cgroup_put(parent); 4866 } 4867} 4868 4869static void mem_cgroup_put(struct mem_cgroup *mem) 4870{ 4871 __mem_cgroup_put(mem, 1); 4872} 4873 4874/* 4875 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. 4876 */ 4877static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem) 4878{ 4879 if (!mem->res.parent) 4880 return NULL; 4881 return mem_cgroup_from_res_counter(mem->res.parent, res); 4882} 4883 4884#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4885static void __init enable_swap_cgroup(void) 4886{ 4887 if (!mem_cgroup_disabled() && really_do_swap_account) 4888 do_swap_account = 1; 4889} 4890#else 4891static void __init enable_swap_cgroup(void) 4892{ 4893} 4894#endif 4895 4896static int mem_cgroup_soft_limit_tree_init(void) 4897{ 4898 struct mem_cgroup_tree_per_node *rtpn; 4899 struct mem_cgroup_tree_per_zone *rtpz; 4900 int tmp, node, zone; 4901 4902 for_each_node_state(node, N_POSSIBLE) { 4903 tmp = node; 4904 if (!node_state(node, N_NORMAL_MEMORY)) 4905 tmp = -1; 4906 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp); 4907 if (!rtpn) 4908 return 1; 4909 4910 soft_limit_tree.rb_tree_per_node[node] = rtpn; 4911 4912 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4913 rtpz = &rtpn->rb_tree_per_zone[zone]; 4914 rtpz->rb_root = RB_ROOT; 4915 spin_lock_init(&rtpz->lock); 4916 } 4917 } 4918 return 0; 4919} 4920 4921static struct cgroup_subsys_state * __ref 4922mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) 4923{ 4924 struct mem_cgroup *mem, *parent; 4925 long error = -ENOMEM; 4926 int node; 4927 4928 mem = mem_cgroup_alloc(); 4929 if (!mem) 4930 return ERR_PTR(error); 4931 4932 for_each_node_state(node, N_POSSIBLE) 4933 if (alloc_mem_cgroup_per_zone_info(mem, node)) 4934 goto free_out; 4935 4936 /* root ? */ 4937 if (cont->parent == NULL) { 4938 int cpu; 4939 enable_swap_cgroup(); 4940 parent = NULL; 4941 root_mem_cgroup = mem; 4942 if (mem_cgroup_soft_limit_tree_init()) 4943 goto free_out; 4944 for_each_possible_cpu(cpu) { 4945 struct memcg_stock_pcp *stock = 4946 &per_cpu(memcg_stock, cpu); 4947 INIT_WORK(&stock->work, drain_local_stock); 4948 } 4949 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 4950 } else { 4951 parent = mem_cgroup_from_cont(cont->parent); 4952 mem->use_hierarchy = parent->use_hierarchy; 4953 mem->oom_kill_disable = parent->oom_kill_disable; 4954 } 4955 4956 if (parent && parent->use_hierarchy) { 4957 res_counter_init(&mem->res, &parent->res); 4958 res_counter_init(&mem->memsw, &parent->memsw); 4959 /* 4960 * We increment refcnt of the parent to ensure that we can 4961 * safely access it on res_counter_charge/uncharge. 4962 * This refcnt will be decremented when freeing this 4963 * mem_cgroup(see mem_cgroup_put). 4964 */ 4965 mem_cgroup_get(parent); 4966 } else { 4967 res_counter_init(&mem->res, NULL); 4968 res_counter_init(&mem->memsw, NULL); 4969 } 4970 mem->last_scanned_child = 0; 4971 mem->last_scanned_node = MAX_NUMNODES; 4972 INIT_LIST_HEAD(&mem->oom_notify); 4973 4974 if (parent) 4975 mem->swappiness = mem_cgroup_swappiness(parent); 4976 atomic_set(&mem->refcnt, 1); 4977 mem->move_charge_at_immigrate = 0; 4978 mutex_init(&mem->thresholds_lock); 4979 return &mem->css; 4980free_out: 4981 __mem_cgroup_free(mem); 4982 root_mem_cgroup = NULL; 4983 return ERR_PTR(error); 4984} 4985 4986static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss, 4987 struct cgroup *cont) 4988{ 4989 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 4990 4991 return mem_cgroup_force_empty(mem, false); 4992} 4993 4994static void mem_cgroup_destroy(struct cgroup_subsys *ss, 4995 struct cgroup *cont) 4996{ 4997 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 4998 4999 mem_cgroup_put(mem); 5000} 5001 5002static int mem_cgroup_populate(struct cgroup_subsys *ss, 5003 struct cgroup *cont) 5004{ 5005 int ret; 5006 5007 ret = cgroup_add_files(cont, ss, mem_cgroup_files, 5008 ARRAY_SIZE(mem_cgroup_files)); 5009 5010 if (!ret) 5011 ret = register_memsw_files(cont, ss); 5012 return ret; 5013} 5014 5015#ifdef CONFIG_MMU 5016/* Handlers for move charge at task migration. */ 5017#define PRECHARGE_COUNT_AT_ONCE 256 5018static int mem_cgroup_do_precharge(unsigned long count) 5019{ 5020 int ret = 0; 5021 int batch_count = PRECHARGE_COUNT_AT_ONCE; 5022 struct mem_cgroup *mem = mc.to; 5023 5024 if (mem_cgroup_is_root(mem)) { 5025 mc.precharge += count; 5026 /* we don't need css_get for root */ 5027 return ret; 5028 } 5029 /* try to charge at once */ 5030 if (count > 1) { 5031 struct res_counter *dummy; 5032 /* 5033 * "mem" cannot be under rmdir() because we've already checked 5034 * by cgroup_lock_live_cgroup() that it is not removed and we 5035 * are still under the same cgroup_mutex. So we can postpone 5036 * css_get(). 5037 */ 5038 if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy)) 5039 goto one_by_one; 5040 if (do_swap_account && res_counter_charge(&mem->memsw, 5041 PAGE_SIZE * count, &dummy)) { 5042 res_counter_uncharge(&mem->res, PAGE_SIZE * count); 5043 goto one_by_one; 5044 } 5045 mc.precharge += count; 5046 return ret; 5047 } 5048one_by_one: 5049 /* fall back to one by one charge */ 5050 while (count--) { 5051 if (signal_pending(current)) { 5052 ret = -EINTR; 5053 break; 5054 } 5055 if (!batch_count--) { 5056 batch_count = PRECHARGE_COUNT_AT_ONCE; 5057 cond_resched(); 5058 } 5059 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false); 5060 if (ret || !mem) 5061 /* mem_cgroup_clear_mc() will do uncharge later */ 5062 return -ENOMEM; 5063 mc.precharge++; 5064 } 5065 return ret; 5066} 5067 5068/** 5069 * is_target_pte_for_mc - check a pte whether it is valid for move charge 5070 * @vma: the vma the pte to be checked belongs 5071 * @addr: the address corresponding to the pte to be checked 5072 * @ptent: the pte to be checked 5073 * @target: the pointer the target page or swap ent will be stored(can be NULL) 5074 * 5075 * Returns 5076 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 5077 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 5078 * move charge. if @target is not NULL, the page is stored in target->page 5079 * with extra refcnt got(Callers should handle it). 5080 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 5081 * target for charge migration. if @target is not NULL, the entry is stored 5082 * in target->ent. 5083 * 5084 * Called with pte lock held. 5085 */ 5086union mc_target { 5087 struct page *page; 5088 swp_entry_t ent; 5089}; 5090 5091enum mc_target_type { 5092 MC_TARGET_NONE, /* not used */ 5093 MC_TARGET_PAGE, 5094 MC_TARGET_SWAP, 5095}; 5096 5097static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5098 unsigned long addr, pte_t ptent) 5099{ 5100 struct page *page = vm_normal_page(vma, addr, ptent); 5101 5102 if (!page || !page_mapped(page)) 5103 return NULL; 5104 if (PageAnon(page)) { 5105 /* we don't move shared anon */ 5106 if (!move_anon() || page_mapcount(page) > 2) 5107 return NULL; 5108 } else if (!move_file()) 5109 /* we ignore mapcount for file pages */ 5110 return NULL; 5111 if (!get_page_unless_zero(page)) 5112 return NULL; 5113 5114 return page; 5115} 5116 5117static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5118 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5119{ 5120 int usage_count; 5121 struct page *page = NULL; 5122 swp_entry_t ent = pte_to_swp_entry(ptent); 5123 5124 if (!move_anon() || non_swap_entry(ent)) 5125 return NULL; 5126 usage_count = mem_cgroup_count_swap_user(ent, &page); 5127 if (usage_count > 1) { /* we don't move shared anon */ 5128 if (page) 5129 put_page(page); 5130 return NULL; 5131 } 5132 if (do_swap_account) 5133 entry->val = ent.val; 5134 5135 return page; 5136} 5137 5138static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5139 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5140{ 5141 struct page *page = NULL; 5142 struct inode *inode; 5143 struct address_space *mapping; 5144 pgoff_t pgoff; 5145 5146 if (!vma->vm_file) /* anonymous vma */ 5147 return NULL; 5148 if (!move_file()) 5149 return NULL; 5150 5151 inode = vma->vm_file->f_path.dentry->d_inode; 5152 mapping = vma->vm_file->f_mapping; 5153 if (pte_none(ptent)) 5154 pgoff = linear_page_index(vma, addr); 5155 else /* pte_file(ptent) is true */ 5156 pgoff = pte_to_pgoff(ptent); 5157 5158 /* page is moved even if it's not RSS of this task(page-faulted). */ 5159 if (!mapping_cap_swap_backed(mapping)) { /* normal file */ 5160 page = find_get_page(mapping, pgoff); 5161 } else { /* shmem/tmpfs file. we should take account of swap too. */ 5162 swp_entry_t ent; 5163 mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent); 5164 if (do_swap_account) 5165 entry->val = ent.val; 5166 } 5167 5168 return page; 5169} 5170 5171static int is_target_pte_for_mc(struct vm_area_struct *vma, 5172 unsigned long addr, pte_t ptent, union mc_target *target) 5173{ 5174 struct page *page = NULL; 5175 struct page_cgroup *pc; 5176 int ret = 0; 5177 swp_entry_t ent = { .val = 0 }; 5178 5179 if (pte_present(ptent)) 5180 page = mc_handle_present_pte(vma, addr, ptent); 5181 else if (is_swap_pte(ptent)) 5182 page = mc_handle_swap_pte(vma, addr, ptent, &ent); 5183 else if (pte_none(ptent) || pte_file(ptent)) 5184 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5185 5186 if (!page && !ent.val) 5187 return 0; 5188 if (page) { 5189 pc = lookup_page_cgroup(page); 5190 /* 5191 * Do only loose check w/o page_cgroup lock. 5192 * mem_cgroup_move_account() checks the pc is valid or not under 5193 * the lock. 5194 */ 5195 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { 5196 ret = MC_TARGET_PAGE; 5197 if (target) 5198 target->page = page; 5199 } 5200 if (!ret || !target) 5201 put_page(page); 5202 } 5203 /* There is a swap entry and a page doesn't exist or isn't charged */ 5204 if (ent.val && !ret && 5205 css_id(&mc.from->css) == lookup_swap_cgroup(ent)) { 5206 ret = MC_TARGET_SWAP; 5207 if (target) 5208 target->ent = ent; 5209 } 5210 return ret; 5211} 5212 5213static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5214 unsigned long addr, unsigned long end, 5215 struct mm_walk *walk) 5216{ 5217 struct vm_area_struct *vma = walk->private; 5218 pte_t *pte; 5219 spinlock_t *ptl; 5220 5221 split_huge_page_pmd(walk->mm, pmd); 5222 5223 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5224 for (; addr != end; pte++, addr += PAGE_SIZE) 5225 if (is_target_pte_for_mc(vma, addr, *pte, NULL)) 5226 mc.precharge++; /* increment precharge temporarily */ 5227 pte_unmap_unlock(pte - 1, ptl); 5228 cond_resched(); 5229 5230 return 0; 5231} 5232 5233static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5234{ 5235 unsigned long precharge; 5236 struct vm_area_struct *vma; 5237 5238 down_read(&mm->mmap_sem); 5239 for (vma = mm->mmap; vma; vma = vma->vm_next) { 5240 struct mm_walk mem_cgroup_count_precharge_walk = { 5241 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5242 .mm = mm, 5243 .private = vma, 5244 }; 5245 if (is_vm_hugetlb_page(vma)) 5246 continue; 5247 walk_page_range(vma->vm_start, vma->vm_end, 5248 &mem_cgroup_count_precharge_walk); 5249 } 5250 up_read(&mm->mmap_sem); 5251 5252 precharge = mc.precharge; 5253 mc.precharge = 0; 5254 5255 return precharge; 5256} 5257 5258static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5259{ 5260 unsigned long precharge = mem_cgroup_count_precharge(mm); 5261 5262 VM_BUG_ON(mc.moving_task); 5263 mc.moving_task = current; 5264 return mem_cgroup_do_precharge(precharge); 5265} 5266 5267/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 5268static void __mem_cgroup_clear_mc(void) 5269{ 5270 struct mem_cgroup *from = mc.from; 5271 struct mem_cgroup *to = mc.to; 5272 5273 /* we must uncharge all the leftover precharges from mc.to */ 5274 if (mc.precharge) { 5275 __mem_cgroup_cancel_charge(mc.to, mc.precharge); 5276 mc.precharge = 0; 5277 } 5278 /* 5279 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5280 * we must uncharge here. 5281 */ 5282 if (mc.moved_charge) { 5283 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge); 5284 mc.moved_charge = 0; 5285 } 5286 /* we must fixup refcnts and charges */ 5287 if (mc.moved_swap) { 5288 /* uncharge swap account from the old cgroup */ 5289 if (!mem_cgroup_is_root(mc.from)) 5290 res_counter_uncharge(&mc.from->memsw, 5291 PAGE_SIZE * mc.moved_swap); 5292 __mem_cgroup_put(mc.from, mc.moved_swap); 5293 5294 if (!mem_cgroup_is_root(mc.to)) { 5295 /* 5296 * we charged both to->res and to->memsw, so we should 5297 * uncharge to->res. 5298 */ 5299 res_counter_uncharge(&mc.to->res, 5300 PAGE_SIZE * mc.moved_swap); 5301 } 5302 /* we've already done mem_cgroup_get(mc.to) */ 5303 mc.moved_swap = 0; 5304 } 5305 memcg_oom_recover(from); 5306 memcg_oom_recover(to); 5307 wake_up_all(&mc.waitq); 5308} 5309 5310static void mem_cgroup_clear_mc(void) 5311{ 5312 struct mem_cgroup *from = mc.from; 5313 5314 /* 5315 * we must clear moving_task before waking up waiters at the end of 5316 * task migration. 5317 */ 5318 mc.moving_task = NULL; 5319 __mem_cgroup_clear_mc(); 5320 spin_lock(&mc.lock); 5321 mc.from = NULL; 5322 mc.to = NULL; 5323 spin_unlock(&mc.lock); 5324 mem_cgroup_end_move(from); 5325} 5326 5327static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5328 struct cgroup *cgroup, 5329 struct task_struct *p) 5330{ 5331 int ret = 0; 5332 struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup); 5333 5334 if (mem->move_charge_at_immigrate) { 5335 struct mm_struct *mm; 5336 struct mem_cgroup *from = mem_cgroup_from_task(p); 5337 5338 VM_BUG_ON(from == mem); 5339 5340 mm = get_task_mm(p); 5341 if (!mm) 5342 return 0; 5343 /* We move charges only when we move a owner of the mm */ 5344 if (mm->owner == p) { 5345 VM_BUG_ON(mc.from); 5346 VM_BUG_ON(mc.to); 5347 VM_BUG_ON(mc.precharge); 5348 VM_BUG_ON(mc.moved_charge); 5349 VM_BUG_ON(mc.moved_swap); 5350 mem_cgroup_start_move(from); 5351 spin_lock(&mc.lock); 5352 mc.from = from; 5353 mc.to = mem; 5354 spin_unlock(&mc.lock); 5355 /* We set mc.moving_task later */ 5356 5357 ret = mem_cgroup_precharge_mc(mm); 5358 if (ret) 5359 mem_cgroup_clear_mc(); 5360 } 5361 mmput(mm); 5362 } 5363 return ret; 5364} 5365 5366static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, 5367 struct cgroup *cgroup, 5368 struct task_struct *p) 5369{ 5370 mem_cgroup_clear_mc(); 5371} 5372 5373static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 5374 unsigned long addr, unsigned long end, 5375 struct mm_walk *walk) 5376{ 5377 int ret = 0; 5378 struct vm_area_struct *vma = walk->private; 5379 pte_t *pte; 5380 spinlock_t *ptl; 5381 5382 split_huge_page_pmd(walk->mm, pmd); 5383retry: 5384 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5385 for (; addr != end; addr += PAGE_SIZE) { 5386 pte_t ptent = *(pte++); 5387 union mc_target target; 5388 int type; 5389 struct page *page; 5390 struct page_cgroup *pc; 5391 swp_entry_t ent; 5392 5393 if (!mc.precharge) 5394 break; 5395 5396 type = is_target_pte_for_mc(vma, addr, ptent, &target); 5397 switch (type) { 5398 case MC_TARGET_PAGE: 5399 page = target.page; 5400 if (isolate_lru_page(page)) 5401 goto put; 5402 pc = lookup_page_cgroup(page); 5403 if (!mem_cgroup_move_account(page, 1, pc, 5404 mc.from, mc.to, false)) { 5405 mc.precharge--; 5406 /* we uncharge from mc.from later. */ 5407 mc.moved_charge++; 5408 } 5409 putback_lru_page(page); 5410put: /* is_target_pte_for_mc() gets the page */ 5411 put_page(page); 5412 break; 5413 case MC_TARGET_SWAP: 5414 ent = target.ent; 5415 if (!mem_cgroup_move_swap_account(ent, 5416 mc.from, mc.to, false)) { 5417 mc.precharge--; 5418 /* we fixup refcnts and charges later. */ 5419 mc.moved_swap++; 5420 } 5421 break; 5422 default: 5423 break; 5424 } 5425 } 5426 pte_unmap_unlock(pte - 1, ptl); 5427 cond_resched(); 5428 5429 if (addr != end) { 5430 /* 5431 * We have consumed all precharges we got in can_attach(). 5432 * We try charge one by one, but don't do any additional 5433 * charges to mc.to if we have failed in charge once in attach() 5434 * phase. 5435 */ 5436 ret = mem_cgroup_do_precharge(1); 5437 if (!ret) 5438 goto retry; 5439 } 5440 5441 return ret; 5442} 5443 5444static void mem_cgroup_move_charge(struct mm_struct *mm) 5445{ 5446 struct vm_area_struct *vma; 5447 5448 lru_add_drain_all(); 5449retry: 5450 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 5451 /* 5452 * Someone who are holding the mmap_sem might be waiting in 5453 * waitq. So we cancel all extra charges, wake up all waiters, 5454 * and retry. Because we cancel precharges, we might not be able 5455 * to move enough charges, but moving charge is a best-effort 5456 * feature anyway, so it wouldn't be a big problem. 5457 */ 5458 __mem_cgroup_clear_mc(); 5459 cond_resched(); 5460 goto retry; 5461 } 5462 for (vma = mm->mmap; vma; vma = vma->vm_next) { 5463 int ret; 5464 struct mm_walk mem_cgroup_move_charge_walk = { 5465 .pmd_entry = mem_cgroup_move_charge_pte_range, 5466 .mm = mm, 5467 .private = vma, 5468 }; 5469 if (is_vm_hugetlb_page(vma)) 5470 continue; 5471 ret = walk_page_range(vma->vm_start, vma->vm_end, 5472 &mem_cgroup_move_charge_walk); 5473 if (ret) 5474 /* 5475 * means we have consumed all precharges and failed in 5476 * doing additional charge. Just abandon here. 5477 */ 5478 break; 5479 } 5480 up_read(&mm->mmap_sem); 5481} 5482 5483static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5484 struct cgroup *cont, 5485 struct cgroup *old_cont, 5486 struct task_struct *p) 5487{ 5488 struct mm_struct *mm = get_task_mm(p); 5489 5490 if (mm) { 5491 if (mc.to) 5492 mem_cgroup_move_charge(mm); 5493 put_swap_token(mm); 5494 mmput(mm); 5495 } 5496 if (mc.to) 5497 mem_cgroup_clear_mc(); 5498} 5499#else /* !CONFIG_MMU */ 5500static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5501 struct cgroup *cgroup, 5502 struct task_struct *p) 5503{ 5504 return 0; 5505} 5506static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, 5507 struct cgroup *cgroup, 5508 struct task_struct *p) 5509{ 5510} 5511static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5512 struct cgroup *cont, 5513 struct cgroup *old_cont, 5514 struct task_struct *p) 5515{ 5516} 5517#endif 5518 5519struct cgroup_subsys mem_cgroup_subsys = { 5520 .name = "memory", 5521 .subsys_id = mem_cgroup_subsys_id, 5522 .create = mem_cgroup_create, 5523 .pre_destroy = mem_cgroup_pre_destroy, 5524 .destroy = mem_cgroup_destroy, 5525 .populate = mem_cgroup_populate, 5526 .can_attach = mem_cgroup_can_attach, 5527 .cancel_attach = mem_cgroup_cancel_attach, 5528 .attach = mem_cgroup_move_task, 5529 .early_init = 0, 5530 .use_id = 1, 5531}; 5532 5533#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 5534static int __init enable_swap_account(char *s) 5535{ 5536 /* consider enabled if no parameter or 1 is given */ 5537 if (!strcmp(s, "1")) 5538 really_do_swap_account = 1; 5539 else if (!strcmp(s, "0")) 5540 really_do_swap_account = 0; 5541 return 1; 5542} 5543__setup("swapaccount=", enable_swap_account); 5544 5545#endif 5546