memcontrol.c revision 9f50fad65b87a8776ae989ca059ad6c17925dfc3
1/* memcontrol.c - Memory Controller 2 * 3 * Copyright IBM Corporation, 2007 4 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 5 * 6 * Copyright 2007 OpenVZ SWsoft Inc 7 * Author: Pavel Emelianov <xemul@openvz.org> 8 * 9 * Memory thresholds 10 * Copyright (C) 2009 Nokia Corporation 11 * Author: Kirill A. Shutemov 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * GNU General Public License for more details. 22 */ 23 24#include <linux/res_counter.h> 25#include <linux/memcontrol.h> 26#include <linux/cgroup.h> 27#include <linux/mm.h> 28#include <linux/hugetlb.h> 29#include <linux/pagemap.h> 30#include <linux/smp.h> 31#include <linux/page-flags.h> 32#include <linux/backing-dev.h> 33#include <linux/bit_spinlock.h> 34#include <linux/rcupdate.h> 35#include <linux/limits.h> 36#include <linux/mutex.h> 37#include <linux/rbtree.h> 38#include <linux/slab.h> 39#include <linux/swap.h> 40#include <linux/swapops.h> 41#include <linux/spinlock.h> 42#include <linux/eventfd.h> 43#include <linux/sort.h> 44#include <linux/fs.h> 45#include <linux/seq_file.h> 46#include <linux/vmalloc.h> 47#include <linux/mm_inline.h> 48#include <linux/page_cgroup.h> 49#include <linux/cpu.h> 50#include <linux/oom.h> 51#include "internal.h" 52 53#include <asm/uaccess.h> 54 55#include <trace/events/vmscan.h> 56 57struct cgroup_subsys mem_cgroup_subsys __read_mostly; 58#define MEM_CGROUP_RECLAIM_RETRIES 5 59struct mem_cgroup *root_mem_cgroup __read_mostly; 60 61#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 62/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ 63int do_swap_account __read_mostly; 64 65/* for remembering the boot option */ 66#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED 67static int really_do_swap_account __initdata = 1; 68#else 69static int really_do_swap_account __initdata = 0; 70#endif 71 72#else 73#define do_swap_account (0) 74#endif 75 76 77/* 78 * Statistics for memory cgroup. 79 */ 80enum mem_cgroup_stat_index { 81 /* 82 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss. 83 */ 84 MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */ 85 MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ 86 MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ 87 MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */ 88 MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */ 89 MEM_CGROUP_ON_MOVE, /* someone is moving account between groups */ 90 MEM_CGROUP_STAT_NSTATS, 91}; 92 93enum mem_cgroup_events_index { 94 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */ 95 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */ 96 MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */ 97 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */ 98 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */ 99 MEM_CGROUP_EVENTS_NSTATS, 100}; 101/* 102 * The per-memcg event counter is incremented at every pagein/pageout. With THP, 103 * it will be incremented by the number of pages. This counter is used 104 * to trigger some periodic events. This is straightforward and better 105 * than using jiffies etc. to handle periodic memcg events.
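 *
 * For illustration (hypothetical numbers, not from this file): on x86-64 a
 * single 2MB THP charge adds 512 to MEM_CGROUP_EVENTS_COUNT in one step, so
 * one hugepage charge can jump past an event target at once; the wrap-safe
 * "(long)next - (long)val < 0" test in __memcg_event_check() below still
 * fires exactly once in that case, and the target is then re-armed.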
106 */ 107enum mem_cgroup_events_target { 108 MEM_CGROUP_TARGET_THRESH, 109 MEM_CGROUP_TARGET_SOFTLIMIT, 110 MEM_CGROUP_TARGET_NUMAINFO, 111 MEM_CGROUP_NTARGETS, 112}; 113#define THRESHOLDS_EVENTS_TARGET (128) 114#define SOFTLIMIT_EVENTS_TARGET (1024) 115#define NUMAINFO_EVENTS_TARGET (1024) 116 117struct mem_cgroup_stat_cpu { 118 long count[MEM_CGROUP_STAT_NSTATS]; 119 unsigned long events[MEM_CGROUP_EVENTS_NSTATS]; 120 unsigned long targets[MEM_CGROUP_NTARGETS]; 121}; 122 123/* 124 * per-zone information in memory controller. 125 */ 126struct mem_cgroup_per_zone { 127 /* 128 * spin_lock to protect the per cgroup LRU 129 */ 130 struct list_head lists[NR_LRU_LISTS]; 131 unsigned long count[NR_LRU_LISTS]; 132 133 struct zone_reclaim_stat reclaim_stat; 134 struct rb_node tree_node; /* RB tree node */ 135 unsigned long long usage_in_excess;/* Set to the value by which */ 136 /* the soft limit is exceeded*/ 137 bool on_tree; 138 struct mem_cgroup *mem; /* Back pointer, we cannot */ 139 /* use container_of */ 140}; 141/* Macro for accessing counter */ 142#define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)]) 143 144struct mem_cgroup_per_node { 145 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; 146}; 147 148struct mem_cgroup_lru_info { 149 struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES]; 150}; 151 152/* 153 * Cgroups above their limits are maintained in a RB-Tree, independent of 154 * their hierarchy representation 155 */ 156 157struct mem_cgroup_tree_per_zone { 158 struct rb_root rb_root; 159 spinlock_t lock; 160}; 161 162struct mem_cgroup_tree_per_node { 163 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES]; 164}; 165 166struct mem_cgroup_tree { 167 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 168}; 169 170static struct mem_cgroup_tree soft_limit_tree __read_mostly; 171 172struct mem_cgroup_threshold { 173 struct eventfd_ctx *eventfd; 174 u64 threshold; 175}; 176 177/* For threshold */ 178struct mem_cgroup_threshold_ary { 179 /* An array index points to threshold just below usage. */ 180 int current_threshold; 181 /* Size of entries[] */ 182 unsigned int size; 183 /* Array of thresholds */ 184 struct mem_cgroup_threshold entries[0]; 185}; 186 187struct mem_cgroup_thresholds { 188 /* Primary thresholds array */ 189 struct mem_cgroup_threshold_ary *primary; 190 /* 191 * Spare threshold array. 192 * This is needed to make mem_cgroup_unregister_event() "never fail". 193 * It must be able to store at least primary->size - 1 entries. 
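 *
 * A minimal sketch of the intended primary/spare swap (assumed shape only,
 * error handling omitted; the real register/unregister code appears later
 * in this file):
 *
 *	mutex_lock(&memcg->thresholds_lock);
 *	new = thresholds->spare;		/* rebuild the array here */
 *	rcu_assign_pointer(thresholds->primary, new);
 *	thresholds->spare = old;		/* reusable after a grace period */
 *	synchronize_rcu();
 *	mutex_unlock(&memcg->thresholds_lock);
 *
 * Because unregistering only ever removes entries, a spare able to hold
 * primary->size - 1 entries always suffices without allocating, which is
 * what lets unregister "never fail".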
194 */ 195 struct mem_cgroup_threshold_ary *spare; 196}; 197 198/* for OOM */ 199struct mem_cgroup_eventfd_list { 200 struct list_head list; 201 struct eventfd_ctx *eventfd; 202}; 203 204static void mem_cgroup_threshold(struct mem_cgroup *mem); 205static void mem_cgroup_oom_notify(struct mem_cgroup *mem); 206 207enum { 208 SCAN_BY_LIMIT, 209 SCAN_BY_SYSTEM, 210 NR_SCAN_CONTEXT, 211 SCAN_BY_SHRINK, /* not recorded now */ 212}; 213 214enum { 215 SCAN, 216 SCAN_ANON, 217 SCAN_FILE, 218 ROTATE, 219 ROTATE_ANON, 220 ROTATE_FILE, 221 FREED, 222 FREED_ANON, 223 FREED_FILE, 224 ELAPSED, 225 NR_SCANSTATS, 226}; 227 228struct scanstat { 229 spinlock_t lock; 230 unsigned long stats[NR_SCAN_CONTEXT][NR_SCANSTATS]; 231 unsigned long rootstats[NR_SCAN_CONTEXT][NR_SCANSTATS]; 232}; 233 234const char *scanstat_string[NR_SCANSTATS] = { 235 "scanned_pages", 236 "scanned_anon_pages", 237 "scanned_file_pages", 238 "rotated_pages", 239 "rotated_anon_pages", 240 "rotated_file_pages", 241 "freed_pages", 242 "freed_anon_pages", 243 "freed_file_pages", 244 "elapsed_ns", 245}; 246#define SCANSTAT_WORD_LIMIT "_by_limit" 247#define SCANSTAT_WORD_SYSTEM "_by_system" 248#define SCANSTAT_WORD_HIERARCHY "_under_hierarchy" 249 250 251/* 252 * The memory controller data structure. The memory controller controls both 253 * page cache and RSS per cgroup. We would eventually like to provide 254 * statistics based on the statistics developed by Rik Van Riel for clock-pro, 255 * to help the administrator determine what knobs to tune. 256 * 257 * TODO: Add a water mark for the memory controller. Reclaim will begin when 258 * we hit the water mark. Maybe even add a low water mark, such that 259 * no reclaim occurs from a cgroup at its low water mark; this is 260 * a feature that will be implemented much later in the future. 261 */ 262struct mem_cgroup { 263 struct cgroup_subsys_state css; 264 /* 265 * the counter to account for memory usage 266 */ 267 struct res_counter res; 268 /* 269 * the counter to account for mem+swap usage. 270 */ 271 struct res_counter memsw; 272 /* 273 * Per cgroup active and inactive list, similar to the 274 * per zone LRU lists. 275 */ 276 struct mem_cgroup_lru_info info; 277 /* 278 * While reclaiming in a hierarchy, we cache the last child we 279 * reclaimed from. 280 */ 281 int last_scanned_child; 282 int last_scanned_node; 283#if MAX_NUMNODES > 1 284 nodemask_t scan_nodes; 285 atomic_t numainfo_events; 286 atomic_t numainfo_updating; 287#endif 288 /* 289 * Should the accounting and control be hierarchical, per subtree? 290 */ 291 bool use_hierarchy; 292 293 bool oom_lock; 294 atomic_t under_oom; 295 296 atomic_t refcnt; 297 298 int swappiness; 299 /* OOM-Killer disable */ 300 int oom_kill_disable; 301 302 /* set when res.limit == memsw.limit */ 303 bool memsw_is_minimum; 304 305 /* protect arrays of thresholds */ 306 struct mutex thresholds_lock; 307 308 /* thresholds for memory usage. RCU-protected */ 309 struct mem_cgroup_thresholds thresholds; 310 311 /* thresholds for mem+swap usage. RCU-protected */ 312 struct mem_cgroup_thresholds memsw_thresholds; 313 314 /* For oom notifier event fd */ 315 struct list_head oom_notify; 316 /* For recording LRU-scan statistics */ 317 struct scanstat scanstat; 318 /* 319 * Should we move charges of a task when a task is moved into this 320 * mem_cgroup ? And what type of charges should we move ? 321 */ 322 unsigned long move_charge_at_immigrate; 323 /* 324 * percpu counter.
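 *
 * Writers update the local CPU's slot with this_cpu_add(); readers sum the
 * slots of all online CPUs plus nocpu_base, which accumulates the counts
 * folded in when a CPU goes offline (see mem_cgroup_read_stat() below).
 * For example, two online CPUs holding 10 and 22 in count[MEM_CGROUP_STAT_RSS]
 * plus a nocpu_base of 3 read back as 35. (Illustrative numbers.)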
325 */ 326 struct mem_cgroup_stat_cpu *stat; 327 /* 328 * used when a cpu is offlined or other synchronizations 329 * See mem_cgroup_read_stat(). 330 */ 331 struct mem_cgroup_stat_cpu nocpu_base; 332 spinlock_t pcp_counter_lock; 333}; 334 335/* Stuff for moving charges at task migration. */ 336/* 337 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a 338 * left-shifted bitmap of these types. 339 */ 340enum move_type { 341 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */ 342 MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */ 343 NR_MOVE_TYPE, 344}; 345 346/* "mc" and its members are protected by cgroup_mutex */ 347static struct move_charge_struct { 348 spinlock_t lock; /* for from, to */ 349 struct mem_cgroup *from; 350 struct mem_cgroup *to; 351 unsigned long precharge; 352 unsigned long moved_charge; 353 unsigned long moved_swap; 354 struct task_struct *moving_task; /* a task moving charges */ 355 wait_queue_head_t waitq; /* a waitq for other context */ 356} mc = { 357 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 358 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 359}; 360 361static bool move_anon(void) 362{ 363 return test_bit(MOVE_CHARGE_TYPE_ANON, 364 &mc.to->move_charge_at_immigrate); 365} 366 367static bool move_file(void) 368{ 369 return test_bit(MOVE_CHARGE_TYPE_FILE, 370 &mc.to->move_charge_at_immigrate); 371} 372 373/* 374 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 375 * limit reclaim to prevent infinite loops, if they ever occur. 376 */ 377#define MEM_CGROUP_MAX_RECLAIM_LOOPS (100) 378#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2) 379 380enum charge_type { 381 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 382 MEM_CGROUP_CHARGE_TYPE_MAPPED, 383 MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */ 384 MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */ 385 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */ 386 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */ 387 NR_CHARGE_TYPE, 388}; 389 390/* for encoding cft->private value on file */ 391#define _MEM (0) 392#define _MEMSWAP (1) 393#define _OOM_TYPE (2) 394#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val)) 395#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff) 396#define MEMFILE_ATTR(val) ((val) & 0xffff) 397/* Used for the OOM notifier */ 398#define OOM_CONTROL (0) 399 400/* 401 * Reclaim flags for mem_cgroup_hierarchical_reclaim 402 */ 403#define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0 404#define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT) 405#define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1 406#define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT) 407#define MEM_CGROUP_RECLAIM_SOFT_BIT 0x2 408#define MEM_CGROUP_RECLAIM_SOFT (1 << MEM_CGROUP_RECLAIM_SOFT_BIT) 409 410static void mem_cgroup_get(struct mem_cgroup *mem); 411static void mem_cgroup_put(struct mem_cgroup *mem); 412static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem); 413static void drain_all_stock_async(struct mem_cgroup *mem); 414 415static struct mem_cgroup_per_zone * 416mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) 417{ 418 return &mem->info.nodeinfo[nid]->zoneinfo[zid]; 419} 420 421struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem) 422{ 423 return &mem->css; 424} 425 426static struct mem_cgroup_per_zone * 427page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page) 428{ 429 int nid = page_to_nid(page); 430 int zid = page_zonenum(page); 431 432 return mem_cgroup_zoneinfo(mem,
nid, zid); 433} 434 435static struct mem_cgroup_tree_per_zone * 436soft_limit_tree_node_zone(int nid, int zid) 437{ 438 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 439} 440 441static struct mem_cgroup_tree_per_zone * 442soft_limit_tree_from_page(struct page *page) 443{ 444 int nid = page_to_nid(page); 445 int zid = page_zonenum(page); 446 447 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 448} 449 450static void 451__mem_cgroup_insert_exceeded(struct mem_cgroup *mem, 452 struct mem_cgroup_per_zone *mz, 453 struct mem_cgroup_tree_per_zone *mctz, 454 unsigned long long new_usage_in_excess) 455{ 456 struct rb_node **p = &mctz->rb_root.rb_node; 457 struct rb_node *parent = NULL; 458 struct mem_cgroup_per_zone *mz_node; 459 460 if (mz->on_tree) 461 return; 462 463 mz->usage_in_excess = new_usage_in_excess; 464 if (!mz->usage_in_excess) 465 return; 466 while (*p) { 467 parent = *p; 468 mz_node = rb_entry(parent, struct mem_cgroup_per_zone, 469 tree_node); 470 if (mz->usage_in_excess < mz_node->usage_in_excess) 471 p = &(*p)->rb_left; 472 /* 473 * We can't avoid mem cgroups that are over their soft 474 * limit by the same amount 475 */ 476 else if (mz->usage_in_excess >= mz_node->usage_in_excess) 477 p = &(*p)->rb_right; 478 } 479 rb_link_node(&mz->tree_node, parent, p); 480 rb_insert_color(&mz->tree_node, &mctz->rb_root); 481 mz->on_tree = true; 482} 483 484static void 485__mem_cgroup_remove_exceeded(struct mem_cgroup *mem, 486 struct mem_cgroup_per_zone *mz, 487 struct mem_cgroup_tree_per_zone *mctz) 488{ 489 if (!mz->on_tree) 490 return; 491 rb_erase(&mz->tree_node, &mctz->rb_root); 492 mz->on_tree = false; 493} 494 495static void 496mem_cgroup_remove_exceeded(struct mem_cgroup *mem, 497 struct mem_cgroup_per_zone *mz, 498 struct mem_cgroup_tree_per_zone *mctz) 499{ 500 spin_lock(&mctz->lock); 501 __mem_cgroup_remove_exceeded(mem, mz, mctz); 502 spin_unlock(&mctz->lock); 503} 504 505 506static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page) 507{ 508 unsigned long long excess; 509 struct mem_cgroup_per_zone *mz; 510 struct mem_cgroup_tree_per_zone *mctz; 511 int nid = page_to_nid(page); 512 int zid = page_zonenum(page); 513 mctz = soft_limit_tree_from_page(page); 514 515 /* 516 * Necessary to update all ancestors when hierarchy is used, 517 * because their event counter is not touched. 518 */ 519 for (; mem; mem = parent_mem_cgroup(mem)) { 520 mz = mem_cgroup_zoneinfo(mem, nid, zid); 521 excess = res_counter_soft_limit_excess(&mem->res); 522 /* 523 * We have to update the tree if mz is on RB-tree or 524 * mem is over its softlimit. 525 */ 526 if (excess || mz->on_tree) { 527 spin_lock(&mctz->lock); 528 /* if on-tree, remove it */ 529 if (mz->on_tree) 530 __mem_cgroup_remove_exceeded(mem, mz, mctz); 531 /* 532 * Insert again. mz->usage_in_excess will be updated. 533 * If excess is 0, no tree ops.
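 *
 * Worked example (illustrative numbers): with use_hierarchy enabled,
 * charging a page in a child that is 2MB over its soft limit while its
 * parent is 6MB over its own re-positions both of their per-zone nodes
 * in this zone's tree, the parent further to the right since the tree is
 * ordered by usage_in_excess; an ancestor whose excess has dropped to
 * zero is only removed, not re-inserted.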
534 */ 535 __mem_cgroup_insert_exceeded(mem, mz, mctz, excess); 536 spin_unlock(&mctz->lock); 537 } 538 } 539} 540 541static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem) 542{ 543 int node, zone; 544 struct mem_cgroup_per_zone *mz; 545 struct mem_cgroup_tree_per_zone *mctz; 546 547 for_each_node_state(node, N_POSSIBLE) { 548 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 549 mz = mem_cgroup_zoneinfo(mem, node, zone); 550 mctz = soft_limit_tree_node_zone(node, zone); 551 mem_cgroup_remove_exceeded(mem, mz, mctz); 552 } 553 } 554} 555 556static struct mem_cgroup_per_zone * 557__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) 558{ 559 struct rb_node *rightmost = NULL; 560 struct mem_cgroup_per_zone *mz; 561 562retry: 563 mz = NULL; 564 rightmost = rb_last(&mctz->rb_root); 565 if (!rightmost) 566 goto done; /* Nothing to reclaim from */ 567 568 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node); 569 /* 570 * Remove the node now but someone else can add it back, 571 * we will add it back at the end of reclaim to its correct 572 * position in the tree. 573 */ 574 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz); 575 if (!res_counter_soft_limit_excess(&mz->mem->res) || 576 !css_tryget(&mz->mem->css)) 577 goto retry; 578done: 579 return mz; 580} 581 582static struct mem_cgroup_per_zone * 583mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) 584{ 585 struct mem_cgroup_per_zone *mz; 586 587 spin_lock(&mctz->lock); 588 mz = __mem_cgroup_largest_soft_limit_node(mctz); 589 spin_unlock(&mctz->lock); 590 return mz; 591} 592 593/* 594 * Implementation Note: reading percpu statistics for memcg. 595 * 596 * Both vmstat[] and percpu_counter have thresholds and do periodic 597 * synchronization to implement "quick" reads. There is a trade-off between 598 * reading cost and precision of the value. Then, we may have a chance to implement 599 * a periodic synchronization of the counters in memcg's counter. 600 * 601 * But this _read() function is used for the user interface now. The user accounts 602 * memory usage by memory cgroup and _always_ requires an exact value because 603 * he accounts memory. Even if we provide a quick-and-fuzzy read, we always 604 * have to visit all online cpus and make the sum. So, for now, unnecessary 605 * synchronization is not implemented. (just implemented for cpu hotplug) 606 * 607 * If there are kernel internal actions which can make use of some not-exact 608 * value, and reading all cpu values can be a performance bottleneck in some 609 * common workload, thresholds and synchronization as in vmstat[] should be 610 * implemented. 611 */ 612static long mem_cgroup_read_stat(struct mem_cgroup *mem, 613 enum mem_cgroup_stat_index idx) 614{ 615 long val = 0; 616 int cpu; 617 618 get_online_cpus(); 619 for_each_online_cpu(cpu) 620 val += per_cpu(mem->stat->count[idx], cpu); 621#ifdef CONFIG_HOTPLUG_CPU 622 spin_lock(&mem->pcp_counter_lock); 623 val += mem->nocpu_base.count[idx]; 624 spin_unlock(&mem->pcp_counter_lock); 625#endif 626 put_online_cpus(); 627 return val; 628} 629 630static void mem_cgroup_swap_statistics(struct mem_cgroup *mem, 631 bool charge) 632{ 633 int val = (charge) ?
1 : -1; 634 this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val); 635} 636 637void mem_cgroup_pgfault(struct mem_cgroup *mem, int val) 638{ 639 this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val); 640} 641 642void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val) 643{ 644 this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val); 645} 646 647static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem, 648 enum mem_cgroup_events_index idx) 649{ 650 unsigned long val = 0; 651 int cpu; 652 653 for_each_online_cpu(cpu) 654 val += per_cpu(mem->stat->events[idx], cpu); 655#ifdef CONFIG_HOTPLUG_CPU 656 spin_lock(&mem->pcp_counter_lock); 657 val += mem->nocpu_base.events[idx]; 658 spin_unlock(&mem->pcp_counter_lock); 659#endif 660 return val; 661} 662 663static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, 664 bool file, int nr_pages) 665{ 666 preempt_disable(); 667 668 if (file) 669 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages); 670 else 671 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages); 672 673 /* pagein of a big page is an event. So, ignore page size */ 674 if (nr_pages > 0) 675 __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); 676 else { 677 __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); 678 nr_pages = -nr_pages; /* for event */ 679 } 680 681 __this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages); 682 683 preempt_enable(); 684} 685 686unsigned long 687mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *mem, int nid, int zid, 688 unsigned int lru_mask) 689{ 690 struct mem_cgroup_per_zone *mz; 691 enum lru_list l; 692 unsigned long ret = 0; 693 694 mz = mem_cgroup_zoneinfo(mem, nid, zid); 695 696 for_each_lru(l) { 697 if (BIT(l) & lru_mask) 698 ret += MEM_CGROUP_ZSTAT(mz, l); 699 } 700 return ret; 701} 702 703static unsigned long 704mem_cgroup_node_nr_lru_pages(struct mem_cgroup *mem, 705 int nid, unsigned int lru_mask) 706{ 707 u64 total = 0; 708 int zid; 709 710 for (zid = 0; zid < MAX_NR_ZONES; zid++) 711 total += mem_cgroup_zone_nr_lru_pages(mem, nid, zid, lru_mask); 712 713 return total; 714} 715 716static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *mem, 717 unsigned int lru_mask) 718{ 719 int nid; 720 u64 total = 0; 721 722 for_each_node_state(nid, N_HIGH_MEMORY) 723 total += mem_cgroup_node_nr_lru_pages(mem, nid, lru_mask); 724 return total; 725} 726 727static bool __memcg_event_check(struct mem_cgroup *mem, int target) 728{ 729 unsigned long val, next; 730 731 val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]); 732 next = this_cpu_read(mem->stat->targets[target]); 733 /* from time_after() in jiffies.h */ 734 return ((long)next - (long)val < 0); 735} 736 737static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target) 738{ 739 unsigned long val, next; 740 741 val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]); 742 743 switch (target) { 744 case MEM_CGROUP_TARGET_THRESH: 745 next = val + THRESHOLDS_EVENTS_TARGET; 746 break; 747 case MEM_CGROUP_TARGET_SOFTLIMIT: 748 next = val + SOFTLIMIT_EVENTS_TARGET; 749 break; 750 case MEM_CGROUP_TARGET_NUMAINFO: 751 next = val + NUMAINFO_EVENTS_TARGET; 752 break; 753 default: 754 return; 755 } 756 757 this_cpu_write(mem->stat->targets[target], next); 758} 759 760/* 761 * Check events in order. 
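 *
 * With the targets above, the nesting below means the cheap threshold
 * check runs every THRESHOLDS_EVENTS_TARGET (128) page in/out events,
 * while the soft-limit tree update and the NUMA info refresh piggyback
 * on it and fire only on roughly every 8th such firing, since their
 * targets are 1024 events.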
762 * 763 */ 764static void memcg_check_events(struct mem_cgroup *mem, struct page *page) 765{ 766 /* threshold event is triggered in finer grain than soft limit */ 767 if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) { 768 mem_cgroup_threshold(mem); 769 __mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH); 770 if (unlikely(__memcg_event_check(mem, 771 MEM_CGROUP_TARGET_SOFTLIMIT))) { 772 mem_cgroup_update_tree(mem, page); 773 __mem_cgroup_target_update(mem, 774 MEM_CGROUP_TARGET_SOFTLIMIT); 775 } 776#if MAX_NUMNODES > 1 777 if (unlikely(__memcg_event_check(mem, 778 MEM_CGROUP_TARGET_NUMAINFO))) { 779 atomic_inc(&mem->numainfo_events); 780 __mem_cgroup_target_update(mem, 781 MEM_CGROUP_TARGET_NUMAINFO); 782 } 783#endif 784 } 785} 786 787static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) 788{ 789 return container_of(cgroup_subsys_state(cont, 790 mem_cgroup_subsys_id), struct mem_cgroup, 791 css); 792} 793 794struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 795{ 796 /* 797 * mm_update_next_owner() may clear mm->owner to NULL 798 * if it races with swapoff, page migration, etc. 799 * So this can be called with p == NULL. 800 */ 801 if (unlikely(!p)) 802 return NULL; 803 804 return container_of(task_subsys_state(p, mem_cgroup_subsys_id), 805 struct mem_cgroup, css); 806} 807 808struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) 809{ 810 struct mem_cgroup *mem = NULL; 811 812 if (!mm) 813 return NULL; 814 /* 815 * Because we have no locks, mm->owner may be moved to another 816 * cgroup. We use css_tryget() here even if this looks 817 * pessimistic (rather than adding locks here). 818 */ 819 rcu_read_lock(); 820 do { 821 mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); 822 if (unlikely(!mem)) 823 break; 824 } while (!css_tryget(&mem->css)); 825 rcu_read_unlock(); 826 return mem; 827} 828 829/* The caller has to guarantee "mem" exists before calling this */ 830static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem) 831{ 832 struct cgroup_subsys_state *css; 833 int found; 834 835 if (!mem) /* ROOT cgroup has the smallest ID */ 836 return root_mem_cgroup; /*css_put/get against root is ignored*/ 837 if (!mem->use_hierarchy) { 838 if (css_tryget(&mem->css)) 839 return mem; 840 return NULL; 841 } 842 rcu_read_lock(); 843 /* 844 * searching a memory cgroup which has the smallest ID under the given 845 * ROOT cgroup.
(ID >= 1) 846 */ 847 css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found); 848 if (css && css_tryget(css)) 849 mem = container_of(css, struct mem_cgroup, css); 850 else 851 mem = NULL; 852 rcu_read_unlock(); 853 return mem; 854} 855 856static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter, 857 struct mem_cgroup *root, 858 bool cond) 859{ 860 int nextid = css_id(&iter->css) + 1; 861 int found; 862 int hierarchy_used; 863 struct cgroup_subsys_state *css; 864 865 hierarchy_used = iter->use_hierarchy; 866 867 css_put(&iter->css); 868 /* If no ROOT, walk all, ignore hierarchy */ 869 if (!cond || (root && !hierarchy_used)) 870 return NULL; 871 872 if (!root) 873 root = root_mem_cgroup; 874 875 do { 876 iter = NULL; 877 rcu_read_lock(); 878 879 css = css_get_next(&mem_cgroup_subsys, nextid, 880 &root->css, &found); 881 if (css && css_tryget(css)) 882 iter = container_of(css, struct mem_cgroup, css); 883 rcu_read_unlock(); 884 /* If css is NULL, no more cgroups will be found */ 885 nextid = found + 1; 886 } while (css && !iter); 887 888 return iter; 889} 890/* 891 * for_each_mem_cgroup_tree() is for visiting all cgroups under a tree. Please 892 * be careful that breaking out of the loop is not allowed; we hold a reference 893 * count. Instead, set "cond" to false and "continue" to exit the loop. 894 */ 895#define for_each_mem_cgroup_tree_cond(iter, root, cond) \ 896 for (iter = mem_cgroup_start_loop(root);\ 897 iter != NULL;\ 898 iter = mem_cgroup_get_next(iter, root, cond)) 899 900#define for_each_mem_cgroup_tree(iter, root) \ 901 for_each_mem_cgroup_tree_cond(iter, root, true) 902 903#define for_each_mem_cgroup_all(iter) \ 904 for_each_mem_cgroup_tree_cond(iter, NULL, true) 905 906 907static inline bool mem_cgroup_is_root(struct mem_cgroup *mem) 908{ 909 return (mem == root_mem_cgroup); 910} 911 912void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) 913{ 914 struct mem_cgroup *mem; 915 916 if (!mm) 917 return; 918 919 rcu_read_lock(); 920 mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); 921 if (unlikely(!mem)) 922 goto out; 923 924 switch (idx) { 925 case PGMAJFAULT: 926 mem_cgroup_pgmajfault(mem, 1); 927 break; 928 case PGFAULT: 929 mem_cgroup_pgfault(mem, 1); 930 break; 931 default: 932 BUG(); 933 } 934out: 935 rcu_read_unlock(); 936} 937EXPORT_SYMBOL(mem_cgroup_count_vm_event); 938 939/* 940 * The following LRU functions are allowed to be used without PCG_LOCK. 941 * Operations are called by routines of the global LRU independently from memcg. 942 * What we have to take care of here is the validness of pc->mem_cgroup. 943 * 944 * Changes to pc->mem_cgroup happen when 945 * 1. charge 946 * 2. moving account 947 * In the typical case, "charge" is done before add-to-lru. The exception is SwapCache, 948 * which is added to the LRU before charge. 949 * If the PCG_USED bit is not set, page_cgroup is not added to this private LRU. 950 * When moving account, the page is not on the LRU. It's isolated. 951 */ 952 953void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru) 954{ 955 struct page_cgroup *pc; 956 struct mem_cgroup_per_zone *mz; 957 958 if (mem_cgroup_disabled()) 959 return; 960 pc = lookup_page_cgroup(page); 961 /* can happen while we handle swapcache. */ 962 if (!TestClearPageCgroupAcctLRU(pc)) 963 return; 964 VM_BUG_ON(!pc->mem_cgroup); 965 /* 966 * We don't check the PCG_USED bit. It's cleared when the "page" is finally 967 * removed from the global LRU. 968 */ 969 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page); 970 /* huge page split is done under lru_lock.
so, we have no races. */ 971 MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page); 972 if (mem_cgroup_is_root(pc->mem_cgroup)) 973 return; 974 VM_BUG_ON(list_empty(&pc->lru)); 975 list_del_init(&pc->lru); 976} 977 978void mem_cgroup_del_lru(struct page *page) 979{ 980 mem_cgroup_del_lru_list(page, page_lru(page)); 981} 982 983/* 984 * Writeback is about to end against a page which has been marked for immediate 985 * reclaim. If it still appears to be reclaimable, move it to the tail of the 986 * inactive list. 987 */ 988void mem_cgroup_rotate_reclaimable_page(struct page *page) 989{ 990 struct mem_cgroup_per_zone *mz; 991 struct page_cgroup *pc; 992 enum lru_list lru = page_lru(page); 993 994 if (mem_cgroup_disabled()) 995 return; 996 997 pc = lookup_page_cgroup(page); 998 /* unused or root page is not rotated. */ 999 if (!PageCgroupUsed(pc)) 1000 return; 1001 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */ 1002 smp_rmb(); 1003 if (mem_cgroup_is_root(pc->mem_cgroup)) 1004 return; 1005 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page); 1006 list_move_tail(&pc->lru, &mz->lists[lru]); 1007} 1008 1009void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru) 1010{ 1011 struct mem_cgroup_per_zone *mz; 1012 struct page_cgroup *pc; 1013 1014 if (mem_cgroup_disabled()) 1015 return; 1016 1017 pc = lookup_page_cgroup(page); 1018 /* unused or root page is not rotated. */ 1019 if (!PageCgroupUsed(pc)) 1020 return; 1021 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */ 1022 smp_rmb(); 1023 if (mem_cgroup_is_root(pc->mem_cgroup)) 1024 return; 1025 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page); 1026 list_move(&pc->lru, &mz->lists[lru]); 1027} 1028 1029void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru) 1030{ 1031 struct page_cgroup *pc; 1032 struct mem_cgroup_per_zone *mz; 1033 1034 if (mem_cgroup_disabled()) 1035 return; 1036 pc = lookup_page_cgroup(page); 1037 VM_BUG_ON(PageCgroupAcctLRU(pc)); 1038 if (!PageCgroupUsed(pc)) 1039 return; 1040 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */ 1041 smp_rmb(); 1042 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page); 1043 /* huge page split is done under lru_lock. so, we have no races. */ 1044 MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page); 1045 SetPageCgroupAcctLRU(pc); 1046 if (mem_cgroup_is_root(pc->mem_cgroup)) 1047 return; 1048 list_add(&pc->lru, &mz->lists[lru]); 1049} 1050 1051/* 1052 * At handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed 1053 * while it's linked to the lru, because the page may be reused after it's fully 1054 * uncharged. To handle that, unlink page_cgroup from the LRU when charging it again. 1055 * It's done under lock_page and expected that zone->lru_lock is never held. 1056 */ 1057static void mem_cgroup_lru_del_before_commit(struct page *page) 1058{ 1059 unsigned long flags; 1060 struct zone *zone = page_zone(page); 1061 struct page_cgroup *pc = lookup_page_cgroup(page); 1062 1063 /* 1064 * Doing this check without taking ->lru_lock seems wrong but this 1065 * is safe. Because if page_cgroup's USED bit is unset, the page 1066 * will not be added to any memcg's LRU. If page_cgroup's USED bit is 1067 * set, the commit after this will fail, anyway. 1068 * All of this charge/uncharge is done under some mutual exclusion. 1069 * So, we don't need to take care of changes in the USED bit. 1070 */ 1071 if (likely(!PageLRU(page))) 1072 return; 1073 1074 spin_lock_irqsave(&zone->lru_lock, flags); 1075 /* 1076 * Forget the old LRU when this page_cgroup is *not* used.
This Used bit 1077 * is guarded by lock_page() because the page is SwapCache. 1078 */ 1079 if (!PageCgroupUsed(pc)) 1080 mem_cgroup_del_lru_list(page, page_lru(page)); 1081 spin_unlock_irqrestore(&zone->lru_lock, flags); 1082} 1083 1084static void mem_cgroup_lru_add_after_commit(struct page *page) 1085{ 1086 unsigned long flags; 1087 struct zone *zone = page_zone(page); 1088 struct page_cgroup *pc = lookup_page_cgroup(page); 1089 1090 /* take care of the case where the page is added to the LRU while we commit it */ 1091 if (likely(!PageLRU(page))) 1092 return; 1093 spin_lock_irqsave(&zone->lru_lock, flags); 1094 /* link when the page is linked to the LRU but page_cgroup isn't */ 1095 if (PageLRU(page) && !PageCgroupAcctLRU(pc)) 1096 mem_cgroup_add_lru_list(page, page_lru(page)); 1097 spin_unlock_irqrestore(&zone->lru_lock, flags); 1098} 1099 1100 1101void mem_cgroup_move_lists(struct page *page, 1102 enum lru_list from, enum lru_list to) 1103{ 1104 if (mem_cgroup_disabled()) 1105 return; 1106 mem_cgroup_del_lru_list(page, from); 1107 mem_cgroup_add_lru_list(page, to); 1108} 1109 1110/* 1111 * Checks whether the given mem is the same as root_mem or in 1112 * root_mem's hierarchy subtree 1113 */ 1114static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_mem, 1115 struct mem_cgroup *mem) 1116{ 1117 if (root_mem != mem) { 1118 return (root_mem->use_hierarchy && 1119 css_is_ancestor(&mem->css, &root_mem->css)); 1120 } 1121 1122 return true; 1123} 1124 1125int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem) 1126{ 1127 int ret; 1128 struct mem_cgroup *curr = NULL; 1129 struct task_struct *p; 1130 1131 p = find_lock_task_mm(task); 1132 if (!p) 1133 return 0; 1134 curr = try_get_mem_cgroup_from_mm(p->mm); 1135 task_unlock(p); 1136 if (!curr) 1137 return 0; 1138 /* 1139 * We should check use_hierarchy of "mem", not "curr". Checking 1140 * use_hierarchy of "curr" here would make this function return true if hierarchy is 1141 * enabled in "curr" and "curr" is a child of "mem" in the *cgroup* 1142 * hierarchy (even if use_hierarchy is disabled in "mem").
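 *
 * Concretely (hypothetical cgroup layout): for cgroups /A and /A/B with
 * use_hierarchy enabled in A, a task whose mm is owned by B yields
 * curr == B, mem_cgroup_same_or_subtree(A, B) is true, and so
 * task_in_mem_cgroup(task, A) returns 1; with use_hierarchy disabled
 * in A, only curr == A itself would match.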
1143 */ 1144 ret = mem_cgroup_same_or_subtree(mem, curr); 1145 css_put(&curr->css); 1146 return ret; 1147} 1148 1149static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages) 1150{ 1151 unsigned long active; 1152 unsigned long inactive; 1153 unsigned long gb; 1154 unsigned long inactive_ratio; 1155 1156 inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON)); 1157 active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON)); 1158 1159 gb = (inactive + active) >> (30 - PAGE_SHIFT); 1160 if (gb) 1161 inactive_ratio = int_sqrt(10 * gb); 1162 else 1163 inactive_ratio = 1; 1164 1165 if (present_pages) { 1166 present_pages[0] = inactive; 1167 present_pages[1] = active; 1168 } 1169 1170 return inactive_ratio; 1171} 1172 1173int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg) 1174{ 1175 unsigned long active; 1176 unsigned long inactive; 1177 unsigned long present_pages[2]; 1178 unsigned long inactive_ratio; 1179 1180 inactive_ratio = calc_inactive_ratio(memcg, present_pages); 1181 1182 inactive = present_pages[0]; 1183 active = present_pages[1]; 1184 1185 if (inactive * inactive_ratio < active) 1186 return 1; 1187 1188 return 0; 1189} 1190 1191int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg) 1192{ 1193 unsigned long active; 1194 unsigned long inactive; 1195 1196 inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE)); 1197 active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE)); 1198 1199 return (active > inactive); 1200} 1201 1202struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, 1203 struct zone *zone) 1204{ 1205 int nid = zone_to_nid(zone); 1206 int zid = zone_idx(zone); 1207 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid); 1208 1209 return &mz->reclaim_stat; 1210} 1211 1212struct zone_reclaim_stat * 1213mem_cgroup_get_reclaim_stat_from_page(struct page *page) 1214{ 1215 struct page_cgroup *pc; 1216 struct mem_cgroup_per_zone *mz; 1217 1218 if (mem_cgroup_disabled()) 1219 return NULL; 1220 1221 pc = lookup_page_cgroup(page); 1222 if (!PageCgroupUsed(pc)) 1223 return NULL; 1224 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. 
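 * (An aside on calc_inactive_ratio() above, with illustrative numbers:
 * gb = (inactive + active) >> (30 - PAGE_SHIFT) is the anon set in GB,
 * so 4GB of anon pages gives inactive_ratio = int_sqrt(10 * 4) = 6, and
 * inactive anon is considered low while inactive * 6 < active.)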
*/ 1225 smp_rmb(); 1226 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page); 1227 return &mz->reclaim_stat; 1228} 1229 1230unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, 1231 struct list_head *dst, 1232 unsigned long *scanned, int order, 1233 int mode, struct zone *z, 1234 struct mem_cgroup *mem_cont, 1235 int active, int file) 1236{ 1237 unsigned long nr_taken = 0; 1238 struct page *page; 1239 unsigned long scan; 1240 LIST_HEAD(pc_list); 1241 struct list_head *src; 1242 struct page_cgroup *pc, *tmp; 1243 int nid = zone_to_nid(z); 1244 int zid = zone_idx(z); 1245 struct mem_cgroup_per_zone *mz; 1246 int lru = LRU_FILE * file + active; 1247 int ret; 1248 1249 BUG_ON(!mem_cont); 1250 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); 1251 src = &mz->lists[lru]; 1252 1253 scan = 0; 1254 list_for_each_entry_safe_reverse(pc, tmp, src, lru) { 1255 if (scan >= nr_to_scan) 1256 break; 1257 1258 if (unlikely(!PageCgroupUsed(pc))) 1259 continue; 1260 1261 page = lookup_cgroup_page(pc); 1262 1263 if (unlikely(!PageLRU(page))) 1264 continue; 1265 1266 scan++; 1267 ret = __isolate_lru_page(page, mode, file); 1268 switch (ret) { 1269 case 0: 1270 list_move(&page->lru, dst); 1271 mem_cgroup_del_lru(page); 1272 nr_taken += hpage_nr_pages(page); 1273 break; 1274 case -EBUSY: 1275 /* we don't affect global LRU but rotate in our LRU */ 1276 mem_cgroup_rotate_lru_list(page, page_lru(page)); 1277 break; 1278 default: 1279 break; 1280 } 1281 } 1282 1283 *scanned = scan; 1284 1285 trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken, 1286 0, 0, 0, mode); 1287 1288 return nr_taken; 1289} 1290 1291#define mem_cgroup_from_res_counter(counter, member) \ 1292 container_of(counter, struct mem_cgroup, member) 1293 1294/** 1295 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1296 * @mem: the memory cgroup 1297 * 1298 * Returns the maximum amount of memory @mem can be charged with, in 1299 * pages. 1300 */ 1301static unsigned long mem_cgroup_margin(struct mem_cgroup *mem) 1302{ 1303 unsigned long long margin; 1304 1305 margin = res_counter_margin(&mem->res); 1306 if (do_swap_account) 1307 margin = min(margin, res_counter_margin(&mem->memsw)); 1308 return margin >> PAGE_SHIFT; 1309} 1310 1311int mem_cgroup_swappiness(struct mem_cgroup *memcg) 1312{ 1313 struct cgroup *cgrp = memcg->css.cgroup; 1314 1315 /* root ? */ 1316 if (cgrp->parent == NULL) 1317 return vm_swappiness; 1318 1319 return memcg->swappiness; 1320} 1321 1322static void mem_cgroup_start_move(struct mem_cgroup *mem) 1323{ 1324 int cpu; 1325 1326 get_online_cpus(); 1327 spin_lock(&mem->pcp_counter_lock); 1328 for_each_online_cpu(cpu) 1329 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1; 1330 mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1; 1331 spin_unlock(&mem->pcp_counter_lock); 1332 put_online_cpus(); 1333 1334 synchronize_rcu(); 1335} 1336 1337static void mem_cgroup_end_move(struct mem_cgroup *mem) 1338{ 1339 int cpu; 1340 1341 if (!mem) 1342 return; 1343 get_online_cpus(); 1344 spin_lock(&mem->pcp_counter_lock); 1345 for_each_online_cpu(cpu) 1346 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1; 1347 mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1; 1348 spin_unlock(&mem->pcp_counter_lock); 1349 put_online_cpus(); 1350} 1351/* 1352 * 2 routines for checking "mem" is under move_account() or not. 1353 * 1354 * mem_cgroup_stealed() - checking a cgroup is mc.from or not. This is used 1355 * for avoiding race in accounting. If true, 1356 * pc->mem_cgroup may be overwritten. 
1357 * 1358 * mem_cgroup_under_move() - checks whether a cgroup is mc.from or mc.to or 1359 * under the hierarchy of moving cgroups. This is for 1360 * waiting at high memory pressure caused by "move". 1361 */ 1362 1363static bool mem_cgroup_stealed(struct mem_cgroup *mem) 1364{ 1365 VM_BUG_ON(!rcu_read_lock_held()); 1366 return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0; 1367} 1368 1369static bool mem_cgroup_under_move(struct mem_cgroup *mem) 1370{ 1371 struct mem_cgroup *from; 1372 struct mem_cgroup *to; 1373 bool ret = false; 1374 /* 1375 * Unlike task_move routines, we access mc.to, mc.from not under 1376 * mutual exclusion by cgroup_mutex. Here, we take a spinlock instead. 1377 */ 1378 spin_lock(&mc.lock); 1379 from = mc.from; 1380 to = mc.to; 1381 if (!from) 1382 goto unlock; 1383 1384 ret = mem_cgroup_same_or_subtree(mem, from) 1385 || mem_cgroup_same_or_subtree(mem, to); 1386unlock: 1387 spin_unlock(&mc.lock); 1388 return ret; 1389} 1390 1391static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem) 1392{ 1393 if (mc.moving_task && current != mc.moving_task) { 1394 if (mem_cgroup_under_move(mem)) { 1395 DEFINE_WAIT(wait); 1396 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1397 /* moving charge context might have finished. */ 1398 if (mc.moving_task) 1399 schedule(); 1400 finish_wait(&mc.waitq, &wait); 1401 return true; 1402 } 1403 } 1404 return false; 1405} 1406 1407/** 1408 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode. 1409 * @memcg: The memory cgroup that went over limit 1410 * @p: Task that is going to be killed 1411 * 1412 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1413 * enabled 1414 */ 1415void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 1416{ 1417 struct cgroup *task_cgrp; 1418 struct cgroup *mem_cgrp; 1419 /* 1420 * Need a buffer in BSS, can't rely on allocations. The code relies 1421 * on the assumption that OOM is serialized for the memory controller. 1422 * If this assumption is broken, revisit this code.
1423 */ 1424 static char memcg_name[PATH_MAX]; 1425 int ret; 1426 1427 if (!memcg || !p) 1428 return; 1429 1430 1431 rcu_read_lock(); 1432 1433 mem_cgrp = memcg->css.cgroup; 1434 task_cgrp = task_cgroup(p, mem_cgroup_subsys_id); 1435 1436 ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX); 1437 if (ret < 0) { 1438 /* 1439 * Unfortunately, we are unable to convert to a useful name, 1440 * but we'll still print out the usage information 1441 */ 1442 rcu_read_unlock(); 1443 goto done; 1444 } 1445 rcu_read_unlock(); 1446 1447 printk(KERN_INFO "Task in %s killed", memcg_name); 1448 1449 rcu_read_lock(); 1450 ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX); 1451 if (ret < 0) { 1452 rcu_read_unlock(); 1453 goto done; 1454 } 1455 rcu_read_unlock(); 1456 1457 /* 1458 * Continues from above, so we don't need a KERN_ level 1459 */ 1460 printk(KERN_CONT " as a result of limit of %s\n", memcg_name); 1461done: 1462 1463 printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n", 1464 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10, 1465 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10, 1466 res_counter_read_u64(&memcg->res, RES_FAILCNT)); 1467 printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, " 1468 "failcnt %llu\n", 1469 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10, 1470 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10, 1471 res_counter_read_u64(&memcg->memsw, RES_FAILCNT)); 1472} 1473 1474/* 1475 * This function returns the number of memcgs under the hierarchy tree. Returns 1476 * 1 (self count) if there are no children. 1477 */ 1478static int mem_cgroup_count_children(struct mem_cgroup *mem) 1479{ 1480 int num = 0; 1481 struct mem_cgroup *iter; 1482 1483 for_each_mem_cgroup_tree(iter, mem) 1484 num++; 1485 return num; 1486} 1487 1488/* 1489 * Return the memory (and swap, if configured) limit for a memcg. 1490 */ 1491u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) 1492{ 1493 u64 limit; 1494 u64 memsw; 1495 1496 limit = res_counter_read_u64(&memcg->res, RES_LIMIT); 1497 limit += total_swap_pages << PAGE_SHIFT; 1498 1499 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 1500 /* 1501 * If memsw is finite and limits the amount of swap space available 1502 * to this memcg, return that limit. 1503 */ 1504 return min(limit, memsw); 1505} 1506 1507/* 1508 * Visit the first child (need not be the first child as per the ordering 1509 * of the cgroup list, since we track last_scanned_child) of @mem and use 1510 * that to reclaim free pages from. 1511 */ 1512static struct mem_cgroup * 1513mem_cgroup_select_victim(struct mem_cgroup *root_mem) 1514{ 1515 struct mem_cgroup *ret = NULL; 1516 struct cgroup_subsys_state *css; 1517 int nextid, found; 1518 1519 if (!root_mem->use_hierarchy) { 1520 css_get(&root_mem->css); 1521 ret = root_mem; 1522 } 1523 1524 while (!ret) { 1525 rcu_read_lock(); 1526 nextid = root_mem->last_scanned_child + 1; 1527 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css, 1528 &found); 1529 if (css && css_tryget(css)) 1530 ret = container_of(css, struct mem_cgroup, css); 1531 1532 rcu_read_unlock(); 1533 /* Updates scanning parameter */ 1534 if (!css) { 1535 /* this means start scan from ID:1 */ 1536 root_mem->last_scanned_child = 0; 1537 } else 1538 root_mem->last_scanned_child = found; 1539 } 1540 1541 return ret; 1542} 1543 1544/** 1545 * test_mem_cgroup_node_reclaimable 1546 * @mem: the target memcg 1547 * @nid: the node ID to be checked. 1548 * @noswap : specify true here if the user wants file only information.
1549 * 1550 * This function returns whether the specified memcg contains any 1551 * reclaimable pages on a node. Returns true if there are any reclaimable 1552 * pages in the node. 1553 */ 1554static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem, 1555 int nid, bool noswap) 1556{ 1557 if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_FILE)) 1558 return true; 1559 if (noswap || !total_swap_pages) 1560 return false; 1561 if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_ANON)) 1562 return true; 1563 return false; 1564 1565} 1566#if MAX_NUMNODES > 1 1567 1568/* 1569 * Always updating the nodemask is not very good - even if we have an empty 1570 * list or the wrong list here, we can start from some node and traverse all 1571 * nodes based on the zonelist. So update the list loosely once per 10 secs. 1572 * 1573 */ 1574static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem) 1575{ 1576 int nid; 1577 /* 1578 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET 1579 * pagein/pageout changes since the last update. 1580 */ 1581 if (!atomic_read(&mem->numainfo_events)) 1582 return; 1583 if (atomic_inc_return(&mem->numainfo_updating) > 1) 1584 return; 1585 1586 /* make a nodemask where this memcg uses memory from */ 1587 mem->scan_nodes = node_states[N_HIGH_MEMORY]; 1588 1589 for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) { 1590 1591 if (!test_mem_cgroup_node_reclaimable(mem, nid, false)) 1592 node_clear(nid, mem->scan_nodes); 1593 } 1594 1595 atomic_set(&mem->numainfo_events, 0); 1596 atomic_set(&mem->numainfo_updating, 0); 1597} 1598 1599/* 1600 * Selecting a node where we start reclaim from. Because what we need is just 1601 * reducing the usage counter, starting from anywhere is OK. Considering 1602 * memory reclaim from the current node, there are pros and cons. 1603 * 1604 * Freeing memory from the current node means freeing memory from a node which 1605 * we'll use or we've used. So, it may make the LRU bad. And if several threads 1606 * hit limits, they will see contention on a node. But freeing from a remote 1607 * node means more costs for memory reclaim because of memory latency. 1608 * 1609 * Now, we use round-robin. A better algorithm is welcome. 1610 */ 1611int mem_cgroup_select_victim_node(struct mem_cgroup *mem) 1612{ 1613 int node; 1614 1615 mem_cgroup_may_update_nodemask(mem); 1616 node = mem->last_scanned_node; 1617 1618 node = next_node(node, mem->scan_nodes); 1619 if (node == MAX_NUMNODES) 1620 node = first_node(mem->scan_nodes); 1621 /* 1622 * We call this when we hit the limit, not when pages are added to the LRU. 1623 * No LRU may hold pages because all pages are UNEVICTABLE or 1624 * the memcg is too small and all pages are not on the LRU. In that case, 1625 * we use the current node. 1626 */ 1627 if (unlikely(node == MAX_NUMNODES)) 1628 node = numa_node_id(); 1629 1630 mem->last_scanned_node = node; 1631 return node; 1632} 1633 1634/* 1635 * Check all nodes for whether they contain reclaimable pages or not. 1636 * For a quick scan, we make use of scan_nodes. This will allow us to skip 1637 * unused nodes. But scan_nodes is lazily updated and may not contain 1638 * enough new information. We need to do a double check. 1639 */ 1640bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap) 1641{ 1642 int nid; 1643 1644 /* 1645 * quick check...making use of scan_nodes. 1646 * We can skip unused nodes.
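 *
 * (On mem_cgroup_select_victim_node() above, with hypothetical masks: if
 * scan_nodes = {0,2} and last_scanned_node = 0, next_node() picks node 2;
 * from node 2 it returns MAX_NUMNODES and first_node() wraps back to 0;
 * with an empty scan_nodes both steps yield MAX_NUMNODES and we fall
 * back to numa_node_id().)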
1647 */ 1648 if (!nodes_empty(mem->scan_nodes)) { 1649 for (nid = first_node(mem->scan_nodes); 1650 nid < MAX_NUMNODES; 1651 nid = next_node(nid, mem->scan_nodes)) { 1652 1653 if (test_mem_cgroup_node_reclaimable(mem, nid, noswap)) 1654 return true; 1655 } 1656 } 1657 /* 1658 * Check the rest of the nodes. 1659 */ 1660 for_each_node_state(nid, N_HIGH_MEMORY) { 1661 if (node_isset(nid, mem->scan_nodes)) 1662 continue; 1663 if (test_mem_cgroup_node_reclaimable(mem, nid, noswap)) 1664 return true; 1665 } 1666 return false; 1667} 1668 1669#else 1670int mem_cgroup_select_victim_node(struct mem_cgroup *mem) 1671{ 1672 return 0; 1673} 1674 1675bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap) 1676{ 1677 return test_mem_cgroup_node_reclaimable(mem, 0, noswap); 1678} 1679#endif 1680 1681static void __mem_cgroup_record_scanstat(unsigned long *stats, 1682 struct memcg_scanrecord *rec) 1683{ 1684 1685 stats[SCAN] += rec->nr_scanned[0] + rec->nr_scanned[1]; 1686 stats[SCAN_ANON] += rec->nr_scanned[0]; 1687 stats[SCAN_FILE] += rec->nr_scanned[1]; 1688 1689 stats[ROTATE] += rec->nr_rotated[0] + rec->nr_rotated[1]; 1690 stats[ROTATE_ANON] += rec->nr_rotated[0]; 1691 stats[ROTATE_FILE] += rec->nr_rotated[1]; 1692 1693 stats[FREED] += rec->nr_freed[0] + rec->nr_freed[1]; 1694 stats[FREED_ANON] += rec->nr_freed[0]; 1695 stats[FREED_FILE] += rec->nr_freed[1]; 1696 1697 stats[ELAPSED] += rec->elapsed; 1698} 1699 1700static void mem_cgroup_record_scanstat(struct memcg_scanrecord *rec) 1701{ 1702 struct mem_cgroup *mem; 1703 int context = rec->context; 1704 1705 if (context >= NR_SCAN_CONTEXT) 1706 return; 1707 1708 mem = rec->mem; 1709 spin_lock(&mem->scanstat.lock); 1710 __mem_cgroup_record_scanstat(mem->scanstat.stats[context], rec); 1711 spin_unlock(&mem->scanstat.lock); 1712 1713 mem = rec->root; 1714 spin_lock(&mem->scanstat.lock); 1715 __mem_cgroup_record_scanstat(mem->scanstat.rootstats[context], rec); 1716 spin_unlock(&mem->scanstat.lock); 1717} 1718 1719/* 1720 * Scan the hierarchy if needed to reclaim memory. We remember the last child 1721 * we reclaimed from, so that we don't end up penalizing one child extensively 1722 * based on its position in the children list. 1723 * 1724 * root_mem is the original ancestor that we've been reclaiming from. 1725 * 1726 * We give up and return to the caller when we visit root_mem twice. 1727 * (other groups can be removed while we're walking....) 1728 * 1729 * If shrink==true, to avoid freeing too much, this returns immediately. 1730 */ 1731static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, 1732 struct zone *zone, 1733 gfp_t gfp_mask, 1734 unsigned long reclaim_options, 1735 unsigned long *total_scanned) 1736{ 1737 struct mem_cgroup *victim; 1738 int ret, total = 0; 1739 int loop = 0; 1740 bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP; 1741 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; 1742 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; 1743 struct memcg_scanrecord rec; 1744 unsigned long excess; 1745 unsigned long scanned; 1746 1747 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; 1748 1749 /* If memsw_is_minimum==1, swap-out is of no use.
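 * (Loop illustration for the hierarchy walk below, hypothetical numbers:
 * in soft-limit reclaim with an excess of 400 pages, once root_mem has
 * been visited twice the loop may bail out as soon as total reaches
 * excess >> 2 = 100 reclaimed pages, or after
 * MEM_CGROUP_MAX_RECLAIM_LOOPS (100) passes, whichever comes first.)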
*/ 1750 if (!check_soft && !shrink && root_mem->memsw_is_minimum) 1751 noswap = true; 1752 1753 if (shrink) 1754 rec.context = SCAN_BY_SHRINK; 1755 else if (check_soft) 1756 rec.context = SCAN_BY_SYSTEM; 1757 else 1758 rec.context = SCAN_BY_LIMIT; 1759 1760 rec.root = root_mem; 1761 1762 while (1) { 1763 victim = mem_cgroup_select_victim(root_mem); 1764 if (victim == root_mem) { 1765 loop++; 1766 /* 1767 * We are not draining per cpu cached charges during 1768 * soft limit reclaim because global reclaim doesn't 1769 * care about charges. It tries to free some memory and 1770 * charges will not give any. 1771 */ 1772 if (!check_soft && loop >= 1) 1773 drain_all_stock_async(root_mem); 1774 if (loop >= 2) { 1775 /* 1776 * If we have not been able to reclaim 1777 * anything, it might be because there are 1778 * no reclaimable pages under this hierarchy 1779 */ 1780 if (!check_soft || !total) { 1781 css_put(&victim->css); 1782 break; 1783 } 1784 /* 1785 * We want to do more targeted reclaim. 1786 * excess >> 2 is not too excessive, so we do not 1787 * reclaim too much, nor too little, so that we do not keep 1788 * coming back to reclaim from this cgroup 1789 */ 1790 if (total >= (excess >> 2) || 1791 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) { 1792 css_put(&victim->css); 1793 break; 1794 } 1795 } 1796 } 1797 if (!mem_cgroup_reclaimable(victim, noswap)) { 1798 /* this cgroup's local usage == 0 */ 1799 css_put(&victim->css); 1800 continue; 1801 } 1802 rec.mem = victim; 1803 rec.nr_scanned[0] = 0; 1804 rec.nr_scanned[1] = 0; 1805 rec.nr_rotated[0] = 0; 1806 rec.nr_rotated[1] = 0; 1807 rec.nr_freed[0] = 0; 1808 rec.nr_freed[1] = 0; 1809 rec.elapsed = 0; 1810 /* we use the swappiness of the local cgroup */ 1811 if (check_soft) { 1812 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, 1813 noswap, zone, &rec, &scanned); 1814 *total_scanned += scanned; 1815 } else 1816 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, 1817 noswap, &rec); 1818 mem_cgroup_record_scanstat(&rec); 1819 css_put(&victim->css); 1820 /* 1821 * When shrinking usage, we can't check whether we should stop here or 1822 * reclaim more. It depends on the callers. last_scanned_child 1823 * will work well enough for keeping fairness under the tree. 1824 */ 1825 if (shrink) 1826 return ret; 1827 total += ret; 1828 if (check_soft) { 1829 if (!res_counter_soft_limit_excess(&root_mem->res)) 1830 return total; 1831 } else if (mem_cgroup_margin(root_mem)) 1832 return total; 1833 } 1834 return total; 1835} 1836 1837/* 1838 * Check whether the OOM-Killer is already running under our hierarchy. 1839 * If someone is running, return false. 1840 * Has to be called with memcg_oom_lock 1841 */ 1842static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) 1843{ 1844 int lock_count = -1; 1845 struct mem_cgroup *iter, *failed = NULL; 1846 bool cond = true; 1847 1848 for_each_mem_cgroup_tree_cond(iter, mem, cond) { 1849 bool locked = iter->oom_lock; 1850 1851 iter->oom_lock = true; 1852 if (lock_count == -1) 1853 lock_count = iter->oom_lock; 1854 else if (lock_count != locked) { 1855 /* 1856 * this subtree of our hierarchy is already locked 1857 * so we cannot give a lock.
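 *
 * Example walk (hypothetical hierarchy R -> A -> B with A->oom_lock
 * already true): the loop sets R->oom_lock, then sees that A was already
 * locked, records failed = A and stops via cond; the cleanup pass below
 * clears oom_lock only on the nodes visited before A (here just R),
 * leaving A's original lock, and the not-yet-visited B, untouched.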
1858 */ 1859 lock_count = 0; 1860 failed = iter; 1861 cond = false; 1862 } 1863 } 1864 1865 if (!failed) 1866 goto done; 1867 1868 /* 1869 * OK, we failed to lock the whole subtree so we have to clean up 1870 * what we set up to the failing subtree 1871 */ 1872 cond = true; 1873 for_each_mem_cgroup_tree_cond(iter, mem, cond) { 1874 if (iter == failed) { 1875 cond = false; 1876 continue; 1877 } 1878 iter->oom_lock = false; 1879 } 1880done: 1881 return lock_count; 1882} 1883 1884/* 1885 * Has to be called with memcg_oom_lock 1886 */ 1887static int mem_cgroup_oom_unlock(struct mem_cgroup *mem) 1888{ 1889 struct mem_cgroup *iter; 1890 1891 for_each_mem_cgroup_tree(iter, mem) 1892 iter->oom_lock = false; 1893 return 0; 1894} 1895 1896static void mem_cgroup_mark_under_oom(struct mem_cgroup *mem) 1897{ 1898 struct mem_cgroup *iter; 1899 1900 for_each_mem_cgroup_tree(iter, mem) 1901 atomic_inc(&iter->under_oom); 1902} 1903 1904static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem) 1905{ 1906 struct mem_cgroup *iter; 1907 1908 /* 1909 * When a new child is created while the hierarchy is under oom, 1910 * mem_cgroup_oom_lock() may not be called. We have to use 1911 * atomic_add_unless() here. 1912 */ 1913 for_each_mem_cgroup_tree(iter, mem) 1914 atomic_add_unless(&iter->under_oom, -1, 0); 1915} 1916 1917static DEFINE_SPINLOCK(memcg_oom_lock); 1918static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1919 1920struct oom_wait_info { 1921 struct mem_cgroup *mem; 1922 wait_queue_t wait; 1923}; 1924 1925static int memcg_oom_wake_function(wait_queue_t *wait, 1926 unsigned mode, int sync, void *arg) 1927{ 1928 struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg, 1929 *oom_wait_mem; 1930 struct oom_wait_info *oom_wait_info; 1931 1932 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1933 oom_wait_mem = oom_wait_info->mem; 1934 1935 /* 1936 * Both of oom_wait_info->mem and wake_mem are stable under us. 1937 * Then we can use css_is_ancestor without taking care of RCU. 1938 */ 1939 if (!mem_cgroup_same_or_subtree(oom_wait_mem, wake_mem) 1940 && !mem_cgroup_same_or_subtree(wake_mem, oom_wait_mem)) 1941 return 0; 1942 return autoremove_wake_function(wait, mode, sync, arg); 1943} 1944 1945static void memcg_wakeup_oom(struct mem_cgroup *mem) 1946{ 1947 /* for filtering, pass "mem" as argument. */ 1948 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem); 1949} 1950 1951static void memcg_oom_recover(struct mem_cgroup *mem) 1952{ 1953 if (mem && atomic_read(&mem->under_oom)) 1954 memcg_wakeup_oom(mem); 1955} 1956 1957/* 1958 * try to call OOM killer. returns false if we should exit memory-reclaim loop. 1959 */ 1960bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) 1961{ 1962 struct oom_wait_info owait; 1963 bool locked, need_to_kill; 1964 1965 owait.mem = mem; 1966 owait.wait.flags = 0; 1967 owait.wait.func = memcg_oom_wake_function; 1968 owait.wait.private = current; 1969 INIT_LIST_HEAD(&owait.wait.task_list); 1970 need_to_kill = true; 1971 mem_cgroup_mark_under_oom(mem); 1972 1973 /* At first, try to OOM lock hierarchy under mem.*/ 1974 spin_lock(&memcg_oom_lock); 1975 locked = mem_cgroup_oom_lock(mem); 1976 /* 1977 * Even if signal_pending(), we can't quit charge() loop without 1978 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL 1979 * under OOM is always welcomed, use TASK_KILLABLE here. 
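 *
 * (On memcg_oom_wake_function() above, with hypothetical paths: a waiter
 * in /A/B is woken by a wakeup from /A and vice versa, since one of the
 * two mem_cgroup_same_or_subtree() tests holds in either direction;
 * wakeups from an unrelated /C are filtered out without waking the task.)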
1980 */ 1981 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1982 if (!locked || mem->oom_kill_disable) 1983 need_to_kill = false; 1984 if (locked) 1985 mem_cgroup_oom_notify(mem); 1986 spin_unlock(&memcg_oom_lock); 1987 1988 if (need_to_kill) { 1989 finish_wait(&memcg_oom_waitq, &owait.wait); 1990 mem_cgroup_out_of_memory(mem, mask); 1991 } else { 1992 schedule(); 1993 finish_wait(&memcg_oom_waitq, &owait.wait); 1994 } 1995 spin_lock(&memcg_oom_lock); 1996 if (locked) 1997 mem_cgroup_oom_unlock(mem); 1998 memcg_wakeup_oom(mem); 1999 spin_unlock(&memcg_oom_lock); 2000 2001 mem_cgroup_unmark_under_oom(mem); 2002 2003 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current)) 2004 return false; 2005 /* Give chance to dying process */ 2006 schedule_timeout(1); 2007 return true; 2008} 2009 2010/* 2011 * Currently used to update mapped file statistics, but the routine can be 2012 * generalized to update other statistics as well. 2013 * 2014 * Notes: Race condition 2015 * 2016 * We usually use page_cgroup_lock() for accessing page_cgroup member but 2017 * it tends to be costly. But considering some conditions, we doesn't need 2018 * to do so _always_. 2019 * 2020 * Considering "charge", lock_page_cgroup() is not required because all 2021 * file-stat operations happen after a page is attached to radix-tree. There 2022 * are no race with "charge". 2023 * 2024 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup 2025 * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even 2026 * if there are race with "uncharge". Statistics itself is properly handled 2027 * by flags. 2028 * 2029 * Considering "move", this is an only case we see a race. To make the race 2030 * small, we check MEM_CGROUP_ON_MOVE percpu value and detect there are 2031 * possibility of race condition. If there is, we take a lock. 2032 */ 2033 2034void mem_cgroup_update_page_stat(struct page *page, 2035 enum mem_cgroup_page_stat_item idx, int val) 2036{ 2037 struct mem_cgroup *mem; 2038 struct page_cgroup *pc = lookup_page_cgroup(page); 2039 bool need_unlock = false; 2040 unsigned long uninitialized_var(flags); 2041 2042 if (unlikely(!pc)) 2043 return; 2044 2045 rcu_read_lock(); 2046 mem = pc->mem_cgroup; 2047 if (unlikely(!mem || !PageCgroupUsed(pc))) 2048 goto out; 2049 /* pc->mem_cgroup is unstable ? */ 2050 if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) { 2051 /* take a lock against to access pc->mem_cgroup */ 2052 move_lock_page_cgroup(pc, &flags); 2053 need_unlock = true; 2054 mem = pc->mem_cgroup; 2055 if (!mem || !PageCgroupUsed(pc)) 2056 goto out; 2057 } 2058 2059 switch (idx) { 2060 case MEMCG_NR_FILE_MAPPED: 2061 if (val > 0) 2062 SetPageCgroupFileMapped(pc); 2063 else if (!page_mapped(page)) 2064 ClearPageCgroupFileMapped(pc); 2065 idx = MEM_CGROUP_STAT_FILE_MAPPED; 2066 break; 2067 default: 2068 BUG(); 2069 } 2070 2071 this_cpu_add(mem->stat->count[idx], val); 2072 2073out: 2074 if (unlikely(need_unlock)) 2075 move_unlock_page_cgroup(pc, &flags); 2076 rcu_read_unlock(); 2077 return; 2078} 2079EXPORT_SYMBOL(mem_cgroup_update_page_stat); 2080 2081/* 2082 * size of first charge trial. "32" comes from vmscan.c's magic value. 2083 * TODO: maybe necessary to use big numbers in big irons. 
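 *
 * Illustration of the batching below (a sketch of the call flow, not
 * additional code): a single-page charge takes CHARGE_BATCH pages from
 * the res_counter in one go and parks the surplus in this cpu's stock,
 * so the next 31 one-page charges on this cpu are served by
 * consume_stock() without touching the res_counter:
 *
 *	__mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true);
 *		// res_counter_charge(&mem->res, 32 * PAGE_SIZE, ...)
 *		// refill_stock(mem, 32 - 1)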
2084 */ 2085#define CHARGE_BATCH 32U 2086struct memcg_stock_pcp { 2087 struct mem_cgroup *cached; /* this never be root cgroup */ 2088 unsigned int nr_pages; 2089 struct work_struct work; 2090 unsigned long flags; 2091#define FLUSHING_CACHED_CHARGE (0) 2092}; 2093static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2094static DEFINE_MUTEX(percpu_charge_mutex); 2095 2096/* 2097 * Try to consume stocked charge on this cpu. If success, one page is consumed 2098 * from local stock and true is returned. If the stock is 0 or charges from a 2099 * cgroup which is not current target, returns false. This stock will be 2100 * refilled. 2101 */ 2102static bool consume_stock(struct mem_cgroup *mem) 2103{ 2104 struct memcg_stock_pcp *stock; 2105 bool ret = true; 2106 2107 stock = &get_cpu_var(memcg_stock); 2108 if (mem == stock->cached && stock->nr_pages) 2109 stock->nr_pages--; 2110 else /* need to call res_counter_charge */ 2111 ret = false; 2112 put_cpu_var(memcg_stock); 2113 return ret; 2114} 2115 2116/* 2117 * Returns stocks cached in percpu to res_counter and reset cached information. 2118 */ 2119static void drain_stock(struct memcg_stock_pcp *stock) 2120{ 2121 struct mem_cgroup *old = stock->cached; 2122 2123 if (stock->nr_pages) { 2124 unsigned long bytes = stock->nr_pages * PAGE_SIZE; 2125 2126 res_counter_uncharge(&old->res, bytes); 2127 if (do_swap_account) 2128 res_counter_uncharge(&old->memsw, bytes); 2129 stock->nr_pages = 0; 2130 } 2131 stock->cached = NULL; 2132} 2133 2134/* 2135 * This must be called under preempt disabled or must be called by 2136 * a thread which is pinned to local cpu. 2137 */ 2138static void drain_local_stock(struct work_struct *dummy) 2139{ 2140 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock); 2141 drain_stock(stock); 2142 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2143} 2144 2145/* 2146 * Cache charges(val) which is from res_counter, to local per_cpu area. 2147 * This will be consumed by consume_stock() function, later. 2148 */ 2149static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages) 2150{ 2151 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); 2152 2153 if (stock->cached != mem) { /* reset if necessary */ 2154 drain_stock(stock); 2155 stock->cached = mem; 2156 } 2157 stock->nr_pages += nr_pages; 2158 put_cpu_var(memcg_stock); 2159} 2160 2161/* 2162 * Drains all per-CPU charge caches for given root_mem resp. subtree 2163 * of the hierarchy under it. sync flag says whether we should block 2164 * until the work is done. 2165 */ 2166static void drain_all_stock(struct mem_cgroup *root_mem, bool sync) 2167{ 2168 int cpu, curcpu; 2169 2170 /* Notify other cpus that system-wide "drain" is running */ 2171 get_online_cpus(); 2172 /* 2173 * Get a hint for avoiding draining charges on the current cpu, 2174 * which must be exhausted by our charging. It is not required that 2175 * this be a precise check, so we use raw_smp_processor_id() instead of 2176 * getcpu()/putcpu(). 
2177 */ 2178 curcpu = raw_smp_processor_id(); 2179 for_each_online_cpu(cpu) { 2180 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2181 struct mem_cgroup *mem; 2182 2183 mem = stock->cached; 2184 if (!mem || !stock->nr_pages) 2185 continue; 2186 if (!mem_cgroup_same_or_subtree(root_mem, mem)) 2187 continue; 2188 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2189 if (cpu == curcpu) 2190 drain_local_stock(&stock->work); 2191 else 2192 schedule_work_on(cpu, &stock->work); 2193 } 2194 } 2195 2196 if (!sync) 2197 goto out; 2198 2199 for_each_online_cpu(cpu) { 2200 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2201 if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) 2202 flush_work(&stock->work); 2203 } 2204out: 2205 put_online_cpus(); 2206} 2207 2208/* 2209 * Tries to drain stocked charges in other cpus. This function is asynchronous 2210 * and just put a work per cpu for draining localy on each cpu. Caller can 2211 * expects some charges will be back to res_counter later but cannot wait for 2212 * it. 2213 */ 2214static void drain_all_stock_async(struct mem_cgroup *root_mem) 2215{ 2216 /* 2217 * If someone calls draining, avoid adding more kworker runs. 2218 */ 2219 if (!mutex_trylock(&percpu_charge_mutex)) 2220 return; 2221 drain_all_stock(root_mem, false); 2222 mutex_unlock(&percpu_charge_mutex); 2223} 2224 2225/* This is a synchronous drain interface. */ 2226static void drain_all_stock_sync(struct mem_cgroup *root_mem) 2227{ 2228 /* called when force_empty is called */ 2229 mutex_lock(&percpu_charge_mutex); 2230 drain_all_stock(root_mem, true); 2231 mutex_unlock(&percpu_charge_mutex); 2232} 2233 2234/* 2235 * This function drains percpu counter value from DEAD cpu and 2236 * move it to local cpu. Note that this function can be preempted. 2237 */ 2238static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu) 2239{ 2240 int i; 2241 2242 spin_lock(&mem->pcp_counter_lock); 2243 for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) { 2244 long x = per_cpu(mem->stat->count[i], cpu); 2245 2246 per_cpu(mem->stat->count[i], cpu) = 0; 2247 mem->nocpu_base.count[i] += x; 2248 } 2249 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 2250 unsigned long x = per_cpu(mem->stat->events[i], cpu); 2251 2252 per_cpu(mem->stat->events[i], cpu) = 0; 2253 mem->nocpu_base.events[i] += x; 2254 } 2255 /* need to clear ON_MOVE value, works as a kind of lock. 
*/
2256	per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
2257	spin_unlock(&mem->pcp_counter_lock);
2258}
2259
2260static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
2261{
2262	int idx = MEM_CGROUP_ON_MOVE;
2263
2264	spin_lock(&mem->pcp_counter_lock);
2265	per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
2266	spin_unlock(&mem->pcp_counter_lock);
2267}
2268
2269static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
2270					unsigned long action,
2271					void *hcpu)
2272{
2273	int cpu = (unsigned long)hcpu;
2274	struct memcg_stock_pcp *stock;
2275	struct mem_cgroup *iter;
2276
2277	if (action == CPU_ONLINE) {
2278		for_each_mem_cgroup_all(iter)
2279			synchronize_mem_cgroup_on_move(iter, cpu);
2280		return NOTIFY_OK;
2281	}
2282
2283	if ((action != CPU_DEAD) && (action != CPU_DEAD_FROZEN))
2284		return NOTIFY_OK;
2285
2286	for_each_mem_cgroup_all(iter)
2287		mem_cgroup_drain_pcp_counter(iter, cpu);
2288
2289	stock = &per_cpu(memcg_stock, cpu);
2290	drain_stock(stock);
2291	return NOTIFY_OK;
2292}
2293
2294
2295/* See __mem_cgroup_try_charge() for details */
2296enum {
2297	CHARGE_OK,		/* success */
2298	CHARGE_RETRY,		/* need to retry but retry is not bad */
2299	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
2300	CHARGE_WOULDBLOCK,	/* __GFP_WAIT wasn't set and not enough res. */
2301	CHARGE_OOM_DIE,		/* the current task is killed because of OOM */
2302};
2303
2304static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
2305				unsigned int nr_pages, bool oom_check)
2306{
2307	unsigned long csize = nr_pages * PAGE_SIZE;
2308	struct mem_cgroup *mem_over_limit;
2309	struct res_counter *fail_res;
2310	unsigned long flags = 0;
2311	int ret;
2312
2313	ret = res_counter_charge(&mem->res, csize, &fail_res);
2314
2315	if (likely(!ret)) {
2316		if (!do_swap_account)
2317			return CHARGE_OK;
2318		ret = res_counter_charge(&mem->memsw, csize, &fail_res);
2319		if (likely(!ret))
2320			return CHARGE_OK;
2321
2322		res_counter_uncharge(&mem->res, csize);
2323		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2324		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2325	} else
2326		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2327	/*
2328	 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
2329	 * of regular pages (CHARGE_BATCH), or a single regular page (1).
2330	 *
2331	 * Never reclaim on behalf of optional batching, retry with a
2332	 * single page instead.
2333	 */
2334	if (nr_pages == CHARGE_BATCH)
2335		return CHARGE_RETRY;
2336
2337	if (!(gfp_mask & __GFP_WAIT))
2338		return CHARGE_WOULDBLOCK;
2339
2340	ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
2341					      gfp_mask, flags, NULL);
2342	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2343		return CHARGE_RETRY;
2344	/*
2345	 * Even though the limit is exceeded at this point, reclaim
2346	 * may have been able to free some pages.  Retry the charge
2347	 * before killing the task.
2348	 *
2349	 * Only for regular pages, though: huge pages are rather
2350	 * unlikely to succeed so close to the limit, and we fall back
2351	 * to regular pages anyway in case of failure.
2352	 */
2353	if (nr_pages == 1 && ret)
2354		return CHARGE_RETRY;
2355
2356	/*
2357	 * During a task move, charges can be doubly counted. So, it's
2358	 * better to wait until the end of task_move if something is going on.
*/
2360	if (mem_cgroup_wait_acct_move(mem_over_limit))
2361		return CHARGE_RETRY;
2362
2363	/* If we don't need to invoke the OOM killer at all, return immediately */
2364	if (!oom_check)
2365		return CHARGE_NOMEM;
2366	/* check OOM */
2367	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
2368		return CHARGE_OOM_DIE;
2369
2370	return CHARGE_RETRY;
2371}
2372
2373/*
2374 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
2375 * the oom-killer can be invoked.
2376 */
2377static int __mem_cgroup_try_charge(struct mm_struct *mm,
2378				   gfp_t gfp_mask,
2379				   unsigned int nr_pages,
2380				   struct mem_cgroup **memcg,
2381				   bool oom)
2382{
2383	unsigned int batch = max(CHARGE_BATCH, nr_pages);
2384	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2385	struct mem_cgroup *mem = NULL;
2386	int ret;
2387
2388	/*
2389	 * Unlike the global VM's OOM kill, we are not under a system-wide
2390	 * memory shortage here. So, let dying processes go ahead, in
2391	 * addition to MEMDIE processes.
2392	 */
2393	if (unlikely(test_thread_flag(TIF_MEMDIE)
2394		     || fatal_signal_pending(current)))
2395		goto bypass;
2396
2397	/*
2398	 * We always charge the cgroup the mm_struct belongs to.
2399	 * The mm_struct's mem_cgroup changes on task migration if the
2400	 * thread group leader migrates. It's possible that mm is not
2401	 * set; if so, charge the init_mm (happens for pagecache usage).
2402	 */
2403	if (!*memcg && !mm)
2404		goto bypass;
2405again:
2406	if (*memcg) { /* css should be a valid one */
2407		mem = *memcg;
2408		VM_BUG_ON(css_is_removed(&mem->css));
2409		if (mem_cgroup_is_root(mem))
2410			goto done;
2411		if (nr_pages == 1 && consume_stock(mem))
2412			goto done;
2413		css_get(&mem->css);
2414	} else {
2415		struct task_struct *p;
2416
2417		rcu_read_lock();
2418		p = rcu_dereference(mm->owner);
2419		/*
2420		 * Because we don't have task_lock(), "p" can exit.
2421		 * In that case, "mem" can point to root, or p can be NULL
2422		 * due to a race with swapoff. Then we have a small risk of
2423		 * mis-accounting. But mis-accounting through such races can
2424		 * always happen, because we don't hold cgroup_mutex(); that
2425		 * would be overkill, so we accept the small race here.
2426		 * (*) swapoff and friends charge against the mm_struct, not
2427		 * against the task_struct, so mm->owner can be NULL.
2428		 */
2429		mem = mem_cgroup_from_task(p);
2430		if (!mem || mem_cgroup_is_root(mem)) {
2431			rcu_read_unlock();
2432			goto done;
2433		}
2434		if (nr_pages == 1 && consume_stock(mem)) {
2435			/*
2436			 * It seems dangerous to access memcg without css_get().
2437			 * But considering how consume_stock() works, it's not
2438			 * necessary. If consume_stock() succeeds, some charges
2439			 * from this memcg are cached on this cpu. So, we
2440			 * don't need to call css_get()/css_tryget() before
2441			 * calling consume_stock().
2442			 */
2443			rcu_read_unlock();
2444			goto done;
2445		}
2446		/* after here, we may be blocked.
		   we need to get a refcnt */
2447		if (!css_tryget(&mem->css)) {
2448			rcu_read_unlock();
2449			goto again;
2450		}
2451		rcu_read_unlock();
2452	}
2453
2454	do {
2455		bool oom_check;
2456
2457		/* If killed, bypass charge */
2458		if (fatal_signal_pending(current)) {
2459			css_put(&mem->css);
2460			goto bypass;
2461		}
2462
2463		oom_check = false;
2464		if (oom && !nr_oom_retries) {
2465			oom_check = true;
2466			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2467		}
2468
2469		ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check);
2470		switch (ret) {
2471		case CHARGE_OK:
2472			break;
2473		case CHARGE_RETRY: /* not in OOM situation but retry */
2474			batch = nr_pages;
2475			css_put(&mem->css);
2476			mem = NULL;
2477			goto again;
2478		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2479			css_put(&mem->css);
2480			goto nomem;
2481		case CHARGE_NOMEM: /* OOM routine works */
2482			if (!oom) {
2483				css_put(&mem->css);
2484				goto nomem;
2485			}
2486			/* If oom, we never return -ENOMEM */
2487			nr_oom_retries--;
2488			break;
2489		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
2490			css_put(&mem->css);
2491			goto bypass;
2492		}
2493	} while (ret != CHARGE_OK);
2494
2495	if (batch > nr_pages)
2496		refill_stock(mem, batch - nr_pages);
2497	css_put(&mem->css);
2498done:
2499	*memcg = mem;
2500	return 0;
2501nomem:
2502	*memcg = NULL;
2503	return -ENOMEM;
2504bypass:
2505	*memcg = NULL;
2506	return 0;
2507}
2508
2509/*
2510 * Sometimes we have to undo a charge we got by try_charge().
2511 * This function is for that: it undoes the res_counter charges
2512 * taken by try_charge().
2513 */
2514static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
2515				       unsigned int nr_pages)
2516{
2517	if (!mem_cgroup_is_root(mem)) {
2518		unsigned long bytes = nr_pages * PAGE_SIZE;
2519
2520		res_counter_uncharge(&mem->res, bytes);
2521		if (do_swap_account)
2522			res_counter_uncharge(&mem->memsw, bytes);
2523	}
2524}
2525
2526/*
2527 * A helper function to get a mem_cgroup from an ID. Must be called under
2528 * rcu_read_lock(). The caller must check css_is_removed() or some such
2529 * if that is a concern. (Dropping a refcount from swap can happen against
2530 * an already-removed memcg.)
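 *
 * Sketch of a well-behaved caller (hypothetical; compare
 * try_get_mem_cgroup_from_page() below):
 *
 *	rcu_read_lock();
 *	mem = mem_cgroup_lookup(id);
 *	if (mem && css_tryget(&mem->css)) {
 *		... use mem ...
 *		css_put(&mem->css);
 *	}
 *	rcu_read_unlock();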
2531 */ 2532static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) 2533{ 2534 struct cgroup_subsys_state *css; 2535 2536 /* ID 0 is unused ID */ 2537 if (!id) 2538 return NULL; 2539 css = css_lookup(&mem_cgroup_subsys, id); 2540 if (!css) 2541 return NULL; 2542 return container_of(css, struct mem_cgroup, css); 2543} 2544 2545struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) 2546{ 2547 struct mem_cgroup *mem = NULL; 2548 struct page_cgroup *pc; 2549 unsigned short id; 2550 swp_entry_t ent; 2551 2552 VM_BUG_ON(!PageLocked(page)); 2553 2554 pc = lookup_page_cgroup(page); 2555 lock_page_cgroup(pc); 2556 if (PageCgroupUsed(pc)) { 2557 mem = pc->mem_cgroup; 2558 if (mem && !css_tryget(&mem->css)) 2559 mem = NULL; 2560 } else if (PageSwapCache(page)) { 2561 ent.val = page_private(page); 2562 id = lookup_swap_cgroup(ent); 2563 rcu_read_lock(); 2564 mem = mem_cgroup_lookup(id); 2565 if (mem && !css_tryget(&mem->css)) 2566 mem = NULL; 2567 rcu_read_unlock(); 2568 } 2569 unlock_page_cgroup(pc); 2570 return mem; 2571} 2572 2573static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, 2574 struct page *page, 2575 unsigned int nr_pages, 2576 struct page_cgroup *pc, 2577 enum charge_type ctype) 2578{ 2579 lock_page_cgroup(pc); 2580 if (unlikely(PageCgroupUsed(pc))) { 2581 unlock_page_cgroup(pc); 2582 __mem_cgroup_cancel_charge(mem, nr_pages); 2583 return; 2584 } 2585 /* 2586 * we don't need page_cgroup_lock about tail pages, becase they are not 2587 * accessed by any other context at this point. 2588 */ 2589 pc->mem_cgroup = mem; 2590 /* 2591 * We access a page_cgroup asynchronously without lock_page_cgroup(). 2592 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup 2593 * is accessed after testing USED bit. To make pc->mem_cgroup visible 2594 * before USED bit, we need memory barrier here. 2595 * See mem_cgroup_add_lru_list(), etc. 2596 */ 2597 smp_wmb(); 2598 switch (ctype) { 2599 case MEM_CGROUP_CHARGE_TYPE_CACHE: 2600 case MEM_CGROUP_CHARGE_TYPE_SHMEM: 2601 SetPageCgroupCache(pc); 2602 SetPageCgroupUsed(pc); 2603 break; 2604 case MEM_CGROUP_CHARGE_TYPE_MAPPED: 2605 ClearPageCgroupCache(pc); 2606 SetPageCgroupUsed(pc); 2607 break; 2608 default: 2609 break; 2610 } 2611 2612 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages); 2613 unlock_page_cgroup(pc); 2614 /* 2615 * "charge_statistics" updated event counter. Then, check it. 2616 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. 2617 * if they exceeds softlimit. 2618 */ 2619 memcg_check_events(mem, page); 2620} 2621 2622#ifdef CONFIG_TRANSPARENT_HUGEPAGE 2623 2624#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\ 2625 (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION)) 2626/* 2627 * Because tail pages are not marked as "used", set it. We're under 2628 * zone->lru_lock, 'splitting on pmd' and compund_lock. 2629 */ 2630void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail) 2631{ 2632 struct page_cgroup *head_pc = lookup_page_cgroup(head); 2633 struct page_cgroup *tail_pc = lookup_page_cgroup(tail); 2634 unsigned long flags; 2635 2636 if (mem_cgroup_disabled()) 2637 return; 2638 /* 2639 * We have no races with charge/uncharge but will have races with 2640 * page state accounting. 
2641 */ 2642 move_lock_page_cgroup(head_pc, &flags); 2643 2644 tail_pc->mem_cgroup = head_pc->mem_cgroup; 2645 smp_wmb(); /* see __commit_charge() */ 2646 if (PageCgroupAcctLRU(head_pc)) { 2647 enum lru_list lru; 2648 struct mem_cgroup_per_zone *mz; 2649 2650 /* 2651 * LRU flags cannot be copied because we need to add tail 2652 *.page to LRU by generic call and our hook will be called. 2653 * We hold lru_lock, then, reduce counter directly. 2654 */ 2655 lru = page_lru(head); 2656 mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head); 2657 MEM_CGROUP_ZSTAT(mz, lru) -= 1; 2658 } 2659 tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT; 2660 move_unlock_page_cgroup(head_pc, &flags); 2661} 2662#endif 2663 2664/** 2665 * mem_cgroup_move_account - move account of the page 2666 * @page: the page 2667 * @nr_pages: number of regular pages (>1 for huge pages) 2668 * @pc: page_cgroup of the page. 2669 * @from: mem_cgroup which the page is moved from. 2670 * @to: mem_cgroup which the page is moved to. @from != @to. 2671 * @uncharge: whether we should call uncharge and css_put against @from. 2672 * 2673 * The caller must confirm following. 2674 * - page is not on LRU (isolate_page() is useful.) 2675 * - compound_lock is held when nr_pages > 1 2676 * 2677 * This function doesn't do "charge" nor css_get to new cgroup. It should be 2678 * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is 2679 * true, this function does "uncharge" from old cgroup, but it doesn't if 2680 * @uncharge is false, so a caller should do "uncharge". 2681 */ 2682static int mem_cgroup_move_account(struct page *page, 2683 unsigned int nr_pages, 2684 struct page_cgroup *pc, 2685 struct mem_cgroup *from, 2686 struct mem_cgroup *to, 2687 bool uncharge) 2688{ 2689 unsigned long flags; 2690 int ret; 2691 2692 VM_BUG_ON(from == to); 2693 VM_BUG_ON(PageLRU(page)); 2694 /* 2695 * The page is isolated from LRU. So, collapse function 2696 * will not handle this page. But page splitting can happen. 2697 * Do this check under compound_page_lock(). The caller should 2698 * hold it. 2699 */ 2700 ret = -EBUSY; 2701 if (nr_pages > 1 && !PageTransHuge(page)) 2702 goto out; 2703 2704 lock_page_cgroup(pc); 2705 2706 ret = -EINVAL; 2707 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from) 2708 goto unlock; 2709 2710 move_lock_page_cgroup(pc, &flags); 2711 2712 if (PageCgroupFileMapped(pc)) { 2713 /* Update mapped_file data for mem_cgroup */ 2714 preempt_disable(); 2715 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); 2716 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); 2717 preempt_enable(); 2718 } 2719 mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages); 2720 if (uncharge) 2721 /* This is not "cancel", but cancel_charge does all we need. */ 2722 __mem_cgroup_cancel_charge(from, nr_pages); 2723 2724 /* caller should have done css_get */ 2725 pc->mem_cgroup = to; 2726 mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages); 2727 /* 2728 * We charges against "to" which may not have any tasks. Then, "to" 2729 * can be under rmdir(). But in current implementation, caller of 2730 * this function is just force_empty() and move charge, so it's 2731 * guaranteed that "to" is never removed. So, we don't check rmdir 2732 * status here. 
2733 */ 2734 move_unlock_page_cgroup(pc, &flags); 2735 ret = 0; 2736unlock: 2737 unlock_page_cgroup(pc); 2738 /* 2739 * check events 2740 */ 2741 memcg_check_events(to, page); 2742 memcg_check_events(from, page); 2743out: 2744 return ret; 2745} 2746 2747/* 2748 * move charges to its parent. 2749 */ 2750 2751static int mem_cgroup_move_parent(struct page *page, 2752 struct page_cgroup *pc, 2753 struct mem_cgroup *child, 2754 gfp_t gfp_mask) 2755{ 2756 struct cgroup *cg = child->css.cgroup; 2757 struct cgroup *pcg = cg->parent; 2758 struct mem_cgroup *parent; 2759 unsigned int nr_pages; 2760 unsigned long uninitialized_var(flags); 2761 int ret; 2762 2763 /* Is ROOT ? */ 2764 if (!pcg) 2765 return -EINVAL; 2766 2767 ret = -EBUSY; 2768 if (!get_page_unless_zero(page)) 2769 goto out; 2770 if (isolate_lru_page(page)) 2771 goto put; 2772 2773 nr_pages = hpage_nr_pages(page); 2774 2775 parent = mem_cgroup_from_cont(pcg); 2776 ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false); 2777 if (ret || !parent) 2778 goto put_back; 2779 2780 if (nr_pages > 1) 2781 flags = compound_lock_irqsave(page); 2782 2783 ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true); 2784 if (ret) 2785 __mem_cgroup_cancel_charge(parent, nr_pages); 2786 2787 if (nr_pages > 1) 2788 compound_unlock_irqrestore(page, flags); 2789put_back: 2790 putback_lru_page(page); 2791put: 2792 put_page(page); 2793out: 2794 return ret; 2795} 2796 2797/* 2798 * Charge the memory controller for page usage. 2799 * Return 2800 * 0 if the charge was successful 2801 * < 0 if the cgroup is over its limit 2802 */ 2803static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, 2804 gfp_t gfp_mask, enum charge_type ctype) 2805{ 2806 struct mem_cgroup *mem = NULL; 2807 unsigned int nr_pages = 1; 2808 struct page_cgroup *pc; 2809 bool oom = true; 2810 int ret; 2811 2812 if (PageTransHuge(page)) { 2813 nr_pages <<= compound_order(page); 2814 VM_BUG_ON(!PageTransHuge(page)); 2815 /* 2816 * Never OOM-kill a process for a huge page. The 2817 * fault handler will fall back to regular pages. 2818 */ 2819 oom = false; 2820 } 2821 2822 pc = lookup_page_cgroup(page); 2823 BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */ 2824 2825 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom); 2826 if (ret || !mem) 2827 return ret; 2828 2829 __mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype); 2830 return 0; 2831} 2832 2833int mem_cgroup_newpage_charge(struct page *page, 2834 struct mm_struct *mm, gfp_t gfp_mask) 2835{ 2836 if (mem_cgroup_disabled()) 2837 return 0; 2838 /* 2839 * If already mapped, we don't have to account. 2840 * If page cache, page->mapping has address_space. 2841 * But page->mapping may have out-of-use anon_vma pointer, 2842 * detecit it by PageAnon() check. newly-mapped-anon's page->mapping 2843 * is NULL. 2844 */ 2845 if (page_mapped(page) || (page->mapping && !PageAnon(page))) 2846 return 0; 2847 if (unlikely(!mm)) 2848 mm = &init_mm; 2849 return mem_cgroup_charge_common(page, mm, gfp_mask, 2850 MEM_CGROUP_CHARGE_TYPE_MAPPED); 2851} 2852 2853static void 2854__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, 2855 enum charge_type ctype); 2856 2857static void 2858__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem, 2859 enum charge_type ctype) 2860{ 2861 struct page_cgroup *pc = lookup_page_cgroup(page); 2862 /* 2863 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page 2864 * is already on LRU. 
It means the page may on some other page_cgroup's 2865 * LRU. Take care of it. 2866 */ 2867 mem_cgroup_lru_del_before_commit(page); 2868 __mem_cgroup_commit_charge(mem, page, 1, pc, ctype); 2869 mem_cgroup_lru_add_after_commit(page); 2870 return; 2871} 2872 2873int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 2874 gfp_t gfp_mask) 2875{ 2876 struct mem_cgroup *mem = NULL; 2877 int ret; 2878 2879 if (mem_cgroup_disabled()) 2880 return 0; 2881 if (PageCompound(page)) 2882 return 0; 2883 2884 if (unlikely(!mm)) 2885 mm = &init_mm; 2886 2887 if (page_is_file_cache(page)) { 2888 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true); 2889 if (ret || !mem) 2890 return ret; 2891 2892 /* 2893 * FUSE reuses pages without going through the final 2894 * put that would remove them from the LRU list, make 2895 * sure that they get relinked properly. 2896 */ 2897 __mem_cgroup_commit_charge_lrucare(page, mem, 2898 MEM_CGROUP_CHARGE_TYPE_CACHE); 2899 return ret; 2900 } 2901 /* shmem */ 2902 if (PageSwapCache(page)) { 2903 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem); 2904 if (!ret) 2905 __mem_cgroup_commit_charge_swapin(page, mem, 2906 MEM_CGROUP_CHARGE_TYPE_SHMEM); 2907 } else 2908 ret = mem_cgroup_charge_common(page, mm, gfp_mask, 2909 MEM_CGROUP_CHARGE_TYPE_SHMEM); 2910 2911 return ret; 2912} 2913 2914/* 2915 * While swap-in, try_charge -> commit or cancel, the page is locked. 2916 * And when try_charge() successfully returns, one refcnt to memcg without 2917 * struct page_cgroup is acquired. This refcnt will be consumed by 2918 * "commit()" or removed by "cancel()" 2919 */ 2920int mem_cgroup_try_charge_swapin(struct mm_struct *mm, 2921 struct page *page, 2922 gfp_t mask, struct mem_cgroup **ptr) 2923{ 2924 struct mem_cgroup *mem; 2925 int ret; 2926 2927 *ptr = NULL; 2928 2929 if (mem_cgroup_disabled()) 2930 return 0; 2931 2932 if (!do_swap_account) 2933 goto charge_cur_mm; 2934 /* 2935 * A racing thread's fault, or swapoff, may have already updated 2936 * the pte, and even removed page from swap cache: in those cases 2937 * do_swap_page()'s pte_same() test will fail; but there's also a 2938 * KSM case which does need to charge the page. 2939 */ 2940 if (!PageSwapCache(page)) 2941 goto charge_cur_mm; 2942 mem = try_get_mem_cgroup_from_page(page); 2943 if (!mem) 2944 goto charge_cur_mm; 2945 *ptr = mem; 2946 ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true); 2947 css_put(&mem->css); 2948 return ret; 2949charge_cur_mm: 2950 if (unlikely(!mm)) 2951 mm = &init_mm; 2952 return __mem_cgroup_try_charge(mm, mask, 1, ptr, true); 2953} 2954 2955static void 2956__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, 2957 enum charge_type ctype) 2958{ 2959 if (mem_cgroup_disabled()) 2960 return; 2961 if (!ptr) 2962 return; 2963 cgroup_exclude_rmdir(&ptr->css); 2964 2965 __mem_cgroup_commit_charge_lrucare(page, ptr, ctype); 2966 /* 2967 * Now swap is on-memory. This means this page may be 2968 * counted both as mem and swap....double count. 2969 * Fix it by uncharging from memsw. Basically, this SwapCache is stable 2970 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page() 2971 * may call delete_from_swap_cache() before reach here. 
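	 *
	 * The arithmetic, sketched (assuming do_swap_account): swap-out
	 * left PAGE_SIZE charged to memsw for the swap slot, and
	 * try_charge() at swap-in charged res+memsw again for the page.
	 * Dropping the slot's record below returns the extra memsw charge:
	 *
	 *	id = swap_cgroup_record(ent, 0);	// forget the slot
	 *	res_counter_uncharge(&memcg->memsw, PAGE_SIZE);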
*/
2973	if (do_swap_account && PageSwapCache(page)) {
2974		swp_entry_t ent = {.val = page_private(page)};
2975		unsigned short id;
2976		struct mem_cgroup *memcg;
2977
2978		id = swap_cgroup_record(ent, 0);
2979		rcu_read_lock();
2980		memcg = mem_cgroup_lookup(id);
2981		if (memcg) {
2982			/*
2983			 * This recorded memcg can be an obsolete one. So,
2984			 * avoid calling css_tryget().
2985			 */
2986			if (!mem_cgroup_is_root(memcg))
2987				res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2988			mem_cgroup_swap_statistics(memcg, false);
2989			mem_cgroup_put(memcg);
2990		}
2991		rcu_read_unlock();
2992	}
2993	/*
2994	 * At swapin, we may charge against a cgroup which has no tasks.
2995	 * So, rmdir()->pre_destroy() can be called while we do this charge.
2996	 * In that case, we need to call pre_destroy() again. Check it here.
2997	 */
2998	cgroup_release_and_wakeup_rmdir(&ptr->css);
2999}
3000
3001void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
3002{
3003	__mem_cgroup_commit_charge_swapin(page, ptr,
3004					  MEM_CGROUP_CHARGE_TYPE_MAPPED);
3005}
3006
3007void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
3008{
3009	if (mem_cgroup_disabled())
3010		return;
3011	if (!mem)
3012		return;
3013	__mem_cgroup_cancel_charge(mem, 1);
3014}
3015
3016static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
3017				   unsigned int nr_pages,
3018				   const enum charge_type ctype)
3019{
3020	struct memcg_batch_info *batch = NULL;
3021	bool uncharge_memsw = true;
3022
3023	/* If swapout, usage of swap doesn't decrease */
3024	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
3025		uncharge_memsw = false;
3026
3027	batch = &current->memcg_batch;
3028	/*
3029	 * Usually, we do css_get() when we remember a memcg pointer.
3030	 * But in this case, we keep res->usage until the end of a series of
3031	 * uncharges. Then, it's ok to ignore memcg's refcnt.
3032	 */
3033	if (!batch->memcg)
3034		batch->memcg = mem;
3035	/*
3036	 * do_batch > 0 when unmapping pages or truncating/invalidating inodes.
3037	 * In those cases, all pages freed continuously can be expected to be
3038	 * in the same cgroup and we have a chance to coalesce uncharges.
3039	 * But we do the uncharge one by one if the task is being OOM-killed
3040	 * (TIF_MEMDIE), because we want to uncharge as soon as possible.
3041	 */
3042
3043	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
3044		goto direct_uncharge;
3045
3046	if (nr_pages > 1)
3047		goto direct_uncharge;
3048
3049	/*
3050	 * In the typical case, batch->memcg == mem. This means we can
3051	 * merge a series of uncharges into one uncharge of the res_counter.
3052	 * If not, we uncharge the res_counter one by one.
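	 *
	 * Shape of a batched caller, sketched (hypothetical; the real
	 * users are the unmap/truncate paths):
	 *
	 *	mem_cgroup_uncharge_start();
	 *	// for each page being freed:
	 *	mem_cgroup_uncharge_page(page);	// only accumulates counts
	 *	mem_cgroup_uncharge_end();	// one res_counter_uncharge()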
3053 */ 3054 if (batch->memcg != mem) 3055 goto direct_uncharge; 3056 /* remember freed charge and uncharge it later */ 3057 batch->nr_pages++; 3058 if (uncharge_memsw) 3059 batch->memsw_nr_pages++; 3060 return; 3061direct_uncharge: 3062 res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE); 3063 if (uncharge_memsw) 3064 res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE); 3065 if (unlikely(batch->memcg != mem)) 3066 memcg_oom_recover(mem); 3067 return; 3068} 3069 3070/* 3071 * uncharge if !page_mapped(page) 3072 */ 3073static struct mem_cgroup * 3074__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) 3075{ 3076 struct mem_cgroup *mem = NULL; 3077 unsigned int nr_pages = 1; 3078 struct page_cgroup *pc; 3079 3080 if (mem_cgroup_disabled()) 3081 return NULL; 3082 3083 if (PageSwapCache(page)) 3084 return NULL; 3085 3086 if (PageTransHuge(page)) { 3087 nr_pages <<= compound_order(page); 3088 VM_BUG_ON(!PageTransHuge(page)); 3089 } 3090 /* 3091 * Check if our page_cgroup is valid 3092 */ 3093 pc = lookup_page_cgroup(page); 3094 if (unlikely(!pc || !PageCgroupUsed(pc))) 3095 return NULL; 3096 3097 lock_page_cgroup(pc); 3098 3099 mem = pc->mem_cgroup; 3100 3101 if (!PageCgroupUsed(pc)) 3102 goto unlock_out; 3103 3104 switch (ctype) { 3105 case MEM_CGROUP_CHARGE_TYPE_MAPPED: 3106 case MEM_CGROUP_CHARGE_TYPE_DROP: 3107 /* See mem_cgroup_prepare_migration() */ 3108 if (page_mapped(page) || PageCgroupMigration(pc)) 3109 goto unlock_out; 3110 break; 3111 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT: 3112 if (!PageAnon(page)) { /* Shared memory */ 3113 if (page->mapping && !page_is_file_cache(page)) 3114 goto unlock_out; 3115 } else if (page_mapped(page)) /* Anon */ 3116 goto unlock_out; 3117 break; 3118 default: 3119 break; 3120 } 3121 3122 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages); 3123 3124 ClearPageCgroupUsed(pc); 3125 /* 3126 * pc->mem_cgroup is not cleared here. It will be accessed when it's 3127 * freed from LRU. This is safe because uncharged page is expected not 3128 * to be reused (freed soon). Exception is SwapCache, it's handled by 3129 * special functions. 3130 */ 3131 3132 unlock_page_cgroup(pc); 3133 /* 3134 * even after unlock, we have mem->res.usage here and this memcg 3135 * will never be freed. 3136 */ 3137 memcg_check_events(mem, page); 3138 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { 3139 mem_cgroup_swap_statistics(mem, true); 3140 mem_cgroup_get(mem); 3141 } 3142 if (!mem_cgroup_is_root(mem)) 3143 mem_cgroup_do_uncharge(mem, nr_pages, ctype); 3144 3145 return mem; 3146 3147unlock_out: 3148 unlock_page_cgroup(pc); 3149 return NULL; 3150} 3151 3152void mem_cgroup_uncharge_page(struct page *page) 3153{ 3154 /* early check. */ 3155 if (page_mapped(page)) 3156 return; 3157 if (page->mapping && !PageAnon(page)) 3158 return; 3159 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED); 3160} 3161 3162void mem_cgroup_uncharge_cache_page(struct page *page) 3163{ 3164 VM_BUG_ON(page_mapped(page)); 3165 VM_BUG_ON(page->mapping); 3166 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE); 3167} 3168 3169/* 3170 * Batch_start/batch_end is called in unmap_page_range/invlidate/trucate. 3171 * In that cases, pages are freed continuously and we can expect pages 3172 * are in the same memcg. All these calls itself limits the number of 3173 * pages freed at once, then uncharge_start/end() is called properly. 
 * This may be called several (nested) times in a context.
3175 */
3176
3177void mem_cgroup_uncharge_start(void)
3178{
3179	current->memcg_batch.do_batch++;
3180	/* We can nest. */
3181	if (current->memcg_batch.do_batch == 1) {
3182		current->memcg_batch.memcg = NULL;
3183		current->memcg_batch.nr_pages = 0;
3184		current->memcg_batch.memsw_nr_pages = 0;
3185	}
3186}
3187
3188void mem_cgroup_uncharge_end(void)
3189{
3190	struct memcg_batch_info *batch = &current->memcg_batch;
3191
3192	if (!batch->do_batch)
3193		return;
3194
3195	batch->do_batch--;
3196	if (batch->do_batch) /* If stacked, do nothing. */
3197		return;
3198
3199	if (!batch->memcg)
3200		return;
3201	/*
3202	 * This "batch->memcg" is valid without any css_get/put etc.,
3203	 * because we hide the charges behind us.
3204	 */
3205	if (batch->nr_pages)
3206		res_counter_uncharge(&batch->memcg->res,
3207				     batch->nr_pages * PAGE_SIZE);
3208	if (batch->memsw_nr_pages)
3209		res_counter_uncharge(&batch->memcg->memsw,
3210				     batch->memsw_nr_pages * PAGE_SIZE);
3211	memcg_oom_recover(batch->memcg);
3212	/* forget this pointer (for sanity check) */
3213	batch->memcg = NULL;
3214}
3215
3216#ifdef CONFIG_SWAP
3217/*
3218 * Called after __delete_from_swap_cache(), which drops the "page" account.
3219 * The memcg information is recorded in the swap_cgroup of "ent".
3220 */
3221void
3222mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
3223{
3224	struct mem_cgroup *memcg;
3225	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
3226
3227	if (!swapout) /* this was a swap cache but the swap is unused! */
3228		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
3229
3230	memcg = __mem_cgroup_uncharge_common(page, ctype);
3231
3232	/*
3233	 * Record memcg information; if swapout && memcg != NULL,
3234	 * mem_cgroup_get() was called in uncharge().
3235	 */
3236	if (do_swap_account && swapout && memcg)
3237		swap_cgroup_record(ent, css_id(&memcg->css));
3238}
3239#endif
3240
3241#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3242/*
3243 * Called from swap_entry_free(). Removes the record in swap_cgroup and
3244 * uncharges the "memsw" account.
3245 */
3246void mem_cgroup_uncharge_swap(swp_entry_t ent)
3247{
3248	struct mem_cgroup *memcg;
3249	unsigned short id;
3250
3251	if (!do_swap_account)
3252		return;
3253
3254	id = swap_cgroup_record(ent, 0);
3255	rcu_read_lock();
3256	memcg = mem_cgroup_lookup(id);
3257	if (memcg) {
3258		/*
3259		 * We uncharge this because the swap entry is freed.
3260		 * This memcg can be an obsolete one. We avoid calling
3261		 * css_tryget().
3262		 */
3263		if (!mem_cgroup_is_root(memcg))
3264			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
3265		mem_cgroup_swap_statistics(memcg, false);
3266		mem_cgroup_put(memcg);
3267	}
3268	rcu_read_unlock();
3269}
3270
3271/**
3272 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3273 * @entry: swap entry to be moved
3274 * @from:  mem_cgroup which the entry is moved from
3275 * @to:  mem_cgroup which the entry is moved to
3276 * @need_fixup: whether we should fixup res_counters and refcounts.
3277 *
3278 * It succeeds only when the swap_cgroup's record for this entry is the same
3279 * as the mem_cgroup's id of @from.
3280 *
3281 * Returns 0 on success, -EINVAL on failure.
3282 *
3283 * The caller must have charged to @to, i.e., called res_counter_charge() for
 * both res and memsw, and called css_get().
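 *
 * The record move itself, sketched with hypothetical ids: the
 * swap_cgroup entry stores a css id, so the whole move is a single
 * compare-and-exchange:
 *
 *	old_id = css_id(&from->css);	// e.g. 3
 *	new_id = css_id(&to->css);	// e.g. 7
 *	swap_cgroup_cmpxchg(entry, old_id, new_id);
 *	// 3 -> 7, but only if the record still said 3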
3284 */ 3285static int mem_cgroup_move_swap_account(swp_entry_t entry, 3286 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) 3287{ 3288 unsigned short old_id, new_id; 3289 3290 old_id = css_id(&from->css); 3291 new_id = css_id(&to->css); 3292 3293 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3294 mem_cgroup_swap_statistics(from, false); 3295 mem_cgroup_swap_statistics(to, true); 3296 /* 3297 * This function is only called from task migration context now. 3298 * It postpones res_counter and refcount handling till the end 3299 * of task migration(mem_cgroup_clear_mc()) for performance 3300 * improvement. But we cannot postpone mem_cgroup_get(to) 3301 * because if the process that has been moved to @to does 3302 * swap-in, the refcount of @to might be decreased to 0. 3303 */ 3304 mem_cgroup_get(to); 3305 if (need_fixup) { 3306 if (!mem_cgroup_is_root(from)) 3307 res_counter_uncharge(&from->memsw, PAGE_SIZE); 3308 mem_cgroup_put(from); 3309 /* 3310 * we charged both to->res and to->memsw, so we should 3311 * uncharge to->res. 3312 */ 3313 if (!mem_cgroup_is_root(to)) 3314 res_counter_uncharge(&to->res, PAGE_SIZE); 3315 } 3316 return 0; 3317 } 3318 return -EINVAL; 3319} 3320#else 3321static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3322 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) 3323{ 3324 return -EINVAL; 3325} 3326#endif 3327 3328/* 3329 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old 3330 * page belongs to. 3331 */ 3332int mem_cgroup_prepare_migration(struct page *page, 3333 struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask) 3334{ 3335 struct mem_cgroup *mem = NULL; 3336 struct page_cgroup *pc; 3337 enum charge_type ctype; 3338 int ret = 0; 3339 3340 *ptr = NULL; 3341 3342 VM_BUG_ON(PageTransHuge(page)); 3343 if (mem_cgroup_disabled()) 3344 return 0; 3345 3346 pc = lookup_page_cgroup(page); 3347 lock_page_cgroup(pc); 3348 if (PageCgroupUsed(pc)) { 3349 mem = pc->mem_cgroup; 3350 css_get(&mem->css); 3351 /* 3352 * At migrating an anonymous page, its mapcount goes down 3353 * to 0 and uncharge() will be called. But, even if it's fully 3354 * unmapped, migration may fail and this page has to be 3355 * charged again. We set MIGRATION flag here and delay uncharge 3356 * until end_migration() is called 3357 * 3358 * Corner Case Thinking 3359 * A) 3360 * When the old page was mapped as Anon and it's unmap-and-freed 3361 * while migration was ongoing. 3362 * If unmap finds the old page, uncharge() of it will be delayed 3363 * until end_migration(). If unmap finds a new page, it's 3364 * uncharged when it make mapcount to be 1->0. If unmap code 3365 * finds swap_migration_entry, the new page will not be mapped 3366 * and end_migration() will find it(mapcount==0). 3367 * 3368 * B) 3369 * When the old page was mapped but migraion fails, the kernel 3370 * remaps it. A charge for it is kept by MIGRATION flag even 3371 * if mapcount goes down to 0. We can do remap successfully 3372 * without charging it again. 3373 * 3374 * C) 3375 * The "old" page is under lock_page() until the end of 3376 * migration, so, the old page itself will not be swapped-out. 3377 * If the new page is swapped out before end_migraton, our 3378 * hook to usual swap-out path will catch the event. 3379 */ 3380 if (PageAnon(page)) 3381 SetPageCgroupMigration(pc); 3382 } 3383 unlock_page_cgroup(pc); 3384 /* 3385 * If the page is not charged at this point, 3386 * we return here. 
3387 */ 3388 if (!mem) 3389 return 0; 3390 3391 *ptr = mem; 3392 ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false); 3393 css_put(&mem->css);/* drop extra refcnt */ 3394 if (ret || *ptr == NULL) { 3395 if (PageAnon(page)) { 3396 lock_page_cgroup(pc); 3397 ClearPageCgroupMigration(pc); 3398 unlock_page_cgroup(pc); 3399 /* 3400 * The old page may be fully unmapped while we kept it. 3401 */ 3402 mem_cgroup_uncharge_page(page); 3403 } 3404 return -ENOMEM; 3405 } 3406 /* 3407 * We charge new page before it's used/mapped. So, even if unlock_page() 3408 * is called before end_migration, we can catch all events on this new 3409 * page. In the case new page is migrated but not remapped, new page's 3410 * mapcount will be finally 0 and we call uncharge in end_migration(). 3411 */ 3412 pc = lookup_page_cgroup(newpage); 3413 if (PageAnon(page)) 3414 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED; 3415 else if (page_is_file_cache(page)) 3416 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; 3417 else 3418 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; 3419 __mem_cgroup_commit_charge(mem, page, 1, pc, ctype); 3420 return ret; 3421} 3422 3423/* remove redundant charge if migration failed*/ 3424void mem_cgroup_end_migration(struct mem_cgroup *mem, 3425 struct page *oldpage, struct page *newpage, bool migration_ok) 3426{ 3427 struct page *used, *unused; 3428 struct page_cgroup *pc; 3429 3430 if (!mem) 3431 return; 3432 /* blocks rmdir() */ 3433 cgroup_exclude_rmdir(&mem->css); 3434 if (!migration_ok) { 3435 used = oldpage; 3436 unused = newpage; 3437 } else { 3438 used = newpage; 3439 unused = oldpage; 3440 } 3441 /* 3442 * We disallowed uncharge of pages under migration because mapcount 3443 * of the page goes down to zero, temporarly. 3444 * Clear the flag and check the page should be charged. 3445 */ 3446 pc = lookup_page_cgroup(oldpage); 3447 lock_page_cgroup(pc); 3448 ClearPageCgroupMigration(pc); 3449 unlock_page_cgroup(pc); 3450 3451 __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE); 3452 3453 /* 3454 * If a page is a file cache, radix-tree replacement is very atomic 3455 * and we can skip this check. When it was an Anon page, its mapcount 3456 * goes down to 0. But because we added MIGRATION flage, it's not 3457 * uncharged yet. There are several case but page->mapcount check 3458 * and USED bit check in mem_cgroup_uncharge_page() will do enough 3459 * check. (see prepare_charge() also) 3460 */ 3461 if (PageAnon(used)) 3462 mem_cgroup_uncharge_page(used); 3463 /* 3464 * At migration, we may charge account against cgroup which has no 3465 * tasks. 3466 * So, rmdir()->pre_destroy() can be called while we do this charge. 3467 * In that case, we need to call pre_destroy() again. check it here. 
3468 */ 3469 cgroup_release_and_wakeup_rmdir(&mem->css); 3470} 3471 3472#ifdef CONFIG_DEBUG_VM 3473static struct page_cgroup *lookup_page_cgroup_used(struct page *page) 3474{ 3475 struct page_cgroup *pc; 3476 3477 pc = lookup_page_cgroup(page); 3478 if (likely(pc) && PageCgroupUsed(pc)) 3479 return pc; 3480 return NULL; 3481} 3482 3483bool mem_cgroup_bad_page_check(struct page *page) 3484{ 3485 if (mem_cgroup_disabled()) 3486 return false; 3487 3488 return lookup_page_cgroup_used(page) != NULL; 3489} 3490 3491void mem_cgroup_print_bad_page(struct page *page) 3492{ 3493 struct page_cgroup *pc; 3494 3495 pc = lookup_page_cgroup_used(page); 3496 if (pc) { 3497 int ret = -1; 3498 char *path; 3499 3500 printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p", 3501 pc, pc->flags, pc->mem_cgroup); 3502 3503 path = kmalloc(PATH_MAX, GFP_KERNEL); 3504 if (path) { 3505 rcu_read_lock(); 3506 ret = cgroup_path(pc->mem_cgroup->css.cgroup, 3507 path, PATH_MAX); 3508 rcu_read_unlock(); 3509 } 3510 3511 printk(KERN_CONT "(%s)\n", 3512 (ret < 0) ? "cannot get the path" : path); 3513 kfree(path); 3514 } 3515} 3516#endif 3517 3518static DEFINE_MUTEX(set_limit_mutex); 3519 3520static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 3521 unsigned long long val) 3522{ 3523 int retry_count; 3524 u64 memswlimit, memlimit; 3525 int ret = 0; 3526 int children = mem_cgroup_count_children(memcg); 3527 u64 curusage, oldusage; 3528 int enlarge; 3529 3530 /* 3531 * For keeping hierarchical_reclaim simple, how long we should retry 3532 * is depends on callers. We set our retry-count to be function 3533 * of # of children which we should visit in this loop. 3534 */ 3535 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children; 3536 3537 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE); 3538 3539 enlarge = 0; 3540 while (retry_count) { 3541 if (signal_pending(current)) { 3542 ret = -EINTR; 3543 break; 3544 } 3545 /* 3546 * Rather than hide all in some function, I do this in 3547 * open coded manner. You see what this really does. 3548 * We have to guarantee mem->res.limit < mem->memsw.limit. 3549 */ 3550 mutex_lock(&set_limit_mutex); 3551 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3552 if (memswlimit < val) { 3553 ret = -EINVAL; 3554 mutex_unlock(&set_limit_mutex); 3555 break; 3556 } 3557 3558 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 3559 if (memlimit < val) 3560 enlarge = 1; 3561 3562 ret = res_counter_set_limit(&memcg->res, val); 3563 if (!ret) { 3564 if (memswlimit == val) 3565 memcg->memsw_is_minimum = true; 3566 else 3567 memcg->memsw_is_minimum = false; 3568 } 3569 mutex_unlock(&set_limit_mutex); 3570 3571 if (!ret) 3572 break; 3573 3574 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, 3575 MEM_CGROUP_RECLAIM_SHRINK, 3576 NULL); 3577 curusage = res_counter_read_u64(&memcg->res, RES_USAGE); 3578 /* Usage is reduced ? 
*/ 3579 if (curusage >= oldusage) 3580 retry_count--; 3581 else 3582 oldusage = curusage; 3583 } 3584 if (!ret && enlarge) 3585 memcg_oom_recover(memcg); 3586 3587 return ret; 3588} 3589 3590static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 3591 unsigned long long val) 3592{ 3593 int retry_count; 3594 u64 memlimit, memswlimit, oldusage, curusage; 3595 int children = mem_cgroup_count_children(memcg); 3596 int ret = -EBUSY; 3597 int enlarge = 0; 3598 3599 /* see mem_cgroup_resize_res_limit */ 3600 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; 3601 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 3602 while (retry_count) { 3603 if (signal_pending(current)) { 3604 ret = -EINTR; 3605 break; 3606 } 3607 /* 3608 * Rather than hide all in some function, I do this in 3609 * open coded manner. You see what this really does. 3610 * We have to guarantee mem->res.limit < mem->memsw.limit. 3611 */ 3612 mutex_lock(&set_limit_mutex); 3613 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 3614 if (memlimit > val) { 3615 ret = -EINVAL; 3616 mutex_unlock(&set_limit_mutex); 3617 break; 3618 } 3619 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3620 if (memswlimit < val) 3621 enlarge = 1; 3622 ret = res_counter_set_limit(&memcg->memsw, val); 3623 if (!ret) { 3624 if (memlimit == val) 3625 memcg->memsw_is_minimum = true; 3626 else 3627 memcg->memsw_is_minimum = false; 3628 } 3629 mutex_unlock(&set_limit_mutex); 3630 3631 if (!ret) 3632 break; 3633 3634 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, 3635 MEM_CGROUP_RECLAIM_NOSWAP | 3636 MEM_CGROUP_RECLAIM_SHRINK, 3637 NULL); 3638 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 3639 /* Usage is reduced ? */ 3640 if (curusage >= oldusage) 3641 retry_count--; 3642 else 3643 oldusage = curusage; 3644 } 3645 if (!ret && enlarge) 3646 memcg_oom_recover(memcg); 3647 return ret; 3648} 3649 3650unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 3651 gfp_t gfp_mask, 3652 unsigned long *total_scanned) 3653{ 3654 unsigned long nr_reclaimed = 0; 3655 struct mem_cgroup_per_zone *mz, *next_mz = NULL; 3656 unsigned long reclaimed; 3657 int loop = 0; 3658 struct mem_cgroup_tree_per_zone *mctz; 3659 unsigned long long excess; 3660 unsigned long nr_scanned; 3661 3662 if (order > 0) 3663 return 0; 3664 3665 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone)); 3666 /* 3667 * This loop can run a while, specially if mem_cgroup's continuously 3668 * keep exceeding their soft limit and putting the system under 3669 * pressure 3670 */ 3671 do { 3672 if (next_mz) 3673 mz = next_mz; 3674 else 3675 mz = mem_cgroup_largest_soft_limit_node(mctz); 3676 if (!mz) 3677 break; 3678 3679 nr_scanned = 0; 3680 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone, 3681 gfp_mask, 3682 MEM_CGROUP_RECLAIM_SOFT, 3683 &nr_scanned); 3684 nr_reclaimed += reclaimed; 3685 *total_scanned += nr_scanned; 3686 spin_lock(&mctz->lock); 3687 3688 /* 3689 * If we failed to reclaim anything from this memory cgroup 3690 * it is time to move on to the next cgroup 3691 */ 3692 next_mz = NULL; 3693 if (!reclaimed) { 3694 do { 3695 /* 3696 * Loop until we find yet another one. 3697 * 3698 * By the time we get the soft_limit lock 3699 * again, someone might have aded the 3700 * group back on the RB tree. Iterate to 3701 * make sure we get a different mem. 
3702 * mem_cgroup_largest_soft_limit_node returns 3703 * NULL if no other cgroup is present on 3704 * the tree 3705 */ 3706 next_mz = 3707 __mem_cgroup_largest_soft_limit_node(mctz); 3708 if (next_mz == mz) 3709 css_put(&next_mz->mem->css); 3710 else /* next_mz == NULL or other memcg */ 3711 break; 3712 } while (1); 3713 } 3714 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz); 3715 excess = res_counter_soft_limit_excess(&mz->mem->res); 3716 /* 3717 * One school of thought says that we should not add 3718 * back the node to the tree if reclaim returns 0. 3719 * But our reclaim could return 0, simply because due 3720 * to priority we are exposing a smaller subset of 3721 * memory to reclaim from. Consider this as a longer 3722 * term TODO. 3723 */ 3724 /* If excess == 0, no tree ops */ 3725 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess); 3726 spin_unlock(&mctz->lock); 3727 css_put(&mz->mem->css); 3728 loop++; 3729 /* 3730 * Could not reclaim anything and there are no more 3731 * mem cgroups to try or we seem to be looping without 3732 * reclaiming anything. 3733 */ 3734 if (!nr_reclaimed && 3735 (next_mz == NULL || 3736 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3737 break; 3738 } while (!nr_reclaimed); 3739 if (next_mz) 3740 css_put(&next_mz->mem->css); 3741 return nr_reclaimed; 3742} 3743 3744/* 3745 * This routine traverse page_cgroup in given list and drop them all. 3746 * *And* this routine doesn't reclaim page itself, just removes page_cgroup. 3747 */ 3748static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, 3749 int node, int zid, enum lru_list lru) 3750{ 3751 struct zone *zone; 3752 struct mem_cgroup_per_zone *mz; 3753 struct page_cgroup *pc, *busy; 3754 unsigned long flags, loop; 3755 struct list_head *list; 3756 int ret = 0; 3757 3758 zone = &NODE_DATA(node)->node_zones[zid]; 3759 mz = mem_cgroup_zoneinfo(mem, node, zid); 3760 list = &mz->lists[lru]; 3761 3762 loop = MEM_CGROUP_ZSTAT(mz, lru); 3763 /* give some margin against EBUSY etc...*/ 3764 loop += 256; 3765 busy = NULL; 3766 while (loop--) { 3767 struct page *page; 3768 3769 ret = 0; 3770 spin_lock_irqsave(&zone->lru_lock, flags); 3771 if (list_empty(list)) { 3772 spin_unlock_irqrestore(&zone->lru_lock, flags); 3773 break; 3774 } 3775 pc = list_entry(list->prev, struct page_cgroup, lru); 3776 if (busy == pc) { 3777 list_move(&pc->lru, list); 3778 busy = NULL; 3779 spin_unlock_irqrestore(&zone->lru_lock, flags); 3780 continue; 3781 } 3782 spin_unlock_irqrestore(&zone->lru_lock, flags); 3783 3784 page = lookup_cgroup_page(pc); 3785 3786 ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL); 3787 if (ret == -ENOMEM) 3788 break; 3789 3790 if (ret == -EBUSY || ret == -EINVAL) { 3791 /* found lock contention or "pc" is obsolete. */ 3792 busy = pc; 3793 cond_resched(); 3794 } else 3795 busy = NULL; 3796 } 3797 3798 if (!ret && !list_empty(list)) 3799 return -EBUSY; 3800 return ret; 3801} 3802 3803/* 3804 * make mem_cgroup's charge to be 0 if there is no task. 3805 * This enables deleting this mem_cgroup. 3806 */ 3807static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all) 3808{ 3809 int ret; 3810 int node, zid, shrink; 3811 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 3812 struct cgroup *cgrp = mem->css.cgroup; 3813 3814 css_get(&mem->css); 3815 3816 shrink = 0; 3817 /* should free all ? 
*/ 3818 if (free_all) 3819 goto try_to_free; 3820move_account: 3821 do { 3822 ret = -EBUSY; 3823 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children)) 3824 goto out; 3825 ret = -EINTR; 3826 if (signal_pending(current)) 3827 goto out; 3828 /* This is for making all *used* pages to be on LRU. */ 3829 lru_add_drain_all(); 3830 drain_all_stock_sync(mem); 3831 ret = 0; 3832 mem_cgroup_start_move(mem); 3833 for_each_node_state(node, N_HIGH_MEMORY) { 3834 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { 3835 enum lru_list l; 3836 for_each_lru(l) { 3837 ret = mem_cgroup_force_empty_list(mem, 3838 node, zid, l); 3839 if (ret) 3840 break; 3841 } 3842 } 3843 if (ret) 3844 break; 3845 } 3846 mem_cgroup_end_move(mem); 3847 memcg_oom_recover(mem); 3848 /* it seems parent cgroup doesn't have enough mem */ 3849 if (ret == -ENOMEM) 3850 goto try_to_free; 3851 cond_resched(); 3852 /* "ret" should also be checked to ensure all lists are empty. */ 3853 } while (mem->res.usage > 0 || ret); 3854out: 3855 css_put(&mem->css); 3856 return ret; 3857 3858try_to_free: 3859 /* returns EBUSY if there is a task or if we come here twice. */ 3860 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) { 3861 ret = -EBUSY; 3862 goto out; 3863 } 3864 /* we call try-to-free pages for make this cgroup empty */ 3865 lru_add_drain_all(); 3866 /* try to free all pages in this cgroup */ 3867 shrink = 1; 3868 while (nr_retries && mem->res.usage > 0) { 3869 struct memcg_scanrecord rec; 3870 int progress; 3871 3872 if (signal_pending(current)) { 3873 ret = -EINTR; 3874 goto out; 3875 } 3876 rec.context = SCAN_BY_SHRINK; 3877 rec.mem = mem; 3878 rec.root = mem; 3879 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, 3880 false, &rec); 3881 if (!progress) { 3882 nr_retries--; 3883 /* maybe some writeback is necessary */ 3884 congestion_wait(BLK_RW_ASYNC, HZ/10); 3885 } 3886 3887 } 3888 lru_add_drain(); 3889 /* try move_account...there may be some *locked* pages. */ 3890 goto move_account; 3891} 3892 3893int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event) 3894{ 3895 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true); 3896} 3897 3898 3899static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft) 3900{ 3901 return mem_cgroup_from_cont(cont)->use_hierarchy; 3902} 3903 3904static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft, 3905 u64 val) 3906{ 3907 int retval = 0; 3908 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 3909 struct cgroup *parent = cont->parent; 3910 struct mem_cgroup *parent_mem = NULL; 3911 3912 if (parent) 3913 parent_mem = mem_cgroup_from_cont(parent); 3914 3915 cgroup_lock(); 3916 /* 3917 * If parent's use_hierarchy is set, we can't make any modifications 3918 * in the child subtrees. If it is unset, then the change can 3919 * occur, provided the current cgroup has no children. 3920 * 3921 * For the root cgroup, parent_mem is NULL, we allow value to be 3922 * set if there are no children. 
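	 *
	 * Decision table, sketched (val other than 0 or 1 is -EINVAL too):
	 *
	 *	parent hierarchical?	children?	result
	 *	yes			-		-EINVAL
	 *	no			yes		-EBUSY
	 *	no			no		use_hierarchy = val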
3923 */ 3924 if ((!parent_mem || !parent_mem->use_hierarchy) && 3925 (val == 1 || val == 0)) { 3926 if (list_empty(&cont->children)) 3927 mem->use_hierarchy = val; 3928 else 3929 retval = -EBUSY; 3930 } else 3931 retval = -EINVAL; 3932 cgroup_unlock(); 3933 3934 return retval; 3935} 3936 3937 3938static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem, 3939 enum mem_cgroup_stat_index idx) 3940{ 3941 struct mem_cgroup *iter; 3942 long val = 0; 3943 3944 /* Per-cpu values can be negative, use a signed accumulator */ 3945 for_each_mem_cgroup_tree(iter, mem) 3946 val += mem_cgroup_read_stat(iter, idx); 3947 3948 if (val < 0) /* race ? */ 3949 val = 0; 3950 return val; 3951} 3952 3953static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap) 3954{ 3955 u64 val; 3956 3957 if (!mem_cgroup_is_root(mem)) { 3958 if (!swap) 3959 return res_counter_read_u64(&mem->res, RES_USAGE); 3960 else 3961 return res_counter_read_u64(&mem->memsw, RES_USAGE); 3962 } 3963 3964 val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE); 3965 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS); 3966 3967 if (swap) 3968 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT); 3969 3970 return val << PAGE_SHIFT; 3971} 3972 3973static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) 3974{ 3975 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 3976 u64 val; 3977 int type, name; 3978 3979 type = MEMFILE_TYPE(cft->private); 3980 name = MEMFILE_ATTR(cft->private); 3981 switch (type) { 3982 case _MEM: 3983 if (name == RES_USAGE) 3984 val = mem_cgroup_usage(mem, false); 3985 else 3986 val = res_counter_read_u64(&mem->res, name); 3987 break; 3988 case _MEMSWAP: 3989 if (name == RES_USAGE) 3990 val = mem_cgroup_usage(mem, true); 3991 else 3992 val = res_counter_read_u64(&mem->memsw, name); 3993 break; 3994 default: 3995 BUG(); 3996 break; 3997 } 3998 return val; 3999} 4000/* 4001 * The user of this function is... 4002 * RES_LIMIT. 4003 */ 4004static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft, 4005 const char *buffer) 4006{ 4007 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 4008 int type, name; 4009 unsigned long long val; 4010 int ret; 4011 4012 type = MEMFILE_TYPE(cft->private); 4013 name = MEMFILE_ATTR(cft->private); 4014 switch (name) { 4015 case RES_LIMIT: 4016 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 4017 ret = -EINVAL; 4018 break; 4019 } 4020 /* This function does all necessary parse...reuse it */ 4021 ret = res_counter_memparse_write_strategy(buffer, &val); 4022 if (ret) 4023 break; 4024 if (type == _MEM) 4025 ret = mem_cgroup_resize_limit(memcg, val); 4026 else 4027 ret = mem_cgroup_resize_memsw_limit(memcg, val); 4028 break; 4029 case RES_SOFT_LIMIT: 4030 ret = res_counter_memparse_write_strategy(buffer, &val); 4031 if (ret) 4032 break; 4033 /* 4034 * For memsw, soft limits are hard to implement in terms 4035 * of semantics, for now, we support soft limits for 4036 * control without swap 4037 */ 4038 if (type == _MEM) 4039 ret = res_counter_set_soft_limit(&memcg->res, val); 4040 else 4041 ret = -EINVAL; 4042 break; 4043 default: 4044 ret = -EINVAL; /* should be BUG() ? 
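		 *
		 * Only a cftype registered with an unknown attribute could
		 * get here, so -EINVAL is the safer answer. For the handled
		 * cases above, res_counter_memparse_write_strategy() accepts
		 * plain bytes or K/M/G suffixes and maps "-1" to
		 * RESOURCE_MAX: e.g. writing "64M" to memory.limit_in_bytes
		 * reaches mem_cgroup_resize_limit() with val == 67108864,
		 * and writing "-1" removes the limit.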
 */
		break;
	}
	return ret;
}

static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
		unsigned long long *mem_limit, unsigned long long *memsw_limit)
{
	struct cgroup *cgroup;
	unsigned long long min_limit, min_memsw_limit, tmp;

	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	cgroup = memcg->css.cgroup;
	if (!memcg->use_hierarchy)
		goto out;

	while (cgroup->parent) {
		cgroup = cgroup->parent;
		memcg = mem_cgroup_from_cont(cgroup);
		if (!memcg->use_hierarchy)
			break;
		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
		min_limit = min(min_limit, tmp);
		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		min_memsw_limit = min(min_memsw_limit, tmp);
	}
out:
	*mem_limit = min_limit;
	*memsw_limit = min_memsw_limit;
	return;
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;
	int type, name;

	mem = mem_cgroup_from_cont(cont);
	type = MEMFILE_TYPE(event);
	name = MEMFILE_ATTR(event);
	switch (name) {
	case RES_MAX_USAGE:
		if (type == _MEM)
			res_counter_reset_max(&mem->res);
		else
			res_counter_reset_max(&mem->memsw);
		break;
	case RES_FAILCNT:
		if (type == _MEM)
			res_counter_reset_failcnt(&mem->res);
		else
			res_counter_reset_failcnt(&mem->memsw);
		break;
	}

	return 0;
}

static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);

	if (val >= (1 << NR_MOVE_TYPE))
		return -EINVAL;
	/*
	 * We check this value several times, both in can_attach() and
	 * attach(), so we need the cgroup lock to keep it from changing
	 * underneath us.
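	 *
	 * val is a bitmask consumed by move_anon()/move_file(): bit 0
	 * selects anonymous pages (and their swap entries), bit 1 file and
	 * shmem pages, so writing 3 to memory.move_charge_at_immigrate
	 * moves both classes when a task immigrates. Bits at or above
	 * NR_MOVE_TYPE were rejected just above.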
4122 */ 4123 cgroup_lock(); 4124 mem->move_charge_at_immigrate = val; 4125 cgroup_unlock(); 4126 4127 return 0; 4128} 4129#else 4130static int mem_cgroup_move_charge_write(struct cgroup *cgrp, 4131 struct cftype *cft, u64 val) 4132{ 4133 return -ENOSYS; 4134} 4135#endif 4136 4137 4138/* For read statistics */ 4139enum { 4140 MCS_CACHE, 4141 MCS_RSS, 4142 MCS_FILE_MAPPED, 4143 MCS_PGPGIN, 4144 MCS_PGPGOUT, 4145 MCS_SWAP, 4146 MCS_PGFAULT, 4147 MCS_PGMAJFAULT, 4148 MCS_INACTIVE_ANON, 4149 MCS_ACTIVE_ANON, 4150 MCS_INACTIVE_FILE, 4151 MCS_ACTIVE_FILE, 4152 MCS_UNEVICTABLE, 4153 NR_MCS_STAT, 4154}; 4155 4156struct mcs_total_stat { 4157 s64 stat[NR_MCS_STAT]; 4158}; 4159 4160struct { 4161 char *local_name; 4162 char *total_name; 4163} memcg_stat_strings[NR_MCS_STAT] = { 4164 {"cache", "total_cache"}, 4165 {"rss", "total_rss"}, 4166 {"mapped_file", "total_mapped_file"}, 4167 {"pgpgin", "total_pgpgin"}, 4168 {"pgpgout", "total_pgpgout"}, 4169 {"swap", "total_swap"}, 4170 {"pgfault", "total_pgfault"}, 4171 {"pgmajfault", "total_pgmajfault"}, 4172 {"inactive_anon", "total_inactive_anon"}, 4173 {"active_anon", "total_active_anon"}, 4174 {"inactive_file", "total_inactive_file"}, 4175 {"active_file", "total_active_file"}, 4176 {"unevictable", "total_unevictable"} 4177}; 4178 4179 4180static void 4181mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) 4182{ 4183 s64 val; 4184 4185 /* per cpu stat */ 4186 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE); 4187 s->stat[MCS_CACHE] += val * PAGE_SIZE; 4188 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS); 4189 s->stat[MCS_RSS] += val * PAGE_SIZE; 4190 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED); 4191 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE; 4192 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN); 4193 s->stat[MCS_PGPGIN] += val; 4194 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT); 4195 s->stat[MCS_PGPGOUT] += val; 4196 if (do_swap_account) { 4197 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT); 4198 s->stat[MCS_SWAP] += val * PAGE_SIZE; 4199 } 4200 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT); 4201 s->stat[MCS_PGFAULT] += val; 4202 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT); 4203 s->stat[MCS_PGMAJFAULT] += val; 4204 4205 /* per zone stat */ 4206 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_ANON)); 4207 s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE; 4208 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_ANON)); 4209 s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE; 4210 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_FILE)); 4211 s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE; 4212 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_FILE)); 4213 s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE; 4214 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_UNEVICTABLE)); 4215 s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE; 4216} 4217 4218static void 4219mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) 4220{ 4221 struct mem_cgroup *iter; 4222 4223 for_each_mem_cgroup_tree(iter, mem) 4224 mem_cgroup_get_local_stat(iter, s); 4225} 4226 4227#ifdef CONFIG_NUMA 4228static int mem_control_numa_stat_show(struct seq_file *m, void *arg) 4229{ 4230 int nid; 4231 unsigned long total_nr, file_nr, anon_nr, unevictable_nr; 4232 unsigned long node_nr; 4233 struct cgroup *cont = m->private; 4234 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); 4235 4236 total_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL); 4237 seq_printf(m, 
"total=%lu", total_nr); 4238 for_each_node_state(nid, N_HIGH_MEMORY) { 4239 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL); 4240 seq_printf(m, " N%d=%lu", nid, node_nr); 4241 } 4242 seq_putc(m, '\n'); 4243 4244 file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE); 4245 seq_printf(m, "file=%lu", file_nr); 4246 for_each_node_state(nid, N_HIGH_MEMORY) { 4247 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, 4248 LRU_ALL_FILE); 4249 seq_printf(m, " N%d=%lu", nid, node_nr); 4250 } 4251 seq_putc(m, '\n'); 4252 4253 anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON); 4254 seq_printf(m, "anon=%lu", anon_nr); 4255 for_each_node_state(nid, N_HIGH_MEMORY) { 4256 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, 4257 LRU_ALL_ANON); 4258 seq_printf(m, " N%d=%lu", nid, node_nr); 4259 } 4260 seq_putc(m, '\n'); 4261 4262 unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE)); 4263 seq_printf(m, "unevictable=%lu", unevictable_nr); 4264 for_each_node_state(nid, N_HIGH_MEMORY) { 4265 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, 4266 BIT(LRU_UNEVICTABLE)); 4267 seq_printf(m, " N%d=%lu", nid, node_nr); 4268 } 4269 seq_putc(m, '\n'); 4270 return 0; 4271} 4272#endif /* CONFIG_NUMA */ 4273 4274static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, 4275 struct cgroup_map_cb *cb) 4276{ 4277 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); 4278 struct mcs_total_stat mystat; 4279 int i; 4280 4281 memset(&mystat, 0, sizeof(mystat)); 4282 mem_cgroup_get_local_stat(mem_cont, &mystat); 4283 4284 4285 for (i = 0; i < NR_MCS_STAT; i++) { 4286 if (i == MCS_SWAP && !do_swap_account) 4287 continue; 4288 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]); 4289 } 4290 4291 /* Hierarchical information */ 4292 { 4293 unsigned long long limit, memsw_limit; 4294 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit); 4295 cb->fill(cb, "hierarchical_memory_limit", limit); 4296 if (do_swap_account) 4297 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit); 4298 } 4299 4300 memset(&mystat, 0, sizeof(mystat)); 4301 mem_cgroup_get_total_stat(mem_cont, &mystat); 4302 for (i = 0; i < NR_MCS_STAT; i++) { 4303 if (i == MCS_SWAP && !do_swap_account) 4304 continue; 4305 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]); 4306 } 4307 4308#ifdef CONFIG_DEBUG_VM 4309 cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL)); 4310 4311 { 4312 int nid, zid; 4313 struct mem_cgroup_per_zone *mz; 4314 unsigned long recent_rotated[2] = {0, 0}; 4315 unsigned long recent_scanned[2] = {0, 0}; 4316 4317 for_each_online_node(nid) 4318 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 4319 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); 4320 4321 recent_rotated[0] += 4322 mz->reclaim_stat.recent_rotated[0]; 4323 recent_rotated[1] += 4324 mz->reclaim_stat.recent_rotated[1]; 4325 recent_scanned[0] += 4326 mz->reclaim_stat.recent_scanned[0]; 4327 recent_scanned[1] += 4328 mz->reclaim_stat.recent_scanned[1]; 4329 } 4330 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]); 4331 cb->fill(cb, "recent_rotated_file", recent_rotated[1]); 4332 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]); 4333 cb->fill(cb, "recent_scanned_file", recent_scanned[1]); 4334 } 4335#endif 4336 4337 return 0; 4338} 4339 4340static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft) 4341{ 4342 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4343 4344 return mem_cgroup_swappiness(memcg); 4345} 4346 4347static int 
mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
				       u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup *parent;

	if (val > 100)
		return -EINVAL;

	if (cgrp->parent == NULL)
		return -EINVAL;

	parent = mem_cgroup_from_cont(cgrp->parent);

	cgroup_lock();

	/* Under a hierarchy, only a root memcg with no children may set this */
	if ((parent->use_hierarchy) ||
	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
		cgroup_unlock();
		return -EINVAL;
	}

	memcg->swappiness = val;

	cgroup_unlock();

	return 0;
}

static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	u64 usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to the threshold just below usage.
	 * If that no longer holds, a threshold was crossed after the
	 * last call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over the array of thresholds starting from
	 * current_threshold and check whether a threshold was crossed.
	 * If none of the thresholds below usage was crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over the array of thresholds starting from
	 * current_threshold+1 and check whether a threshold was crossed.
	 * If none of the thresholds above usage was crossed, we read
	 * only one element of the array here.
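	 *
	 * Worked example: with thresholds {4M, 8M, 16M} and a last-seen
	 * usage of 10M, current_threshold indexes the 8M entry. If usage
	 * dropped to 3M, the backward loop above signalled the 8M and 4M
	 * eventfds and left i == -1; if usage grew to 20M, the loop below
	 * signals the 16M eventfd. Either way current_threshold ends up
	 * pointing at the threshold just below the new usage again.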
4418 */ 4419 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4420 eventfd_signal(t->entries[i].eventfd, 1); 4421 4422 /* Update current_threshold */ 4423 t->current_threshold = i - 1; 4424unlock: 4425 rcu_read_unlock(); 4426} 4427 4428static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4429{ 4430 while (memcg) { 4431 __mem_cgroup_threshold(memcg, false); 4432 if (do_swap_account) 4433 __mem_cgroup_threshold(memcg, true); 4434 4435 memcg = parent_mem_cgroup(memcg); 4436 } 4437} 4438 4439static int compare_thresholds(const void *a, const void *b) 4440{ 4441 const struct mem_cgroup_threshold *_a = a; 4442 const struct mem_cgroup_threshold *_b = b; 4443 4444 return _a->threshold - _b->threshold; 4445} 4446 4447static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem) 4448{ 4449 struct mem_cgroup_eventfd_list *ev; 4450 4451 list_for_each_entry(ev, &mem->oom_notify, list) 4452 eventfd_signal(ev->eventfd, 1); 4453 return 0; 4454} 4455 4456static void mem_cgroup_oom_notify(struct mem_cgroup *mem) 4457{ 4458 struct mem_cgroup *iter; 4459 4460 for_each_mem_cgroup_tree(iter, mem) 4461 mem_cgroup_oom_notify_cb(iter); 4462} 4463 4464static int mem_cgroup_usage_register_event(struct cgroup *cgrp, 4465 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) 4466{ 4467 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4468 struct mem_cgroup_thresholds *thresholds; 4469 struct mem_cgroup_threshold_ary *new; 4470 int type = MEMFILE_TYPE(cft->private); 4471 u64 threshold, usage; 4472 int i, size, ret; 4473 4474 ret = res_counter_memparse_write_strategy(args, &threshold); 4475 if (ret) 4476 return ret; 4477 4478 mutex_lock(&memcg->thresholds_lock); 4479 4480 if (type == _MEM) 4481 thresholds = &memcg->thresholds; 4482 else if (type == _MEMSWAP) 4483 thresholds = &memcg->memsw_thresholds; 4484 else 4485 BUG(); 4486 4487 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 4488 4489 /* Check if a threshold crossed before adding a new one */ 4490 if (thresholds->primary) 4491 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4492 4493 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 4494 4495 /* Allocate memory for new array of thresholds */ 4496 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 4497 GFP_KERNEL); 4498 if (!new) { 4499 ret = -ENOMEM; 4500 goto unlock; 4501 } 4502 new->size = size; 4503 4504 /* Copy thresholds (if any) to new array */ 4505 if (thresholds->primary) { 4506 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 4507 sizeof(struct mem_cgroup_threshold)); 4508 } 4509 4510 /* Add new threshold */ 4511 new->entries[size - 1].eventfd = eventfd; 4512 new->entries[size - 1].threshold = threshold; 4513 4514 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4515 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 4516 compare_thresholds, NULL); 4517 4518 /* Find current threshold */ 4519 new->current_threshold = -1; 4520 for (i = 0; i < size; i++) { 4521 if (new->entries[i].threshold < usage) { 4522 /* 4523 * new->current_threshold will not be used until 4524 * rcu_assign_pointer(), so it's safe to increment 4525 * it here. 
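			 *
			 * E.g. with sorted entries {4M, 8M, 16M} and a
			 * current usage of 10M, this loop leaves
			 * new->current_threshold == 1 (the 8M slot).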
 */
			++new->current_threshold;
		}
	}

	/* Free the old spare buffer and save the old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* Make sure nobody still uses the old thresholds array */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}

static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	int type = MEMFILE_TYPE(cft->private);
	u64 usage;
	int i, j, size;

	mutex_lock(&memcg->thresholds_lock);
	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	/*
	 * Something went wrong if we are trying to unregister a threshold
	 * while no thresholds are registered.
	 */
	BUG_ON(!thresholds);

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold was crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate the new number of thresholds */
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
	}

	new = thresholds->spare;

	/* Set the thresholds array to NULL if no thresholds remain */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find the current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold < usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap the primary and spare arrays */
	thresholds->spare = thresholds->primary;
	rcu_assign_pointer(thresholds->primary, new);

	/* Make sure nobody still uses the old thresholds array */
	synchronize_rcu();

	mutex_unlock(&memcg->thresholds_lock);
}

static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *event;
	int type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);
	event = kmalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ?
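	 *
	 * If the group is already under OOM when a listener registers, the
	 * eventfd is signalled right away below, so the state cannot be
	 * missed. Registration uses the usual cgroup.event_control
	 * handshake; a hedged userspace sketch (needs <sys/eventfd.h>,
	 * <fcntl.h>, <stdint.h>, <unistd.h>) for a hypothetical group
	 * directory "grp":
	 *
	 *	int efd = eventfd(0, 0);
	 *	int ofd = open("grp/memory.oom_control", O_RDONLY);
	 *	int cfd = open("grp/cgroup.event_control", O_WRONLY);
	 *	char buf[32];
	 *	int len = snprintf(buf, sizeof(buf), "%d %d", efd, ofd);
	 *	write(cfd, buf, len);		// ends up in this function
	 *	uint64_t cnt;
	 *	read(efd, &cnt, sizeof(cnt));	// returns once under OOM
	 *
	 * The usage thresholds above register the same way, with
	 * memory.usage_in_bytes as the control file and the threshold
	 * (e.g. "32M") as a third token.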
*/ 4640 if (atomic_read(&memcg->under_oom)) 4641 eventfd_signal(eventfd, 1); 4642 spin_unlock(&memcg_oom_lock); 4643 4644 return 0; 4645} 4646 4647static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, 4648 struct cftype *cft, struct eventfd_ctx *eventfd) 4649{ 4650 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4651 struct mem_cgroup_eventfd_list *ev, *tmp; 4652 int type = MEMFILE_TYPE(cft->private); 4653 4654 BUG_ON(type != _OOM_TYPE); 4655 4656 spin_lock(&memcg_oom_lock); 4657 4658 list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) { 4659 if (ev->eventfd == eventfd) { 4660 list_del(&ev->list); 4661 kfree(ev); 4662 } 4663 } 4664 4665 spin_unlock(&memcg_oom_lock); 4666} 4667 4668static int mem_cgroup_oom_control_read(struct cgroup *cgrp, 4669 struct cftype *cft, struct cgroup_map_cb *cb) 4670{ 4671 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4672 4673 cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable); 4674 4675 if (atomic_read(&mem->under_oom)) 4676 cb->fill(cb, "under_oom", 1); 4677 else 4678 cb->fill(cb, "under_oom", 0); 4679 return 0; 4680} 4681 4682static int mem_cgroup_oom_control_write(struct cgroup *cgrp, 4683 struct cftype *cft, u64 val) 4684{ 4685 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4686 struct mem_cgroup *parent; 4687 4688 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4689 if (!cgrp->parent || !((val == 0) || (val == 1))) 4690 return -EINVAL; 4691 4692 parent = mem_cgroup_from_cont(cgrp->parent); 4693 4694 cgroup_lock(); 4695 /* oom-kill-disable is a flag for subhierarchy. */ 4696 if ((parent->use_hierarchy) || 4697 (mem->use_hierarchy && !list_empty(&cgrp->children))) { 4698 cgroup_unlock(); 4699 return -EINVAL; 4700 } 4701 mem->oom_kill_disable = val; 4702 if (!val) 4703 memcg_oom_recover(mem); 4704 cgroup_unlock(); 4705 return 0; 4706} 4707 4708#ifdef CONFIG_NUMA 4709static const struct file_operations mem_control_numa_stat_file_operations = { 4710 .read = seq_read, 4711 .llseek = seq_lseek, 4712 .release = single_release, 4713}; 4714 4715static int mem_control_numa_stat_open(struct inode *unused, struct file *file) 4716{ 4717 struct cgroup *cont = file->f_dentry->d_parent->d_fsdata; 4718 4719 file->f_op = &mem_control_numa_stat_file_operations; 4720 return single_open(file, mem_control_numa_stat_show, cont); 4721} 4722#endif /* CONFIG_NUMA */ 4723 4724static int mem_cgroup_vmscan_stat_read(struct cgroup *cgrp, 4725 struct cftype *cft, 4726 struct cgroup_map_cb *cb) 4727{ 4728 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4729 char string[64]; 4730 int i; 4731 4732 for (i = 0; i < NR_SCANSTATS; i++) { 4733 strcpy(string, scanstat_string[i]); 4734 strcat(string, SCANSTAT_WORD_LIMIT); 4735 cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_LIMIT][i]); 4736 } 4737 4738 for (i = 0; i < NR_SCANSTATS; i++) { 4739 strcpy(string, scanstat_string[i]); 4740 strcat(string, SCANSTAT_WORD_SYSTEM); 4741 cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_SYSTEM][i]); 4742 } 4743 4744 for (i = 0; i < NR_SCANSTATS; i++) { 4745 strcpy(string, scanstat_string[i]); 4746 strcat(string, SCANSTAT_WORD_LIMIT); 4747 strcat(string, SCANSTAT_WORD_HIERARCHY); 4748 cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_LIMIT][i]); 4749 } 4750 for (i = 0; i < NR_SCANSTATS; i++) { 4751 strcpy(string, scanstat_string[i]); 4752 strcat(string, SCANSTAT_WORD_SYSTEM); 4753 strcat(string, SCANSTAT_WORD_HIERARCHY); 4754 cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_SYSTEM][i]); 4755 } 4756 return 0; 4757} 4758 4759static int 
mem_cgroup_reset_vmscan_stat(struct cgroup *cgrp, 4760 unsigned int event) 4761{ 4762 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4763 4764 spin_lock(&mem->scanstat.lock); 4765 memset(&mem->scanstat.stats, 0, sizeof(mem->scanstat.stats)); 4766 memset(&mem->scanstat.rootstats, 0, sizeof(mem->scanstat.rootstats)); 4767 spin_unlock(&mem->scanstat.lock); 4768 return 0; 4769} 4770 4771 4772static struct cftype mem_cgroup_files[] = { 4773 { 4774 .name = "usage_in_bytes", 4775 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4776 .read_u64 = mem_cgroup_read, 4777 .register_event = mem_cgroup_usage_register_event, 4778 .unregister_event = mem_cgroup_usage_unregister_event, 4779 }, 4780 { 4781 .name = "max_usage_in_bytes", 4782 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4783 .trigger = mem_cgroup_reset, 4784 .read_u64 = mem_cgroup_read, 4785 }, 4786 { 4787 .name = "limit_in_bytes", 4788 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4789 .write_string = mem_cgroup_write, 4790 .read_u64 = mem_cgroup_read, 4791 }, 4792 { 4793 .name = "soft_limit_in_bytes", 4794 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4795 .write_string = mem_cgroup_write, 4796 .read_u64 = mem_cgroup_read, 4797 }, 4798 { 4799 .name = "failcnt", 4800 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4801 .trigger = mem_cgroup_reset, 4802 .read_u64 = mem_cgroup_read, 4803 }, 4804 { 4805 .name = "stat", 4806 .read_map = mem_control_stat_show, 4807 }, 4808 { 4809 .name = "force_empty", 4810 .trigger = mem_cgroup_force_empty_write, 4811 }, 4812 { 4813 .name = "use_hierarchy", 4814 .write_u64 = mem_cgroup_hierarchy_write, 4815 .read_u64 = mem_cgroup_hierarchy_read, 4816 }, 4817 { 4818 .name = "swappiness", 4819 .read_u64 = mem_cgroup_swappiness_read, 4820 .write_u64 = mem_cgroup_swappiness_write, 4821 }, 4822 { 4823 .name = "move_charge_at_immigrate", 4824 .read_u64 = mem_cgroup_move_charge_read, 4825 .write_u64 = mem_cgroup_move_charge_write, 4826 }, 4827 { 4828 .name = "oom_control", 4829 .read_map = mem_cgroup_oom_control_read, 4830 .write_u64 = mem_cgroup_oom_control_write, 4831 .register_event = mem_cgroup_oom_register_event, 4832 .unregister_event = mem_cgroup_oom_unregister_event, 4833 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4834 }, 4835#ifdef CONFIG_NUMA 4836 { 4837 .name = "numa_stat", 4838 .open = mem_control_numa_stat_open, 4839 .mode = S_IRUGO, 4840 }, 4841#endif 4842 { 4843 .name = "vmscan_stat", 4844 .read_map = mem_cgroup_vmscan_stat_read, 4845 .trigger = mem_cgroup_reset_vmscan_stat, 4846 }, 4847}; 4848 4849#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4850static struct cftype memsw_cgroup_files[] = { 4851 { 4852 .name = "memsw.usage_in_bytes", 4853 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 4854 .read_u64 = mem_cgroup_read, 4855 .register_event = mem_cgroup_usage_register_event, 4856 .unregister_event = mem_cgroup_usage_unregister_event, 4857 }, 4858 { 4859 .name = "memsw.max_usage_in_bytes", 4860 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 4861 .trigger = mem_cgroup_reset, 4862 .read_u64 = mem_cgroup_read, 4863 }, 4864 { 4865 .name = "memsw.limit_in_bytes", 4866 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 4867 .write_string = mem_cgroup_write, 4868 .read_u64 = mem_cgroup_read, 4869 }, 4870 { 4871 .name = "memsw.failcnt", 4872 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 4873 .trigger = mem_cgroup_reset, 4874 .read_u64 = mem_cgroup_read, 4875 }, 4876}; 4877 4878static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) 4879{ 4880 if (!do_swap_account) 4881 
return 0; 4882 return cgroup_add_files(cont, ss, memsw_cgroup_files, 4883 ARRAY_SIZE(memsw_cgroup_files)); 4884}; 4885#else 4886static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) 4887{ 4888 return 0; 4889} 4890#endif 4891 4892static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) 4893{ 4894 struct mem_cgroup_per_node *pn; 4895 struct mem_cgroup_per_zone *mz; 4896 enum lru_list l; 4897 int zone, tmp = node; 4898 /* 4899 * This routine is called against possible nodes. 4900 * But it's BUG to call kmalloc() against offline node. 4901 * 4902 * TODO: this routine can waste much memory for nodes which will 4903 * never be onlined. It's better to use memory hotplug callback 4904 * function. 4905 */ 4906 if (!node_state(node, N_NORMAL_MEMORY)) 4907 tmp = -1; 4908 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4909 if (!pn) 4910 return 1; 4911 4912 mem->info.nodeinfo[node] = pn; 4913 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4914 mz = &pn->zoneinfo[zone]; 4915 for_each_lru(l) 4916 INIT_LIST_HEAD(&mz->lists[l]); 4917 mz->usage_in_excess = 0; 4918 mz->on_tree = false; 4919 mz->mem = mem; 4920 } 4921 return 0; 4922} 4923 4924static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) 4925{ 4926 kfree(mem->info.nodeinfo[node]); 4927} 4928 4929static struct mem_cgroup *mem_cgroup_alloc(void) 4930{ 4931 struct mem_cgroup *mem; 4932 int size = sizeof(struct mem_cgroup); 4933 4934 /* Can be very big if MAX_NUMNODES is very big */ 4935 if (size < PAGE_SIZE) 4936 mem = kzalloc(size, GFP_KERNEL); 4937 else 4938 mem = vzalloc(size); 4939 4940 if (!mem) 4941 return NULL; 4942 4943 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4944 if (!mem->stat) 4945 goto out_free; 4946 spin_lock_init(&mem->pcp_counter_lock); 4947 return mem; 4948 4949out_free: 4950 if (size < PAGE_SIZE) 4951 kfree(mem); 4952 else 4953 vfree(mem); 4954 return NULL; 4955} 4956 4957/* 4958 * At destroying mem_cgroup, references from swap_cgroup can remain. 4959 * (scanning all at force_empty is too costly...) 4960 * 4961 * Instead of clearing all references at force_empty, we remember 4962 * the number of reference from swap_cgroup and free mem_cgroup when 4963 * it goes down to 0. 4964 * 4965 * Removal of cgroup itself succeeds regardless of refs from swap. 4966 */ 4967 4968static void __mem_cgroup_free(struct mem_cgroup *mem) 4969{ 4970 int node; 4971 4972 mem_cgroup_remove_from_trees(mem); 4973 free_css_id(&mem_cgroup_subsys, &mem->css); 4974 4975 for_each_node_state(node, N_POSSIBLE) 4976 free_mem_cgroup_per_zone_info(mem, node); 4977 4978 free_percpu(mem->stat); 4979 if (sizeof(struct mem_cgroup) < PAGE_SIZE) 4980 kfree(mem); 4981 else 4982 vfree(mem); 4983} 4984 4985static void mem_cgroup_get(struct mem_cgroup *mem) 4986{ 4987 atomic_inc(&mem->refcnt); 4988} 4989 4990static void __mem_cgroup_put(struct mem_cgroup *mem, int count) 4991{ 4992 if (atomic_sub_and_test(count, &mem->refcnt)) { 4993 struct mem_cgroup *parent = parent_mem_cgroup(mem); 4994 __mem_cgroup_free(mem); 4995 if (parent) 4996 mem_cgroup_put(parent); 4997 } 4998} 4999 5000static void mem_cgroup_put(struct mem_cgroup *mem) 5001{ 5002 __mem_cgroup_put(mem, 1); 5003} 5004 5005/* 5006 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. 
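 *
 * Note that mem->res.parent is only wired up in mem_cgroup_create()
 * when the parent has use_hierarchy set, so for a child created outside
 * a hierarchy this returns NULL even though the cgroup directory itself
 * has a parent.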
5007 */ 5008static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem) 5009{ 5010 if (!mem->res.parent) 5011 return NULL; 5012 return mem_cgroup_from_res_counter(mem->res.parent, res); 5013} 5014 5015#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 5016static void __init enable_swap_cgroup(void) 5017{ 5018 if (!mem_cgroup_disabled() && really_do_swap_account) 5019 do_swap_account = 1; 5020} 5021#else 5022static void __init enable_swap_cgroup(void) 5023{ 5024} 5025#endif 5026 5027static int mem_cgroup_soft_limit_tree_init(void) 5028{ 5029 struct mem_cgroup_tree_per_node *rtpn; 5030 struct mem_cgroup_tree_per_zone *rtpz; 5031 int tmp, node, zone; 5032 5033 for_each_node_state(node, N_POSSIBLE) { 5034 tmp = node; 5035 if (!node_state(node, N_NORMAL_MEMORY)) 5036 tmp = -1; 5037 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp); 5038 if (!rtpn) 5039 return 1; 5040 5041 soft_limit_tree.rb_tree_per_node[node] = rtpn; 5042 5043 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 5044 rtpz = &rtpn->rb_tree_per_zone[zone]; 5045 rtpz->rb_root = RB_ROOT; 5046 spin_lock_init(&rtpz->lock); 5047 } 5048 } 5049 return 0; 5050} 5051 5052static struct cgroup_subsys_state * __ref 5053mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) 5054{ 5055 struct mem_cgroup *mem, *parent; 5056 long error = -ENOMEM; 5057 int node; 5058 5059 mem = mem_cgroup_alloc(); 5060 if (!mem) 5061 return ERR_PTR(error); 5062 5063 for_each_node_state(node, N_POSSIBLE) 5064 if (alloc_mem_cgroup_per_zone_info(mem, node)) 5065 goto free_out; 5066 5067 /* root ? */ 5068 if (cont->parent == NULL) { 5069 int cpu; 5070 enable_swap_cgroup(); 5071 parent = NULL; 5072 root_mem_cgroup = mem; 5073 if (mem_cgroup_soft_limit_tree_init()) 5074 goto free_out; 5075 for_each_possible_cpu(cpu) { 5076 struct memcg_stock_pcp *stock = 5077 &per_cpu(memcg_stock, cpu); 5078 INIT_WORK(&stock->work, drain_local_stock); 5079 } 5080 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 5081 } else { 5082 parent = mem_cgroup_from_cont(cont->parent); 5083 mem->use_hierarchy = parent->use_hierarchy; 5084 mem->oom_kill_disable = parent->oom_kill_disable; 5085 } 5086 5087 if (parent && parent->use_hierarchy) { 5088 res_counter_init(&mem->res, &parent->res); 5089 res_counter_init(&mem->memsw, &parent->memsw); 5090 /* 5091 * We increment refcnt of the parent to ensure that we can 5092 * safely access it on res_counter_charge/uncharge. 5093 * This refcnt will be decremented when freeing this 5094 * mem_cgroup(see mem_cgroup_put). 
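		 *
		 * The matching put is done in __mem_cgroup_put(): once the
		 * child's own refcount drops to zero we release the
		 * reference on the parent, so a parent can never be freed
		 * while a hierarchical child still points at its
		 * res_counter.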
5095 */ 5096 mem_cgroup_get(parent); 5097 } else { 5098 res_counter_init(&mem->res, NULL); 5099 res_counter_init(&mem->memsw, NULL); 5100 } 5101 mem->last_scanned_child = 0; 5102 mem->last_scanned_node = MAX_NUMNODES; 5103 INIT_LIST_HEAD(&mem->oom_notify); 5104 5105 if (parent) 5106 mem->swappiness = mem_cgroup_swappiness(parent); 5107 atomic_set(&mem->refcnt, 1); 5108 mem->move_charge_at_immigrate = 0; 5109 mutex_init(&mem->thresholds_lock); 5110 spin_lock_init(&mem->scanstat.lock); 5111 return &mem->css; 5112free_out: 5113 __mem_cgroup_free(mem); 5114 root_mem_cgroup = NULL; 5115 return ERR_PTR(error); 5116} 5117 5118static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss, 5119 struct cgroup *cont) 5120{ 5121 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 5122 5123 return mem_cgroup_force_empty(mem, false); 5124} 5125 5126static void mem_cgroup_destroy(struct cgroup_subsys *ss, 5127 struct cgroup *cont) 5128{ 5129 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 5130 5131 mem_cgroup_put(mem); 5132} 5133 5134static int mem_cgroup_populate(struct cgroup_subsys *ss, 5135 struct cgroup *cont) 5136{ 5137 int ret; 5138 5139 ret = cgroup_add_files(cont, ss, mem_cgroup_files, 5140 ARRAY_SIZE(mem_cgroup_files)); 5141 5142 if (!ret) 5143 ret = register_memsw_files(cont, ss); 5144 return ret; 5145} 5146 5147#ifdef CONFIG_MMU 5148/* Handlers for move charge at task migration. */ 5149#define PRECHARGE_COUNT_AT_ONCE 256 5150static int mem_cgroup_do_precharge(unsigned long count) 5151{ 5152 int ret = 0; 5153 int batch_count = PRECHARGE_COUNT_AT_ONCE; 5154 struct mem_cgroup *mem = mc.to; 5155 5156 if (mem_cgroup_is_root(mem)) { 5157 mc.precharge += count; 5158 /* we don't need css_get for root */ 5159 return ret; 5160 } 5161 /* try to charge at once */ 5162 if (count > 1) { 5163 struct res_counter *dummy; 5164 /* 5165 * "mem" cannot be under rmdir() because we've already checked 5166 * by cgroup_lock_live_cgroup() that it is not removed and we 5167 * are still under the same cgroup_mutex. So we can postpone 5168 * css_get(). 5169 */ 5170 if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy)) 5171 goto one_by_one; 5172 if (do_swap_account && res_counter_charge(&mem->memsw, 5173 PAGE_SIZE * count, &dummy)) { 5174 res_counter_uncharge(&mem->res, PAGE_SIZE * count); 5175 goto one_by_one; 5176 } 5177 mc.precharge += count; 5178 return ret; 5179 } 5180one_by_one: 5181 /* fall back to one by one charge */ 5182 while (count--) { 5183 if (signal_pending(current)) { 5184 ret = -EINTR; 5185 break; 5186 } 5187 if (!batch_count--) { 5188 batch_count = PRECHARGE_COUNT_AT_ONCE; 5189 cond_resched(); 5190 } 5191 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false); 5192 if (ret || !mem) 5193 /* mem_cgroup_clear_mc() will do uncharge later */ 5194 return -ENOMEM; 5195 mc.precharge++; 5196 } 5197 return ret; 5198} 5199 5200/** 5201 * is_target_pte_for_mc - check a pte whether it is valid for move charge 5202 * @vma: the vma the pte to be checked belongs 5203 * @addr: the address corresponding to the pte to be checked 5204 * @ptent: the pte to be checked 5205 * @target: the pointer the target page or swap ent will be stored(can be NULL) 5206 * 5207 * Returns 5208 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 5209 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 5210 * move charge. if @target is not NULL, the page is stored in target->page 5211 * with extra refcnt got(Callers should handle it). 
5212 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 5213 * target for charge migration. if @target is not NULL, the entry is stored 5214 * in target->ent. 5215 * 5216 * Called with pte lock held. 5217 */ 5218union mc_target { 5219 struct page *page; 5220 swp_entry_t ent; 5221}; 5222 5223enum mc_target_type { 5224 MC_TARGET_NONE, /* not used */ 5225 MC_TARGET_PAGE, 5226 MC_TARGET_SWAP, 5227}; 5228 5229static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5230 unsigned long addr, pte_t ptent) 5231{ 5232 struct page *page = vm_normal_page(vma, addr, ptent); 5233 5234 if (!page || !page_mapped(page)) 5235 return NULL; 5236 if (PageAnon(page)) { 5237 /* we don't move shared anon */ 5238 if (!move_anon() || page_mapcount(page) > 2) 5239 return NULL; 5240 } else if (!move_file()) 5241 /* we ignore mapcount for file pages */ 5242 return NULL; 5243 if (!get_page_unless_zero(page)) 5244 return NULL; 5245 5246 return page; 5247} 5248 5249static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5250 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5251{ 5252 int usage_count; 5253 struct page *page = NULL; 5254 swp_entry_t ent = pte_to_swp_entry(ptent); 5255 5256 if (!move_anon() || non_swap_entry(ent)) 5257 return NULL; 5258 usage_count = mem_cgroup_count_swap_user(ent, &page); 5259 if (usage_count > 1) { /* we don't move shared anon */ 5260 if (page) 5261 put_page(page); 5262 return NULL; 5263 } 5264 if (do_swap_account) 5265 entry->val = ent.val; 5266 5267 return page; 5268} 5269 5270static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5271 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5272{ 5273 struct page *page = NULL; 5274 struct inode *inode; 5275 struct address_space *mapping; 5276 pgoff_t pgoff; 5277 5278 if (!vma->vm_file) /* anonymous vma */ 5279 return NULL; 5280 if (!move_file()) 5281 return NULL; 5282 5283 inode = vma->vm_file->f_path.dentry->d_inode; 5284 mapping = vma->vm_file->f_mapping; 5285 if (pte_none(ptent)) 5286 pgoff = linear_page_index(vma, addr); 5287 else /* pte_file(ptent) is true */ 5288 pgoff = pte_to_pgoff(ptent); 5289 5290 /* page is moved even if it's not RSS of this task(page-faulted). */ 5291 page = find_get_page(mapping, pgoff); 5292 5293#ifdef CONFIG_SWAP 5294 /* shmem/tmpfs may report page out on swap: account for that too. */ 5295 if (radix_tree_exceptional_entry(page)) { 5296 swp_entry_t swap = radix_to_swp_entry(page); 5297 if (do_swap_account) 5298 *entry = swap; 5299 page = find_get_page(&swapper_space, swap.val); 5300 } 5301#endif 5302 return page; 5303} 5304 5305static int is_target_pte_for_mc(struct vm_area_struct *vma, 5306 unsigned long addr, pte_t ptent, union mc_target *target) 5307{ 5308 struct page *page = NULL; 5309 struct page_cgroup *pc; 5310 int ret = 0; 5311 swp_entry_t ent = { .val = 0 }; 5312 5313 if (pte_present(ptent)) 5314 page = mc_handle_present_pte(vma, addr, ptent); 5315 else if (is_swap_pte(ptent)) 5316 page = mc_handle_swap_pte(vma, addr, ptent, &ent); 5317 else if (pte_none(ptent) || pte_file(ptent)) 5318 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5319 5320 if (!page && !ent.val) 5321 return 0; 5322 if (page) { 5323 pc = lookup_page_cgroup(page); 5324 /* 5325 * Do only loose check w/o page_cgroup lock. 5326 * mem_cgroup_move_account() checks the pc is valid or not under 5327 * the lock. 
5328 */ 5329 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { 5330 ret = MC_TARGET_PAGE; 5331 if (target) 5332 target->page = page; 5333 } 5334 if (!ret || !target) 5335 put_page(page); 5336 } 5337 /* There is a swap entry and a page doesn't exist or isn't charged */ 5338 if (ent.val && !ret && 5339 css_id(&mc.from->css) == lookup_swap_cgroup(ent)) { 5340 ret = MC_TARGET_SWAP; 5341 if (target) 5342 target->ent = ent; 5343 } 5344 return ret; 5345} 5346 5347static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5348 unsigned long addr, unsigned long end, 5349 struct mm_walk *walk) 5350{ 5351 struct vm_area_struct *vma = walk->private; 5352 pte_t *pte; 5353 spinlock_t *ptl; 5354 5355 split_huge_page_pmd(walk->mm, pmd); 5356 5357 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5358 for (; addr != end; pte++, addr += PAGE_SIZE) 5359 if (is_target_pte_for_mc(vma, addr, *pte, NULL)) 5360 mc.precharge++; /* increment precharge temporarily */ 5361 pte_unmap_unlock(pte - 1, ptl); 5362 cond_resched(); 5363 5364 return 0; 5365} 5366 5367static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5368{ 5369 unsigned long precharge; 5370 struct vm_area_struct *vma; 5371 5372 down_read(&mm->mmap_sem); 5373 for (vma = mm->mmap; vma; vma = vma->vm_next) { 5374 struct mm_walk mem_cgroup_count_precharge_walk = { 5375 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5376 .mm = mm, 5377 .private = vma, 5378 }; 5379 if (is_vm_hugetlb_page(vma)) 5380 continue; 5381 walk_page_range(vma->vm_start, vma->vm_end, 5382 &mem_cgroup_count_precharge_walk); 5383 } 5384 up_read(&mm->mmap_sem); 5385 5386 precharge = mc.precharge; 5387 mc.precharge = 0; 5388 5389 return precharge; 5390} 5391 5392static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5393{ 5394 unsigned long precharge = mem_cgroup_count_precharge(mm); 5395 5396 VM_BUG_ON(mc.moving_task); 5397 mc.moving_task = current; 5398 return mem_cgroup_do_precharge(precharge); 5399} 5400 5401/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 5402static void __mem_cgroup_clear_mc(void) 5403{ 5404 struct mem_cgroup *from = mc.from; 5405 struct mem_cgroup *to = mc.to; 5406 5407 /* we must uncharge all the leftover precharges from mc.to */ 5408 if (mc.precharge) { 5409 __mem_cgroup_cancel_charge(mc.to, mc.precharge); 5410 mc.precharge = 0; 5411 } 5412 /* 5413 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5414 * we must uncharge here. 5415 */ 5416 if (mc.moved_charge) { 5417 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge); 5418 mc.moved_charge = 0; 5419 } 5420 /* we must fixup refcnts and charges */ 5421 if (mc.moved_swap) { 5422 /* uncharge swap account from the old cgroup */ 5423 if (!mem_cgroup_is_root(mc.from)) 5424 res_counter_uncharge(&mc.from->memsw, 5425 PAGE_SIZE * mc.moved_swap); 5426 __mem_cgroup_put(mc.from, mc.moved_swap); 5427 5428 if (!mem_cgroup_is_root(mc.to)) { 5429 /* 5430 * we charged both to->res and to->memsw, so we should 5431 * uncharge to->res. 5432 */ 5433 res_counter_uncharge(&mc.to->res, 5434 PAGE_SIZE * mc.moved_swap); 5435 } 5436 /* we've already done mem_cgroup_get(mc.to) */ 5437 mc.moved_swap = 0; 5438 } 5439 memcg_oom_recover(from); 5440 memcg_oom_recover(to); 5441 wake_up_all(&mc.waitq); 5442} 5443 5444static void mem_cgroup_clear_mc(void) 5445{ 5446 struct mem_cgroup *from = mc.from; 5447 5448 /* 5449 * we must clear moving_task before waking up waiters at the end of 5450 * task migration. 
5451 */ 5452 mc.moving_task = NULL; 5453 __mem_cgroup_clear_mc(); 5454 spin_lock(&mc.lock); 5455 mc.from = NULL; 5456 mc.to = NULL; 5457 spin_unlock(&mc.lock); 5458 mem_cgroup_end_move(from); 5459} 5460 5461static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5462 struct cgroup *cgroup, 5463 struct task_struct *p) 5464{ 5465 int ret = 0; 5466 struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup); 5467 5468 if (mem->move_charge_at_immigrate) { 5469 struct mm_struct *mm; 5470 struct mem_cgroup *from = mem_cgroup_from_task(p); 5471 5472 VM_BUG_ON(from == mem); 5473 5474 mm = get_task_mm(p); 5475 if (!mm) 5476 return 0; 5477 /* We move charges only when we move a owner of the mm */ 5478 if (mm->owner == p) { 5479 VM_BUG_ON(mc.from); 5480 VM_BUG_ON(mc.to); 5481 VM_BUG_ON(mc.precharge); 5482 VM_BUG_ON(mc.moved_charge); 5483 VM_BUG_ON(mc.moved_swap); 5484 mem_cgroup_start_move(from); 5485 spin_lock(&mc.lock); 5486 mc.from = from; 5487 mc.to = mem; 5488 spin_unlock(&mc.lock); 5489 /* We set mc.moving_task later */ 5490 5491 ret = mem_cgroup_precharge_mc(mm); 5492 if (ret) 5493 mem_cgroup_clear_mc(); 5494 } 5495 mmput(mm); 5496 } 5497 return ret; 5498} 5499 5500static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, 5501 struct cgroup *cgroup, 5502 struct task_struct *p) 5503{ 5504 mem_cgroup_clear_mc(); 5505} 5506 5507static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 5508 unsigned long addr, unsigned long end, 5509 struct mm_walk *walk) 5510{ 5511 int ret = 0; 5512 struct vm_area_struct *vma = walk->private; 5513 pte_t *pte; 5514 spinlock_t *ptl; 5515 5516 split_huge_page_pmd(walk->mm, pmd); 5517retry: 5518 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5519 for (; addr != end; addr += PAGE_SIZE) { 5520 pte_t ptent = *(pte++); 5521 union mc_target target; 5522 int type; 5523 struct page *page; 5524 struct page_cgroup *pc; 5525 swp_entry_t ent; 5526 5527 if (!mc.precharge) 5528 break; 5529 5530 type = is_target_pte_for_mc(vma, addr, ptent, &target); 5531 switch (type) { 5532 case MC_TARGET_PAGE: 5533 page = target.page; 5534 if (isolate_lru_page(page)) 5535 goto put; 5536 pc = lookup_page_cgroup(page); 5537 if (!mem_cgroup_move_account(page, 1, pc, 5538 mc.from, mc.to, false)) { 5539 mc.precharge--; 5540 /* we uncharge from mc.from later. */ 5541 mc.moved_charge++; 5542 } 5543 putback_lru_page(page); 5544put: /* is_target_pte_for_mc() gets the page */ 5545 put_page(page); 5546 break; 5547 case MC_TARGET_SWAP: 5548 ent = target.ent; 5549 if (!mem_cgroup_move_swap_account(ent, 5550 mc.from, mc.to, false)) { 5551 mc.precharge--; 5552 /* we fixup refcnts and charges later. */ 5553 mc.moved_swap++; 5554 } 5555 break; 5556 default: 5557 break; 5558 } 5559 } 5560 pte_unmap_unlock(pte - 1, ptl); 5561 cond_resched(); 5562 5563 if (addr != end) { 5564 /* 5565 * We have consumed all precharges we got in can_attach(). 5566 * We try charge one by one, but don't do any additional 5567 * charges to mc.to if we have failed in charge once in attach() 5568 * phase. 5569 */ 5570 ret = mem_cgroup_do_precharge(1); 5571 if (!ret) 5572 goto retry; 5573 } 5574 5575 return ret; 5576} 5577 5578static void mem_cgroup_move_charge(struct mm_struct *mm) 5579{ 5580 struct vm_area_struct *vma; 5581 5582 lru_add_drain_all(); 5583retry: 5584 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 5585 /* 5586 * Someone who are holding the mmap_sem might be waiting in 5587 * waitq. So we cancel all extra charges, wake up all waiters, 5588 * and retry. 
Because we cancel precharges, we might not be able 5589 * to move enough charges, but moving charge is a best-effort 5590 * feature anyway, so it wouldn't be a big problem. 5591 */ 5592 __mem_cgroup_clear_mc(); 5593 cond_resched(); 5594 goto retry; 5595 } 5596 for (vma = mm->mmap; vma; vma = vma->vm_next) { 5597 int ret; 5598 struct mm_walk mem_cgroup_move_charge_walk = { 5599 .pmd_entry = mem_cgroup_move_charge_pte_range, 5600 .mm = mm, 5601 .private = vma, 5602 }; 5603 if (is_vm_hugetlb_page(vma)) 5604 continue; 5605 ret = walk_page_range(vma->vm_start, vma->vm_end, 5606 &mem_cgroup_move_charge_walk); 5607 if (ret) 5608 /* 5609 * means we have consumed all precharges and failed in 5610 * doing additional charge. Just abandon here. 5611 */ 5612 break; 5613 } 5614 up_read(&mm->mmap_sem); 5615} 5616 5617static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5618 struct cgroup *cont, 5619 struct cgroup *old_cont, 5620 struct task_struct *p) 5621{ 5622 struct mm_struct *mm = get_task_mm(p); 5623 5624 if (mm) { 5625 if (mc.to) 5626 mem_cgroup_move_charge(mm); 5627 put_swap_token(mm); 5628 mmput(mm); 5629 } 5630 if (mc.to) 5631 mem_cgroup_clear_mc(); 5632} 5633#else /* !CONFIG_MMU */ 5634static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5635 struct cgroup *cgroup, 5636 struct task_struct *p) 5637{ 5638 return 0; 5639} 5640static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, 5641 struct cgroup *cgroup, 5642 struct task_struct *p) 5643{ 5644} 5645static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5646 struct cgroup *cont, 5647 struct cgroup *old_cont, 5648 struct task_struct *p) 5649{ 5650} 5651#endif 5652 5653struct cgroup_subsys mem_cgroup_subsys = { 5654 .name = "memory", 5655 .subsys_id = mem_cgroup_subsys_id, 5656 .create = mem_cgroup_create, 5657 .pre_destroy = mem_cgroup_pre_destroy, 5658 .destroy = mem_cgroup_destroy, 5659 .populate = mem_cgroup_populate, 5660 .can_attach = mem_cgroup_can_attach, 5661 .cancel_attach = mem_cgroup_cancel_attach, 5662 .attach = mem_cgroup_move_task, 5663 .early_init = 0, 5664 .use_id = 1, 5665}; 5666 5667#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 5668static int __init enable_swap_account(char *s) 5669{ 5670 /* consider enabled if no parameter or 1 is given */ 5671 if (!strcmp(s, "1")) 5672 really_do_swap_account = 1; 5673 else if (!strcmp(s, "0")) 5674 really_do_swap_account = 0; 5675 return 1; 5676} 5677__setup("swapaccount=", enable_swap_account); 5678 5679#endif 5680
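/*
 * Usage note for the boot parameter above: only the explicit values are
 * acted on, e.g.
 *
 *	swapaccount=0	- boot with swap accounting disabled
 *	swapaccount=1	- boot with swap accounting enabled
 *
 * really_do_swap_account only records the request here; whether
 * do_swap_account is finally set is decided once, in
 * enable_swap_cgroup(), when the root cgroup is created.
 */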