memcontrol.c revision de077d222d5ca6108cab119a09593344c12100ab
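Before the source itself, a minimal userspace sketch of how the control files implemented by this file are usually driven. This is an illustrative example only: it assumes a cgroup v1 memory controller mounted at /sys/fs/cgroup/memory, and the group name "demo" and the 64M limit are made-up values, not anything defined by memcontrol.c.

/*
 * Hedged example: create a memcg, set memory.limit_in_bytes, move the
 * current task into it, and read back memory.usage_in_bytes. The mount
 * point and group name below are assumptions.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>

#define DEMO "/sys/fs/cgroup/memory/demo"	/* hypothetical group */

static void write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return;
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	char buf[64];
	FILE *f;

	/* a new directory becomes a new mem_cgroup with its own res_counter */
	mkdir(DEMO, 0755);
	/* 64M hard limit on the group's memory usage */
	write_file(DEMO "/memory.limit_in_bytes", "67108864");
	/* move this task into the group so its future charges land here */
	snprintf(buf, sizeof(buf), "%d", (int)getpid());
	write_file(DEMO "/tasks", buf);

	/* current charge of the group's res_counter */
	f = fopen(DEMO "/memory.usage_in_bytes", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("usage: %s", buf);
	if (f)
		fclose(f);
	return 0;
}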
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"
#include <net/sock.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5
struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		(0)
#endif


/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAPOUT,	/* # of pages, swapped out */
	MEM_CGROUP_STAT_DATA,		/* end of data requires synchronization */
	MEM_CGROUP_ON_MOVE,		/* someone is moving account between groups */
	MEM_CGROUP_STAT_NSTATS,
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};
/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events.
This is straightforward and better 108 * than using jiffies etc. to handle periodic memcg event. 109 */ 110enum mem_cgroup_events_target { 111 MEM_CGROUP_TARGET_THRESH, 112 MEM_CGROUP_TARGET_SOFTLIMIT, 113 MEM_CGROUP_TARGET_NUMAINFO, 114 MEM_CGROUP_NTARGETS, 115}; 116#define THRESHOLDS_EVENTS_TARGET (128) 117#define SOFTLIMIT_EVENTS_TARGET (1024) 118#define NUMAINFO_EVENTS_TARGET (1024) 119 120struct mem_cgroup_stat_cpu { 121 long count[MEM_CGROUP_STAT_NSTATS]; 122 unsigned long events[MEM_CGROUP_EVENTS_NSTATS]; 123 unsigned long targets[MEM_CGROUP_NTARGETS]; 124}; 125 126struct mem_cgroup_reclaim_iter { 127 /* css_id of the last scanned hierarchy member */ 128 int position; 129 /* scan generation, increased every round-trip */ 130 unsigned int generation; 131}; 132 133/* 134 * per-zone information in memory controller. 135 */ 136struct mem_cgroup_per_zone { 137 struct lruvec lruvec; 138 unsigned long count[NR_LRU_LISTS]; 139 140 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; 141 142 struct zone_reclaim_stat reclaim_stat; 143 struct rb_node tree_node; /* RB tree node */ 144 unsigned long long usage_in_excess;/* Set to the value by which */ 145 /* the soft limit is exceeded*/ 146 bool on_tree; 147 struct mem_cgroup *mem; /* Back pointer, we cannot */ 148 /* use container_of */ 149}; 150/* Macro for accessing counter */ 151#define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)]) 152 153struct mem_cgroup_per_node { 154 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; 155}; 156 157struct mem_cgroup_lru_info { 158 struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES]; 159}; 160 161/* 162 * Cgroups above their limits are maintained in a RB-Tree, independent of 163 * their hierarchy representation 164 */ 165 166struct mem_cgroup_tree_per_zone { 167 struct rb_root rb_root; 168 spinlock_t lock; 169}; 170 171struct mem_cgroup_tree_per_node { 172 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES]; 173}; 174 175struct mem_cgroup_tree { 176 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 177}; 178 179static struct mem_cgroup_tree soft_limit_tree __read_mostly; 180 181struct mem_cgroup_threshold { 182 struct eventfd_ctx *eventfd; 183 u64 threshold; 184}; 185 186/* For threshold */ 187struct mem_cgroup_threshold_ary { 188 /* An array index points to threshold just below usage. */ 189 int current_threshold; 190 /* Size of entries[] */ 191 unsigned int size; 192 /* Array of thresholds */ 193 struct mem_cgroup_threshold entries[0]; 194}; 195 196struct mem_cgroup_thresholds { 197 /* Primary thresholds array */ 198 struct mem_cgroup_threshold_ary *primary; 199 /* 200 * Spare threshold array. 201 * This is needed to make mem_cgroup_unregister_event() "never fail". 202 * It must be able to store at least primary->size - 1 entries. 203 */ 204 struct mem_cgroup_threshold_ary *spare; 205}; 206 207/* for OOM */ 208struct mem_cgroup_eventfd_list { 209 struct list_head list; 210 struct eventfd_ctx *eventfd; 211}; 212 213static void mem_cgroup_threshold(struct mem_cgroup *memcg); 214static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 215 216/* 217 * The memory controller data structure. The memory controller controls both 218 * page cache and RSS per cgroup. We would eventually like to provide 219 * statistics based on the statistics developed by Rik Van Riel for clock-pro, 220 * to help the administrator determine what knobs to tune. 221 * 222 * TODO: Add a water mark for the memory controller. Reclaim will begin when 223 * we hit the water mark. 
May be even add a low water mark, such that 224 * no reclaim occurs from a cgroup at it's low water mark, this is 225 * a feature that will be implemented much later in the future. 226 */ 227struct mem_cgroup { 228 struct cgroup_subsys_state css; 229 /* 230 * the counter to account for memory usage 231 */ 232 struct res_counter res; 233 /* 234 * the counter to account for mem+swap usage. 235 */ 236 struct res_counter memsw; 237 /* 238 * Per cgroup active and inactive list, similar to the 239 * per zone LRU lists. 240 */ 241 struct mem_cgroup_lru_info info; 242 int last_scanned_node; 243#if MAX_NUMNODES > 1 244 nodemask_t scan_nodes; 245 atomic_t numainfo_events; 246 atomic_t numainfo_updating; 247#endif 248 /* 249 * Should the accounting and control be hierarchical, per subtree? 250 */ 251 bool use_hierarchy; 252 253 bool oom_lock; 254 atomic_t under_oom; 255 256 atomic_t refcnt; 257 258 int swappiness; 259 /* OOM-Killer disable */ 260 int oom_kill_disable; 261 262 /* set when res.limit == memsw.limit */ 263 bool memsw_is_minimum; 264 265 /* protect arrays of thresholds */ 266 struct mutex thresholds_lock; 267 268 /* thresholds for memory usage. RCU-protected */ 269 struct mem_cgroup_thresholds thresholds; 270 271 /* thresholds for mem+swap usage. RCU-protected */ 272 struct mem_cgroup_thresholds memsw_thresholds; 273 274 /* For oom notifier event fd */ 275 struct list_head oom_notify; 276 277 /* 278 * Should we move charges of a task when a task is moved into this 279 * mem_cgroup ? And what type of charges should we move ? 280 */ 281 unsigned long move_charge_at_immigrate; 282 /* 283 * percpu counter. 284 */ 285 struct mem_cgroup_stat_cpu *stat; 286 /* 287 * used when a cpu is offlined or other synchronizations 288 * See mem_cgroup_read_stat(). 289 */ 290 struct mem_cgroup_stat_cpu nocpu_base; 291 spinlock_t pcp_counter_lock; 292 293#ifdef CONFIG_INET 294 struct tcp_memcontrol tcp_mem; 295#endif 296}; 297 298/* Stuffs for move charges at task migration. */ 299/* 300 * Types of charges to be moved. "move_charge_at_immitgrate" is treated as a 301 * left-shifted bitmap of these types. 302 */ 303enum move_type { 304 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */ 305 MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */ 306 NR_MOVE_TYPE, 307}; 308 309/* "mc" and its members are protected by cgroup_mutex */ 310static struct move_charge_struct { 311 spinlock_t lock; /* for from, to */ 312 struct mem_cgroup *from; 313 struct mem_cgroup *to; 314 unsigned long precharge; 315 unsigned long moved_charge; 316 unsigned long moved_swap; 317 struct task_struct *moving_task; /* a task moving charges */ 318 wait_queue_head_t waitq; /* a waitq for other context */ 319} mc = { 320 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 321 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 322}; 323 324static bool move_anon(void) 325{ 326 return test_bit(MOVE_CHARGE_TYPE_ANON, 327 &mc.to->move_charge_at_immigrate); 328} 329 330static bool move_file(void) 331{ 332 return test_bit(MOVE_CHARGE_TYPE_FILE, 333 &mc.to->move_charge_at_immigrate); 334} 335 336/* 337 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 338 * limit reclaim to prevent infinite loops, if they ever occur. 
339 */ 340#define MEM_CGROUP_MAX_RECLAIM_LOOPS (100) 341#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2) 342 343enum charge_type { 344 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 345 MEM_CGROUP_CHARGE_TYPE_MAPPED, 346 MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */ 347 MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */ 348 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */ 349 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */ 350 NR_CHARGE_TYPE, 351}; 352 353/* for encoding cft->private value on file */ 354#define _MEM (0) 355#define _MEMSWAP (1) 356#define _OOM_TYPE (2) 357#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val)) 358#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff) 359#define MEMFILE_ATTR(val) ((val) & 0xffff) 360/* Used for OOM nofiier */ 361#define OOM_CONTROL (0) 362 363/* 364 * Reclaim flags for mem_cgroup_hierarchical_reclaim 365 */ 366#define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0 367#define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT) 368#define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1 369#define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT) 370 371static void mem_cgroup_get(struct mem_cgroup *memcg); 372static void mem_cgroup_put(struct mem_cgroup *memcg); 373 374/* Writing them here to avoid exposing memcg's inner layout */ 375#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 376#ifdef CONFIG_INET 377#include <net/sock.h> 378#include <net/ip.h> 379 380static bool mem_cgroup_is_root(struct mem_cgroup *memcg); 381void sock_update_memcg(struct sock *sk) 382{ 383 if (static_branch(&memcg_socket_limit_enabled)) { 384 struct mem_cgroup *memcg; 385 386 BUG_ON(!sk->sk_prot->proto_cgroup); 387 388 /* Socket cloning can throw us here with sk_cgrp already 389 * filled. It won't however, necessarily happen from 390 * process context. So the test for root memcg given 391 * the current task's memcg won't help us in this case. 392 * 393 * Respecting the original socket's memcg is a better 394 * decision in this case. 
395 */ 396 if (sk->sk_cgrp) { 397 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg)); 398 mem_cgroup_get(sk->sk_cgrp->memcg); 399 return; 400 } 401 402 rcu_read_lock(); 403 memcg = mem_cgroup_from_task(current); 404 if (!mem_cgroup_is_root(memcg)) { 405 mem_cgroup_get(memcg); 406 sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg); 407 } 408 rcu_read_unlock(); 409 } 410} 411EXPORT_SYMBOL(sock_update_memcg); 412 413void sock_release_memcg(struct sock *sk) 414{ 415 if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) { 416 struct mem_cgroup *memcg; 417 WARN_ON(!sk->sk_cgrp->memcg); 418 memcg = sk->sk_cgrp->memcg; 419 mem_cgroup_put(memcg); 420 } 421} 422 423struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg) 424{ 425 if (!memcg || mem_cgroup_is_root(memcg)) 426 return NULL; 427 428 return &memcg->tcp_mem.cg_proto; 429} 430EXPORT_SYMBOL(tcp_proto_cgroup); 431#endif /* CONFIG_INET */ 432#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */ 433 434static void drain_all_stock_async(struct mem_cgroup *memcg); 435 436static struct mem_cgroup_per_zone * 437mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid) 438{ 439 return &memcg->info.nodeinfo[nid]->zoneinfo[zid]; 440} 441 442struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg) 443{ 444 return &memcg->css; 445} 446 447static struct mem_cgroup_per_zone * 448page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page) 449{ 450 int nid = page_to_nid(page); 451 int zid = page_zonenum(page); 452 453 return mem_cgroup_zoneinfo(memcg, nid, zid); 454} 455 456static struct mem_cgroup_tree_per_zone * 457soft_limit_tree_node_zone(int nid, int zid) 458{ 459 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 460} 461 462static struct mem_cgroup_tree_per_zone * 463soft_limit_tree_from_page(struct page *page) 464{ 465 int nid = page_to_nid(page); 466 int zid = page_zonenum(page); 467 468 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 469} 470 471static void 472__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg, 473 struct mem_cgroup_per_zone *mz, 474 struct mem_cgroup_tree_per_zone *mctz, 475 unsigned long long new_usage_in_excess) 476{ 477 struct rb_node **p = &mctz->rb_root.rb_node; 478 struct rb_node *parent = NULL; 479 struct mem_cgroup_per_zone *mz_node; 480 481 if (mz->on_tree) 482 return; 483 484 mz->usage_in_excess = new_usage_in_excess; 485 if (!mz->usage_in_excess) 486 return; 487 while (*p) { 488 parent = *p; 489 mz_node = rb_entry(parent, struct mem_cgroup_per_zone, 490 tree_node); 491 if (mz->usage_in_excess < mz_node->usage_in_excess) 492 p = &(*p)->rb_left; 493 /* 494 * We can't avoid mem cgroups that are over their soft 495 * limit by the same amount 496 */ 497 else if (mz->usage_in_excess >= mz_node->usage_in_excess) 498 p = &(*p)->rb_right; 499 } 500 rb_link_node(&mz->tree_node, parent, p); 501 rb_insert_color(&mz->tree_node, &mctz->rb_root); 502 mz->on_tree = true; 503} 504 505static void 506__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg, 507 struct mem_cgroup_per_zone *mz, 508 struct mem_cgroup_tree_per_zone *mctz) 509{ 510 if (!mz->on_tree) 511 return; 512 rb_erase(&mz->tree_node, &mctz->rb_root); 513 mz->on_tree = false; 514} 515 516static void 517mem_cgroup_remove_exceeded(struct mem_cgroup *memcg, 518 struct mem_cgroup_per_zone *mz, 519 struct mem_cgroup_tree_per_zone *mctz) 520{ 521 spin_lock(&mctz->lock); 522 __mem_cgroup_remove_exceeded(memcg, mz, mctz); 523 spin_unlock(&mctz->lock); 524} 525 526 527static void mem_cgroup_update_tree(struct mem_cgroup 
					*memcg, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
		excess = res_counter_soft_limit_excess(&memcg->res);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node_state(node, N_POSSIBLE) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(memcg, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(memcg, mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
		!css_tryget(&mz->mem->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and periodic
 * synchronization to implement "quick" reads. There is a trade-off between
 * reading cost and precision of the value, so we may get the chance to
 * implement a similar periodic synchronization of the counters in memcg later.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact value,
 * because he is accounting memory. Even if we provided a quick-and-fuzzy read,
 * we would still have to visit all online cpus and compute the sum. So, for
 * now, the unnecessary synchronization is not implemented (it is only
 * implemented for cpu hotplug).
 *
 * If there are kernel-internal users which could make do with a not-exact
 * value, and reading all cpu values becomes a performance bottleneck in some
 * common workload, thresholds and synchronization as in vmstat[] should be
 * implemented.
632 */ 633static long mem_cgroup_read_stat(struct mem_cgroup *memcg, 634 enum mem_cgroup_stat_index idx) 635{ 636 long val = 0; 637 int cpu; 638 639 get_online_cpus(); 640 for_each_online_cpu(cpu) 641 val += per_cpu(memcg->stat->count[idx], cpu); 642#ifdef CONFIG_HOTPLUG_CPU 643 spin_lock(&memcg->pcp_counter_lock); 644 val += memcg->nocpu_base.count[idx]; 645 spin_unlock(&memcg->pcp_counter_lock); 646#endif 647 put_online_cpus(); 648 return val; 649} 650 651static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, 652 bool charge) 653{ 654 int val = (charge) ? 1 : -1; 655 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val); 656} 657 658static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg, 659 enum mem_cgroup_events_index idx) 660{ 661 unsigned long val = 0; 662 int cpu; 663 664 for_each_online_cpu(cpu) 665 val += per_cpu(memcg->stat->events[idx], cpu); 666#ifdef CONFIG_HOTPLUG_CPU 667 spin_lock(&memcg->pcp_counter_lock); 668 val += memcg->nocpu_base.events[idx]; 669 spin_unlock(&memcg->pcp_counter_lock); 670#endif 671 return val; 672} 673 674static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 675 bool file, int nr_pages) 676{ 677 preempt_disable(); 678 679 if (file) 680 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE], 681 nr_pages); 682 else 683 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS], 684 nr_pages); 685 686 /* pagein of a big page is an event. So, ignore page size */ 687 if (nr_pages > 0) 688 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); 689 else { 690 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); 691 nr_pages = -nr_pages; /* for event */ 692 } 693 694 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages); 695 696 preempt_enable(); 697} 698 699unsigned long 700mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid, 701 unsigned int lru_mask) 702{ 703 struct mem_cgroup_per_zone *mz; 704 enum lru_list l; 705 unsigned long ret = 0; 706 707 mz = mem_cgroup_zoneinfo(memcg, nid, zid); 708 709 for_each_lru(l) { 710 if (BIT(l) & lru_mask) 711 ret += MEM_CGROUP_ZSTAT(mz, l); 712 } 713 return ret; 714} 715 716static unsigned long 717mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 718 int nid, unsigned int lru_mask) 719{ 720 u64 total = 0; 721 int zid; 722 723 for (zid = 0; zid < MAX_NR_ZONES; zid++) 724 total += mem_cgroup_zone_nr_lru_pages(memcg, 725 nid, zid, lru_mask); 726 727 return total; 728} 729 730static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 731 unsigned int lru_mask) 732{ 733 int nid; 734 u64 total = 0; 735 736 for_each_node_state(nid, N_HIGH_MEMORY) 737 total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask); 738 return total; 739} 740 741static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 742 enum mem_cgroup_events_target target) 743{ 744 unsigned long val, next; 745 746 val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]); 747 next = __this_cpu_read(memcg->stat->targets[target]); 748 /* from time_after() in jiffies.h */ 749 if ((long)next - (long)val < 0) { 750 switch (target) { 751 case MEM_CGROUP_TARGET_THRESH: 752 next = val + THRESHOLDS_EVENTS_TARGET; 753 break; 754 case MEM_CGROUP_TARGET_SOFTLIMIT: 755 next = val + SOFTLIMIT_EVENTS_TARGET; 756 break; 757 case MEM_CGROUP_TARGET_NUMAINFO: 758 next = val + NUMAINFO_EVENTS_TARGET; 759 break; 760 default: 761 break; 762 } 763 __this_cpu_write(memcg->stat->targets[target], next); 764 return true; 765 } 766 return false; 767} 768 769/* 
770 * Check events in order. 771 * 772 */ 773static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) 774{ 775 preempt_disable(); 776 /* threshold event is triggered in finer grain than soft limit */ 777 if (unlikely(mem_cgroup_event_ratelimit(memcg, 778 MEM_CGROUP_TARGET_THRESH))) { 779 bool do_softlimit, do_numainfo; 780 781 do_softlimit = mem_cgroup_event_ratelimit(memcg, 782 MEM_CGROUP_TARGET_SOFTLIMIT); 783#if MAX_NUMNODES > 1 784 do_numainfo = mem_cgroup_event_ratelimit(memcg, 785 MEM_CGROUP_TARGET_NUMAINFO); 786#endif 787 preempt_enable(); 788 789 mem_cgroup_threshold(memcg); 790 if (unlikely(do_softlimit)) 791 mem_cgroup_update_tree(memcg, page); 792#if MAX_NUMNODES > 1 793 if (unlikely(do_numainfo)) 794 atomic_inc(&memcg->numainfo_events); 795#endif 796 } else 797 preempt_enable(); 798} 799 800struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) 801{ 802 return container_of(cgroup_subsys_state(cont, 803 mem_cgroup_subsys_id), struct mem_cgroup, 804 css); 805} 806 807struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 808{ 809 /* 810 * mm_update_next_owner() may clear mm->owner to NULL 811 * if it races with swapoff, page migration, etc. 812 * So this can be called with p == NULL. 813 */ 814 if (unlikely(!p)) 815 return NULL; 816 817 return container_of(task_subsys_state(p, mem_cgroup_subsys_id), 818 struct mem_cgroup, css); 819} 820 821struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) 822{ 823 struct mem_cgroup *memcg = NULL; 824 825 if (!mm) 826 return NULL; 827 /* 828 * Because we have no locks, mm->owner's may be being moved to other 829 * cgroup. We use css_tryget() here even if this looks 830 * pessimistic (rather than adding locks here). 831 */ 832 rcu_read_lock(); 833 do { 834 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 835 if (unlikely(!memcg)) 836 break; 837 } while (!css_tryget(&memcg->css)); 838 rcu_read_unlock(); 839 return memcg; 840} 841 842/** 843 * mem_cgroup_iter - iterate over memory cgroup hierarchy 844 * @root: hierarchy root 845 * @prev: previously returned memcg, NULL on first invocation 846 * @reclaim: cookie for shared reclaim walks, NULL for full walks 847 * 848 * Returns references to children of the hierarchy below @root, or 849 * @root itself, or %NULL after a full round-trip. 850 * 851 * Caller must pass the return value in @prev on subsequent 852 * invocations for reference counting, or use mem_cgroup_iter_break() 853 * to cancel a hierarchy walk before the round-trip is complete. 854 * 855 * Reclaimers can specify a zone and a priority level in @reclaim to 856 * divide up the memcgs in the hierarchy among all concurrent 857 * reclaimers operating on the same zone and priority. 
858 */ 859struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 860 struct mem_cgroup *prev, 861 struct mem_cgroup_reclaim_cookie *reclaim) 862{ 863 struct mem_cgroup *memcg = NULL; 864 int id = 0; 865 866 if (mem_cgroup_disabled()) 867 return NULL; 868 869 if (!root) 870 root = root_mem_cgroup; 871 872 if (prev && !reclaim) 873 id = css_id(&prev->css); 874 875 if (prev && prev != root) 876 css_put(&prev->css); 877 878 if (!root->use_hierarchy && root != root_mem_cgroup) { 879 if (prev) 880 return NULL; 881 return root; 882 } 883 884 while (!memcg) { 885 struct mem_cgroup_reclaim_iter *uninitialized_var(iter); 886 struct cgroup_subsys_state *css; 887 888 if (reclaim) { 889 int nid = zone_to_nid(reclaim->zone); 890 int zid = zone_idx(reclaim->zone); 891 struct mem_cgroup_per_zone *mz; 892 893 mz = mem_cgroup_zoneinfo(root, nid, zid); 894 iter = &mz->reclaim_iter[reclaim->priority]; 895 if (prev && reclaim->generation != iter->generation) 896 return NULL; 897 id = iter->position; 898 } 899 900 rcu_read_lock(); 901 css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id); 902 if (css) { 903 if (css == &root->css || css_tryget(css)) 904 memcg = container_of(css, 905 struct mem_cgroup, css); 906 } else 907 id = 0; 908 rcu_read_unlock(); 909 910 if (reclaim) { 911 iter->position = id; 912 if (!css) 913 iter->generation++; 914 else if (!prev && memcg) 915 reclaim->generation = iter->generation; 916 } 917 918 if (prev && !css) 919 return NULL; 920 } 921 return memcg; 922} 923 924/** 925 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 926 * @root: hierarchy root 927 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 928 */ 929void mem_cgroup_iter_break(struct mem_cgroup *root, 930 struct mem_cgroup *prev) 931{ 932 if (!root) 933 root = root_mem_cgroup; 934 if (prev && prev != root) 935 css_put(&prev->css); 936} 937 938/* 939 * Iteration constructs for visiting all cgroups (under a tree). If 940 * loops are exited prematurely (break), mem_cgroup_iter_break() must 941 * be used for reference counting. 942 */ 943#define for_each_mem_cgroup_tree(iter, root) \ 944 for (iter = mem_cgroup_iter(root, NULL, NULL); \ 945 iter != NULL; \ 946 iter = mem_cgroup_iter(root, iter, NULL)) 947 948#define for_each_mem_cgroup(iter) \ 949 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ 950 iter != NULL; \ 951 iter = mem_cgroup_iter(NULL, iter, NULL)) 952 953static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) 954{ 955 return (memcg == root_mem_cgroup); 956} 957 958void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) 959{ 960 struct mem_cgroup *memcg; 961 962 if (!mm) 963 return; 964 965 rcu_read_lock(); 966 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 967 if (unlikely(!memcg)) 968 goto out; 969 970 switch (idx) { 971 case PGFAULT: 972 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]); 973 break; 974 case PGMAJFAULT: 975 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]); 976 break; 977 default: 978 BUG(); 979 } 980out: 981 rcu_read_unlock(); 982} 983EXPORT_SYMBOL(mem_cgroup_count_vm_event); 984 985/** 986 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg 987 * @zone: zone of the wanted lruvec 988 * @mem: memcg of the wanted lruvec 989 * 990 * Returns the lru list vector holding pages for the given @zone and 991 * @mem. This can be the global zone lruvec, if the memory controller 992 * is disabled. 
993 */ 994struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, 995 struct mem_cgroup *memcg) 996{ 997 struct mem_cgroup_per_zone *mz; 998 999 if (mem_cgroup_disabled()) 1000 return &zone->lruvec; 1001 1002 mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone)); 1003 return &mz->lruvec; 1004} 1005 1006/* 1007 * Following LRU functions are allowed to be used without PCG_LOCK. 1008 * Operations are called by routine of global LRU independently from memcg. 1009 * What we have to take care of here is validness of pc->mem_cgroup. 1010 * 1011 * Changes to pc->mem_cgroup happens when 1012 * 1. charge 1013 * 2. moving account 1014 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache. 1015 * It is added to LRU before charge. 1016 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU. 1017 * When moving account, the page is not on LRU. It's isolated. 1018 */ 1019 1020/** 1021 * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec 1022 * @zone: zone of the page 1023 * @page: the page 1024 * @lru: current lru 1025 * 1026 * This function accounts for @page being added to @lru, and returns 1027 * the lruvec for the given @zone and the memcg @page is charged to. 1028 * 1029 * The callsite is then responsible for physically linking the page to 1030 * the returned lruvec->lists[@lru]. 1031 */ 1032struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page, 1033 enum lru_list lru) 1034{ 1035 struct mem_cgroup_per_zone *mz; 1036 struct mem_cgroup *memcg; 1037 struct page_cgroup *pc; 1038 1039 if (mem_cgroup_disabled()) 1040 return &zone->lruvec; 1041 1042 pc = lookup_page_cgroup(page); 1043 VM_BUG_ON(PageCgroupAcctLRU(pc)); 1044 /* 1045 * putback: charge: 1046 * SetPageLRU SetPageCgroupUsed 1047 * smp_mb smp_mb 1048 * PageCgroupUsed && add to memcg LRU PageLRU && add to memcg LRU 1049 * 1050 * Ensure that one of the two sides adds the page to the memcg 1051 * LRU during a race. 1052 */ 1053 smp_mb(); 1054 /* 1055 * If the page is uncharged, it may be freed soon, but it 1056 * could also be swap cache (readahead, swapoff) that needs to 1057 * be reclaimable in the future. root_mem_cgroup will babysit 1058 * it for the time being. 1059 */ 1060 if (PageCgroupUsed(pc)) { 1061 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */ 1062 smp_rmb(); 1063 memcg = pc->mem_cgroup; 1064 SetPageCgroupAcctLRU(pc); 1065 } else 1066 memcg = root_mem_cgroup; 1067 mz = page_cgroup_zoneinfo(memcg, page); 1068 /* compound_order() is stabilized through lru_lock */ 1069 MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page); 1070 return &mz->lruvec; 1071} 1072 1073/** 1074 * mem_cgroup_lru_del_list - account for removing an lru page 1075 * @page: the page 1076 * @lru: target lru 1077 * 1078 * This function accounts for @page being removed from @lru. 1079 * 1080 * The callsite is then responsible for physically unlinking 1081 * @page->lru. 1082 */ 1083void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru) 1084{ 1085 struct mem_cgroup_per_zone *mz; 1086 struct mem_cgroup *memcg; 1087 struct page_cgroup *pc; 1088 1089 if (mem_cgroup_disabled()) 1090 return; 1091 1092 pc = lookup_page_cgroup(page); 1093 /* 1094 * root_mem_cgroup babysits uncharged LRU pages, but 1095 * PageCgroupUsed is cleared when the page is about to get 1096 * freed. PageCgroupAcctLRU remembers whether the 1097 * LRU-accounting happened against pc->mem_cgroup or 1098 * root_mem_cgroup. 
1099 */ 1100 if (TestClearPageCgroupAcctLRU(pc)) { 1101 VM_BUG_ON(!pc->mem_cgroup); 1102 memcg = pc->mem_cgroup; 1103 } else 1104 memcg = root_mem_cgroup; 1105 mz = page_cgroup_zoneinfo(memcg, page); 1106 /* huge page split is done under lru_lock. so, we have no races. */ 1107 MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page); 1108} 1109 1110void mem_cgroup_lru_del(struct page *page) 1111{ 1112 mem_cgroup_lru_del_list(page, page_lru(page)); 1113} 1114 1115/** 1116 * mem_cgroup_lru_move_lists - account for moving a page between lrus 1117 * @zone: zone of the page 1118 * @page: the page 1119 * @from: current lru 1120 * @to: target lru 1121 * 1122 * This function accounts for @page being moved between the lrus @from 1123 * and @to, and returns the lruvec for the given @zone and the memcg 1124 * @page is charged to. 1125 * 1126 * The callsite is then responsible for physically relinking 1127 * @page->lru to the returned lruvec->lists[@to]. 1128 */ 1129struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone, 1130 struct page *page, 1131 enum lru_list from, 1132 enum lru_list to) 1133{ 1134 /* XXX: Optimize this, especially for @from == @to */ 1135 mem_cgroup_lru_del_list(page, from); 1136 return mem_cgroup_lru_add_list(zone, page, to); 1137} 1138 1139/* 1140 * At handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed 1141 * while it's linked to lru because the page may be reused after it's fully 1142 * uncharged. To handle that, unlink page_cgroup from LRU when charge it again. 1143 * It's done under lock_page and expected that zone->lru_lock isnever held. 1144 */ 1145static void mem_cgroup_lru_del_before_commit(struct page *page) 1146{ 1147 enum lru_list lru; 1148 unsigned long flags; 1149 struct zone *zone = page_zone(page); 1150 struct page_cgroup *pc = lookup_page_cgroup(page); 1151 1152 /* 1153 * Doing this check without taking ->lru_lock seems wrong but this 1154 * is safe. Because if page_cgroup's USED bit is unset, the page 1155 * will not be added to any memcg's LRU. If page_cgroup's USED bit is 1156 * set, the commit after this will fail, anyway. 1157 * This all charge/uncharge is done under some mutual execustion. 1158 * So, we don't need to taking care of changes in USED bit. 1159 */ 1160 if (likely(!PageLRU(page))) 1161 return; 1162 1163 spin_lock_irqsave(&zone->lru_lock, flags); 1164 lru = page_lru(page); 1165 /* 1166 * The uncharged page could still be registered to the LRU of 1167 * the stale pc->mem_cgroup. 1168 * 1169 * As pc->mem_cgroup is about to get overwritten, the old LRU 1170 * accounting needs to be taken care of. Let root_mem_cgroup 1171 * babysit the page until the new memcg is responsible for it. 1172 * 1173 * The PCG_USED bit is guarded by lock_page() as the page is 1174 * swapcache/pagecache. 1175 */ 1176 if (PageLRU(page) && PageCgroupAcctLRU(pc) && !PageCgroupUsed(pc)) { 1177 del_page_from_lru_list(zone, page, lru); 1178 add_page_to_lru_list(zone, page, lru); 1179 } 1180 spin_unlock_irqrestore(&zone->lru_lock, flags); 1181} 1182 1183static void mem_cgroup_lru_add_after_commit(struct page *page) 1184{ 1185 enum lru_list lru; 1186 unsigned long flags; 1187 struct zone *zone = page_zone(page); 1188 struct page_cgroup *pc = lookup_page_cgroup(page); 1189 /* 1190 * putback: charge: 1191 * SetPageLRU SetPageCgroupUsed 1192 * smp_mb smp_mb 1193 * PageCgroupUsed && add to memcg LRU PageLRU && add to memcg LRU 1194 * 1195 * Ensure that one of the two sides adds the page to the memcg 1196 * LRU during a race. 
1197 */ 1198 smp_mb(); 1199 /* taking care of that the page is added to LRU while we commit it */ 1200 if (likely(!PageLRU(page))) 1201 return; 1202 spin_lock_irqsave(&zone->lru_lock, flags); 1203 lru = page_lru(page); 1204 /* 1205 * If the page is not on the LRU, someone will soon put it 1206 * there. If it is, and also already accounted for on the 1207 * memcg-side, it must be on the right lruvec as setting 1208 * pc->mem_cgroup and PageCgroupUsed is properly ordered. 1209 * Otherwise, root_mem_cgroup has been babysitting the page 1210 * during the charge. Move it to the new memcg now. 1211 */ 1212 if (PageLRU(page) && !PageCgroupAcctLRU(pc)) { 1213 del_page_from_lru_list(zone, page, lru); 1214 add_page_to_lru_list(zone, page, lru); 1215 } 1216 spin_unlock_irqrestore(&zone->lru_lock, flags); 1217} 1218 1219/* 1220 * Checks whether given mem is same or in the root_mem_cgroup's 1221 * hierarchy subtree 1222 */ 1223static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, 1224 struct mem_cgroup *memcg) 1225{ 1226 if (root_memcg != memcg) { 1227 return (root_memcg->use_hierarchy && 1228 css_is_ancestor(&memcg->css, &root_memcg->css)); 1229 } 1230 1231 return true; 1232} 1233 1234int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg) 1235{ 1236 int ret; 1237 struct mem_cgroup *curr = NULL; 1238 struct task_struct *p; 1239 1240 p = find_lock_task_mm(task); 1241 if (p) { 1242 curr = try_get_mem_cgroup_from_mm(p->mm); 1243 task_unlock(p); 1244 } else { 1245 /* 1246 * All threads may have already detached their mm's, but the oom 1247 * killer still needs to detect if they have already been oom 1248 * killed to prevent needlessly killing additional tasks. 1249 */ 1250 task_lock(task); 1251 curr = mem_cgroup_from_task(task); 1252 if (curr) 1253 css_get(&curr->css); 1254 task_unlock(task); 1255 } 1256 if (!curr) 1257 return 0; 1258 /* 1259 * We should check use_hierarchy of "memcg" not "curr". Because checking 1260 * use_hierarchy of "curr" here make this function true if hierarchy is 1261 * enabled in "curr" and "curr" is a child of "memcg" in *cgroup* 1262 * hierarchy(even if use_hierarchy is disabled in "memcg"). 
1263 */ 1264 ret = mem_cgroup_same_or_subtree(memcg, curr); 1265 css_put(&curr->css); 1266 return ret; 1267} 1268 1269int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) 1270{ 1271 unsigned long inactive_ratio; 1272 int nid = zone_to_nid(zone); 1273 int zid = zone_idx(zone); 1274 unsigned long inactive; 1275 unsigned long active; 1276 unsigned long gb; 1277 1278 inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid, 1279 BIT(LRU_INACTIVE_ANON)); 1280 active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid, 1281 BIT(LRU_ACTIVE_ANON)); 1282 1283 gb = (inactive + active) >> (30 - PAGE_SHIFT); 1284 if (gb) 1285 inactive_ratio = int_sqrt(10 * gb); 1286 else 1287 inactive_ratio = 1; 1288 1289 return inactive * inactive_ratio < active; 1290} 1291 1292int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone) 1293{ 1294 unsigned long active; 1295 unsigned long inactive; 1296 int zid = zone_idx(zone); 1297 int nid = zone_to_nid(zone); 1298 1299 inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid, 1300 BIT(LRU_INACTIVE_FILE)); 1301 active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid, 1302 BIT(LRU_ACTIVE_FILE)); 1303 1304 return (active > inactive); 1305} 1306 1307struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, 1308 struct zone *zone) 1309{ 1310 int nid = zone_to_nid(zone); 1311 int zid = zone_idx(zone); 1312 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid); 1313 1314 return &mz->reclaim_stat; 1315} 1316 1317struct zone_reclaim_stat * 1318mem_cgroup_get_reclaim_stat_from_page(struct page *page) 1319{ 1320 struct page_cgroup *pc; 1321 struct mem_cgroup_per_zone *mz; 1322 1323 if (mem_cgroup_disabled()) 1324 return NULL; 1325 1326 pc = lookup_page_cgroup(page); 1327 if (!PageCgroupUsed(pc)) 1328 return NULL; 1329 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */ 1330 smp_rmb(); 1331 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page); 1332 return &mz->reclaim_stat; 1333} 1334 1335#define mem_cgroup_from_res_counter(counter, member) \ 1336 container_of(counter, struct mem_cgroup, member) 1337 1338/** 1339 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1340 * @mem: the memory cgroup 1341 * 1342 * Returns the maximum amount of memory @mem can be charged with, in 1343 * pages. 1344 */ 1345static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1346{ 1347 unsigned long long margin; 1348 1349 margin = res_counter_margin(&memcg->res); 1350 if (do_swap_account) 1351 margin = min(margin, res_counter_margin(&memcg->memsw)); 1352 return margin >> PAGE_SHIFT; 1353} 1354 1355int mem_cgroup_swappiness(struct mem_cgroup *memcg) 1356{ 1357 struct cgroup *cgrp = memcg->css.cgroup; 1358 1359 /* root ? 
*/ 1360 if (cgrp->parent == NULL) 1361 return vm_swappiness; 1362 1363 return memcg->swappiness; 1364} 1365 1366static void mem_cgroup_start_move(struct mem_cgroup *memcg) 1367{ 1368 int cpu; 1369 1370 get_online_cpus(); 1371 spin_lock(&memcg->pcp_counter_lock); 1372 for_each_online_cpu(cpu) 1373 per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1; 1374 memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1; 1375 spin_unlock(&memcg->pcp_counter_lock); 1376 put_online_cpus(); 1377 1378 synchronize_rcu(); 1379} 1380 1381static void mem_cgroup_end_move(struct mem_cgroup *memcg) 1382{ 1383 int cpu; 1384 1385 if (!memcg) 1386 return; 1387 get_online_cpus(); 1388 spin_lock(&memcg->pcp_counter_lock); 1389 for_each_online_cpu(cpu) 1390 per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1; 1391 memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1; 1392 spin_unlock(&memcg->pcp_counter_lock); 1393 put_online_cpus(); 1394} 1395/* 1396 * 2 routines for checking "mem" is under move_account() or not. 1397 * 1398 * mem_cgroup_stealed() - checking a cgroup is mc.from or not. This is used 1399 * for avoiding race in accounting. If true, 1400 * pc->mem_cgroup may be overwritten. 1401 * 1402 * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or 1403 * under hierarchy of moving cgroups. This is for 1404 * waiting at hith-memory prressure caused by "move". 1405 */ 1406 1407static bool mem_cgroup_stealed(struct mem_cgroup *memcg) 1408{ 1409 VM_BUG_ON(!rcu_read_lock_held()); 1410 return this_cpu_read(memcg->stat->count[MEM_CGROUP_ON_MOVE]) > 0; 1411} 1412 1413static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1414{ 1415 struct mem_cgroup *from; 1416 struct mem_cgroup *to; 1417 bool ret = false; 1418 /* 1419 * Unlike task_move routines, we access mc.to, mc.from not under 1420 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1421 */ 1422 spin_lock(&mc.lock); 1423 from = mc.from; 1424 to = mc.to; 1425 if (!from) 1426 goto unlock; 1427 1428 ret = mem_cgroup_same_or_subtree(memcg, from) 1429 || mem_cgroup_same_or_subtree(memcg, to); 1430unlock: 1431 spin_unlock(&mc.lock); 1432 return ret; 1433} 1434 1435static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1436{ 1437 if (mc.moving_task && current != mc.moving_task) { 1438 if (mem_cgroup_under_move(memcg)) { 1439 DEFINE_WAIT(wait); 1440 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1441 /* moving charge context might have finished. */ 1442 if (mc.moving_task) 1443 schedule(); 1444 finish_wait(&mc.waitq, &wait); 1445 return true; 1446 } 1447 } 1448 return false; 1449} 1450 1451/** 1452 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode. 1453 * @memcg: The memory cgroup that went over limit 1454 * @p: Task that is going to be killed 1455 * 1456 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1457 * enabled 1458 */ 1459void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 1460{ 1461 struct cgroup *task_cgrp; 1462 struct cgroup *mem_cgrp; 1463 /* 1464 * Need a buffer in BSS, can't rely on allocations. The code relies 1465 * on the assumption that OOM is serialized for memory controller. 1466 * If this assumption is broken, revisit this code. 
1467 */ 1468 static char memcg_name[PATH_MAX]; 1469 int ret; 1470 1471 if (!memcg || !p) 1472 return; 1473 1474 1475 rcu_read_lock(); 1476 1477 mem_cgrp = memcg->css.cgroup; 1478 task_cgrp = task_cgroup(p, mem_cgroup_subsys_id); 1479 1480 ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX); 1481 if (ret < 0) { 1482 /* 1483 * Unfortunately, we are unable to convert to a useful name 1484 * But we'll still print out the usage information 1485 */ 1486 rcu_read_unlock(); 1487 goto done; 1488 } 1489 rcu_read_unlock(); 1490 1491 printk(KERN_INFO "Task in %s killed", memcg_name); 1492 1493 rcu_read_lock(); 1494 ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX); 1495 if (ret < 0) { 1496 rcu_read_unlock(); 1497 goto done; 1498 } 1499 rcu_read_unlock(); 1500 1501 /* 1502 * Continues from above, so we don't need an KERN_ level 1503 */ 1504 printk(KERN_CONT " as a result of limit of %s\n", memcg_name); 1505done: 1506 1507 printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n", 1508 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10, 1509 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10, 1510 res_counter_read_u64(&memcg->res, RES_FAILCNT)); 1511 printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, " 1512 "failcnt %llu\n", 1513 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10, 1514 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10, 1515 res_counter_read_u64(&memcg->memsw, RES_FAILCNT)); 1516} 1517 1518/* 1519 * This function returns the number of memcg under hierarchy tree. Returns 1520 * 1(self count) if no children. 1521 */ 1522static int mem_cgroup_count_children(struct mem_cgroup *memcg) 1523{ 1524 int num = 0; 1525 struct mem_cgroup *iter; 1526 1527 for_each_mem_cgroup_tree(iter, memcg) 1528 num++; 1529 return num; 1530} 1531 1532/* 1533 * Return the memory (and swap, if configured) limit for a memcg. 1534 */ 1535u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) 1536{ 1537 u64 limit; 1538 u64 memsw; 1539 1540 limit = res_counter_read_u64(&memcg->res, RES_LIMIT); 1541 limit += total_swap_pages << PAGE_SHIFT; 1542 1543 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 1544 /* 1545 * If memsw is finite and limits the amount of swap space available 1546 * to this memcg, return that limit. 1547 */ 1548 return min(limit, memsw); 1549} 1550 1551static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg, 1552 gfp_t gfp_mask, 1553 unsigned long flags) 1554{ 1555 unsigned long total = 0; 1556 bool noswap = false; 1557 int loop; 1558 1559 if (flags & MEM_CGROUP_RECLAIM_NOSWAP) 1560 noswap = true; 1561 if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum) 1562 noswap = true; 1563 1564 for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) { 1565 if (loop) 1566 drain_all_stock_async(memcg); 1567 total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap); 1568 /* 1569 * Allow limit shrinkers, which are triggered directly 1570 * by userspace, to catch signals and stop reclaim 1571 * after minimal progress, regardless of the margin. 1572 */ 1573 if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK)) 1574 break; 1575 if (mem_cgroup_margin(memcg)) 1576 break; 1577 /* 1578 * If nothing was reclaimed after two attempts, there 1579 * may be no reclaimable pages in this hierarchy. 1580 */ 1581 if (loop && !total) 1582 break; 1583 } 1584 return total; 1585} 1586 1587/** 1588 * test_mem_cgroup_node_reclaimable 1589 * @mem: the target memcg 1590 * @nid: the node ID to be checked. 1591 * @noswap : specify true here if the user wants flle only information. 
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;

}
#if MAX_NUMNODES > 1

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_HIGH_MEMORY];

	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {

		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node from which to start reclaim. Because what we need is just
 * to reduce the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons:
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or have used. So, it may make the LRU bad. And if several threads
 * hit their limit, they will see contention on one node. But freeing from a
 * remote node costs more for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcomed.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node(node, memcg->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(memcg->scan_nodes);
	/*
	 * We call this when we hit the limit, not when pages are added to the
	 * LRU. No LRU may hold pages because all pages are UNEVICTABLE or
	 * the memcg is too small and all pages are off the LRU. In that case,
	 * we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}

/*
 * Check all nodes whether they contain reclaimable pages or not.
 * For a quick scan, we make use of scan_nodes. This will allow us to skip
 * unused nodes. But scan_nodes is lazily updated and may not contain
 * enough new information. We need to do a double check.
 */
bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
{
	int nid;

	/*
	 * quick check...making use of scan_nodes.
	 * We can skip unused nodes.
1690 */ 1691 if (!nodes_empty(memcg->scan_nodes)) { 1692 for (nid = first_node(memcg->scan_nodes); 1693 nid < MAX_NUMNODES; 1694 nid = next_node(nid, memcg->scan_nodes)) { 1695 1696 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) 1697 return true; 1698 } 1699 } 1700 /* 1701 * Check rest of nodes. 1702 */ 1703 for_each_node_state(nid, N_HIGH_MEMORY) { 1704 if (node_isset(nid, memcg->scan_nodes)) 1705 continue; 1706 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) 1707 return true; 1708 } 1709 return false; 1710} 1711 1712#else 1713int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1714{ 1715 return 0; 1716} 1717 1718bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) 1719{ 1720 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap); 1721} 1722#endif 1723 1724static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, 1725 struct zone *zone, 1726 gfp_t gfp_mask, 1727 unsigned long *total_scanned) 1728{ 1729 struct mem_cgroup *victim = NULL; 1730 int total = 0; 1731 int loop = 0; 1732 unsigned long excess; 1733 unsigned long nr_scanned; 1734 struct mem_cgroup_reclaim_cookie reclaim = { 1735 .zone = zone, 1736 .priority = 0, 1737 }; 1738 1739 excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT; 1740 1741 while (1) { 1742 victim = mem_cgroup_iter(root_memcg, victim, &reclaim); 1743 if (!victim) { 1744 loop++; 1745 if (loop >= 2) { 1746 /* 1747 * If we have not been able to reclaim 1748 * anything, it might because there are 1749 * no reclaimable pages under this hierarchy 1750 */ 1751 if (!total) 1752 break; 1753 /* 1754 * We want to do more targeted reclaim. 1755 * excess >> 2 is not to excessive so as to 1756 * reclaim too much, nor too less that we keep 1757 * coming back to reclaim from this cgroup 1758 */ 1759 if (total >= (excess >> 2) || 1760 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) 1761 break; 1762 } 1763 continue; 1764 } 1765 if (!mem_cgroup_reclaimable(victim, false)) 1766 continue; 1767 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false, 1768 zone, &nr_scanned); 1769 *total_scanned += nr_scanned; 1770 if (!res_counter_soft_limit_excess(&root_memcg->res)) 1771 break; 1772 } 1773 mem_cgroup_iter_break(root_memcg, victim); 1774 return total; 1775} 1776 1777/* 1778 * Check OOM-Killer is already running under our hierarchy. 1779 * If someone is running, return false. 1780 * Has to be called with memcg_oom_lock 1781 */ 1782static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg) 1783{ 1784 struct mem_cgroup *iter, *failed = NULL; 1785 1786 for_each_mem_cgroup_tree(iter, memcg) { 1787 if (iter->oom_lock) { 1788 /* 1789 * this subtree of our hierarchy is already locked 1790 * so we cannot give a lock. 
1791 */ 1792 failed = iter; 1793 mem_cgroup_iter_break(memcg, iter); 1794 break; 1795 } else 1796 iter->oom_lock = true; 1797 } 1798 1799 if (!failed) 1800 return true; 1801 1802 /* 1803 * OK, we failed to lock the whole subtree so we have to clean up 1804 * what we set up to the failing subtree 1805 */ 1806 for_each_mem_cgroup_tree(iter, memcg) { 1807 if (iter == failed) { 1808 mem_cgroup_iter_break(memcg, iter); 1809 break; 1810 } 1811 iter->oom_lock = false; 1812 } 1813 return false; 1814} 1815 1816/* 1817 * Has to be called with memcg_oom_lock 1818 */ 1819static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1820{ 1821 struct mem_cgroup *iter; 1822 1823 for_each_mem_cgroup_tree(iter, memcg) 1824 iter->oom_lock = false; 1825 return 0; 1826} 1827 1828static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1829{ 1830 struct mem_cgroup *iter; 1831 1832 for_each_mem_cgroup_tree(iter, memcg) 1833 atomic_inc(&iter->under_oom); 1834} 1835 1836static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1837{ 1838 struct mem_cgroup *iter; 1839 1840 /* 1841 * When a new child is created while the hierarchy is under oom, 1842 * mem_cgroup_oom_lock() may not be called. We have to use 1843 * atomic_add_unless() here. 1844 */ 1845 for_each_mem_cgroup_tree(iter, memcg) 1846 atomic_add_unless(&iter->under_oom, -1, 0); 1847} 1848 1849static DEFINE_SPINLOCK(memcg_oom_lock); 1850static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1851 1852struct oom_wait_info { 1853 struct mem_cgroup *mem; 1854 wait_queue_t wait; 1855}; 1856 1857static int memcg_oom_wake_function(wait_queue_t *wait, 1858 unsigned mode, int sync, void *arg) 1859{ 1860 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg, 1861 *oom_wait_memcg; 1862 struct oom_wait_info *oom_wait_info; 1863 1864 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1865 oom_wait_memcg = oom_wait_info->mem; 1866 1867 /* 1868 * Both of oom_wait_info->mem and wake_mem are stable under us. 1869 * Then we can use css_is_ancestor without taking care of RCU. 1870 */ 1871 if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg) 1872 && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg)) 1873 return 0; 1874 return autoremove_wake_function(wait, mode, sync, arg); 1875} 1876 1877static void memcg_wakeup_oom(struct mem_cgroup *memcg) 1878{ 1879 /* for filtering, pass "memcg" as argument. */ 1880 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1881} 1882 1883static void memcg_oom_recover(struct mem_cgroup *memcg) 1884{ 1885 if (memcg && atomic_read(&memcg->under_oom)) 1886 memcg_wakeup_oom(memcg); 1887} 1888 1889/* 1890 * try to call OOM killer. returns false if we should exit memory-reclaim loop. 1891 */ 1892bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask) 1893{ 1894 struct oom_wait_info owait; 1895 bool locked, need_to_kill; 1896 1897 owait.mem = memcg; 1898 owait.wait.flags = 0; 1899 owait.wait.func = memcg_oom_wake_function; 1900 owait.wait.private = current; 1901 INIT_LIST_HEAD(&owait.wait.task_list); 1902 need_to_kill = true; 1903 mem_cgroup_mark_under_oom(memcg); 1904 1905 /* At first, try to OOM lock hierarchy under memcg.*/ 1906 spin_lock(&memcg_oom_lock); 1907 locked = mem_cgroup_oom_lock(memcg); 1908 /* 1909 * Even if signal_pending(), we can't quit charge() loop without 1910 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL 1911 * under OOM is always welcomed, use TASK_KILLABLE here. 
1912 */ 1913 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1914 if (!locked || memcg->oom_kill_disable) 1915 need_to_kill = false; 1916 if (locked) 1917 mem_cgroup_oom_notify(memcg); 1918 spin_unlock(&memcg_oom_lock); 1919 1920 if (need_to_kill) { 1921 finish_wait(&memcg_oom_waitq, &owait.wait); 1922 mem_cgroup_out_of_memory(memcg, mask); 1923 } else { 1924 schedule(); 1925 finish_wait(&memcg_oom_waitq, &owait.wait); 1926 } 1927 spin_lock(&memcg_oom_lock); 1928 if (locked) 1929 mem_cgroup_oom_unlock(memcg); 1930 memcg_wakeup_oom(memcg); 1931 spin_unlock(&memcg_oom_lock); 1932 1933 mem_cgroup_unmark_under_oom(memcg); 1934 1935 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current)) 1936 return false; 1937 /* Give chance to dying process */ 1938 schedule_timeout_uninterruptible(1); 1939 return true; 1940} 1941 1942/* 1943 * Currently used to update mapped file statistics, but the routine can be 1944 * generalized to update other statistics as well. 1945 * 1946 * Notes: Race condition 1947 * 1948 * We usually use page_cgroup_lock() for accessing page_cgroup member but 1949 * it tends to be costly. But considering some conditions, we doesn't need 1950 * to do so _always_. 1951 * 1952 * Considering "charge", lock_page_cgroup() is not required because all 1953 * file-stat operations happen after a page is attached to radix-tree. There 1954 * are no race with "charge". 1955 * 1956 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup 1957 * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even 1958 * if there are race with "uncharge". Statistics itself is properly handled 1959 * by flags. 1960 * 1961 * Considering "move", this is an only case we see a race. To make the race 1962 * small, we check MEM_CGROUP_ON_MOVE percpu value and detect there are 1963 * possibility of race condition. If there is, we take a lock. 1964 */ 1965 1966void mem_cgroup_update_page_stat(struct page *page, 1967 enum mem_cgroup_page_stat_item idx, int val) 1968{ 1969 struct mem_cgroup *memcg; 1970 struct page_cgroup *pc = lookup_page_cgroup(page); 1971 bool need_unlock = false; 1972 unsigned long uninitialized_var(flags); 1973 1974 if (mem_cgroup_disabled()) 1975 return; 1976 1977 rcu_read_lock(); 1978 memcg = pc->mem_cgroup; 1979 if (unlikely(!memcg || !PageCgroupUsed(pc))) 1980 goto out; 1981 /* pc->mem_cgroup is unstable ? */ 1982 if (unlikely(mem_cgroup_stealed(memcg)) || PageTransHuge(page)) { 1983 /* take a lock against to access pc->mem_cgroup */ 1984 move_lock_page_cgroup(pc, &flags); 1985 need_unlock = true; 1986 memcg = pc->mem_cgroup; 1987 if (!memcg || !PageCgroupUsed(pc)) 1988 goto out; 1989 } 1990 1991 switch (idx) { 1992 case MEMCG_NR_FILE_MAPPED: 1993 if (val > 0) 1994 SetPageCgroupFileMapped(pc); 1995 else if (!page_mapped(page)) 1996 ClearPageCgroupFileMapped(pc); 1997 idx = MEM_CGROUP_STAT_FILE_MAPPED; 1998 break; 1999 default: 2000 BUG(); 2001 } 2002 2003 this_cpu_add(memcg->stat->count[idx], val); 2004 2005out: 2006 if (unlikely(need_unlock)) 2007 move_unlock_page_cgroup(pc, &flags); 2008 rcu_read_unlock(); 2009 return; 2010} 2011EXPORT_SYMBOL(mem_cgroup_update_page_stat); 2012 2013/* 2014 * size of first charge trial. "32" comes from vmscan.c's magic value. 2015 * TODO: maybe necessary to use big numbers in big irons. 
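 *
 * For illustration (numbers assume the 32-page batch defined below): a
 * single-page charge asks the res_counter for CHARGE_BATCH pages at once,
 * uses one and parks the rest in this cpu's stock, so the following
 * single-page charges on this cpu are served locally by consume_stock()
 * without touching the res_counter again.  Sketch of the arithmetic in
 * __mem_cgroup_try_charge():
 *
 *     nr_pages = 1, batch = max(CHARGE_BATCH, nr_pages) = 32
 *     res_counter_charge(&memcg->res, 32 * PAGE_SIZE, ...)
 *     refill_stock(memcg, batch - nr_pages)   -- 31 pages cached per-cpu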
2016 */ 2017#define CHARGE_BATCH 32U 2018struct memcg_stock_pcp { 2019 struct mem_cgroup *cached; /* this never be root cgroup */ 2020 unsigned int nr_pages; 2021 struct work_struct work; 2022 unsigned long flags; 2023#define FLUSHING_CACHED_CHARGE (0) 2024}; 2025static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2026static DEFINE_MUTEX(percpu_charge_mutex); 2027 2028/* 2029 * Try to consume stocked charge on this cpu. If success, one page is consumed 2030 * from local stock and true is returned. If the stock is 0 or charges from a 2031 * cgroup which is not current target, returns false. This stock will be 2032 * refilled. 2033 */ 2034static bool consume_stock(struct mem_cgroup *memcg) 2035{ 2036 struct memcg_stock_pcp *stock; 2037 bool ret = true; 2038 2039 stock = &get_cpu_var(memcg_stock); 2040 if (memcg == stock->cached && stock->nr_pages) 2041 stock->nr_pages--; 2042 else /* need to call res_counter_charge */ 2043 ret = false; 2044 put_cpu_var(memcg_stock); 2045 return ret; 2046} 2047 2048/* 2049 * Returns stocks cached in percpu to res_counter and reset cached information. 2050 */ 2051static void drain_stock(struct memcg_stock_pcp *stock) 2052{ 2053 struct mem_cgroup *old = stock->cached; 2054 2055 if (stock->nr_pages) { 2056 unsigned long bytes = stock->nr_pages * PAGE_SIZE; 2057 2058 res_counter_uncharge(&old->res, bytes); 2059 if (do_swap_account) 2060 res_counter_uncharge(&old->memsw, bytes); 2061 stock->nr_pages = 0; 2062 } 2063 stock->cached = NULL; 2064} 2065 2066/* 2067 * This must be called under preempt disabled or must be called by 2068 * a thread which is pinned to local cpu. 2069 */ 2070static void drain_local_stock(struct work_struct *dummy) 2071{ 2072 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock); 2073 drain_stock(stock); 2074 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2075} 2076 2077/* 2078 * Cache charges(val) which is from res_counter, to local per_cpu area. 2079 * This will be consumed by consume_stock() function, later. 2080 */ 2081static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2082{ 2083 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); 2084 2085 if (stock->cached != memcg) { /* reset if necessary */ 2086 drain_stock(stock); 2087 stock->cached = memcg; 2088 } 2089 stock->nr_pages += nr_pages; 2090 put_cpu_var(memcg_stock); 2091} 2092 2093/* 2094 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2095 * of the hierarchy under it. sync flag says whether we should block 2096 * until the work is done. 
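 *
 * For reference, the two wrappers further down drive this as (sketch only):
 *
 *     drain_all_stock_async(memcg)  -- mutex_trylock(), sync == false:
 *                                      kick per-cpu works, do not wait
 *     drain_all_stock_sync(memcg)   -- mutex_lock(), sync == true:
 *                                      additionally flush_work() on every
 *                                      cpu that is still flushing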
2097 */ 2098static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync) 2099{ 2100 int cpu, curcpu; 2101 2102 /* Notify other cpus that system-wide "drain" is running */ 2103 get_online_cpus(); 2104 curcpu = get_cpu(); 2105 for_each_online_cpu(cpu) { 2106 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2107 struct mem_cgroup *memcg; 2108 2109 memcg = stock->cached; 2110 if (!memcg || !stock->nr_pages) 2111 continue; 2112 if (!mem_cgroup_same_or_subtree(root_memcg, memcg)) 2113 continue; 2114 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2115 if (cpu == curcpu) 2116 drain_local_stock(&stock->work); 2117 else 2118 schedule_work_on(cpu, &stock->work); 2119 } 2120 } 2121 put_cpu(); 2122 2123 if (!sync) 2124 goto out; 2125 2126 for_each_online_cpu(cpu) { 2127 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2128 if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) 2129 flush_work(&stock->work); 2130 } 2131out: 2132 put_online_cpus(); 2133} 2134 2135/* 2136 * Tries to drain stocked charges in other cpus. This function is asynchronous 2137 * and just put a work per cpu for draining localy on each cpu. Caller can 2138 * expects some charges will be back to res_counter later but cannot wait for 2139 * it. 2140 */ 2141static void drain_all_stock_async(struct mem_cgroup *root_memcg) 2142{ 2143 /* 2144 * If someone calls draining, avoid adding more kworker runs. 2145 */ 2146 if (!mutex_trylock(&percpu_charge_mutex)) 2147 return; 2148 drain_all_stock(root_memcg, false); 2149 mutex_unlock(&percpu_charge_mutex); 2150} 2151 2152/* This is a synchronous drain interface. */ 2153static void drain_all_stock_sync(struct mem_cgroup *root_memcg) 2154{ 2155 /* called when force_empty is called */ 2156 mutex_lock(&percpu_charge_mutex); 2157 drain_all_stock(root_memcg, true); 2158 mutex_unlock(&percpu_charge_mutex); 2159} 2160 2161/* 2162 * This function drains percpu counter value from DEAD cpu and 2163 * move it to local cpu. Note that this function can be preempted. 2164 */ 2165static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu) 2166{ 2167 int i; 2168 2169 spin_lock(&memcg->pcp_counter_lock); 2170 for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) { 2171 long x = per_cpu(memcg->stat->count[i], cpu); 2172 2173 per_cpu(memcg->stat->count[i], cpu) = 0; 2174 memcg->nocpu_base.count[i] += x; 2175 } 2176 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 2177 unsigned long x = per_cpu(memcg->stat->events[i], cpu); 2178 2179 per_cpu(memcg->stat->events[i], cpu) = 0; 2180 memcg->nocpu_base.events[i] += x; 2181 } 2182 /* need to clear ON_MOVE value, works as a kind of lock. 
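 *
 * A rough picture of how the hotplug callback below uses these helpers
 * (illustrative summary only):
 *
 *     CPU_ONLINE        -> synchronize_mem_cgroup_on_move(): re-seed the
 *                          ON_MOVE percpu slot from nocpu_base
 *     CPU_DEAD(_FROZEN) -> mem_cgroup_drain_pcp_counter(): fold the dead
 *                          cpu's counters into nocpu_base, then
 *                          drain_stock() of that cpu's charge cache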
*/ 2183 per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0; 2184 spin_unlock(&memcg->pcp_counter_lock); 2185} 2186 2187static void synchronize_mem_cgroup_on_move(struct mem_cgroup *memcg, int cpu) 2188{ 2189 int idx = MEM_CGROUP_ON_MOVE; 2190 2191 spin_lock(&memcg->pcp_counter_lock); 2192 per_cpu(memcg->stat->count[idx], cpu) = memcg->nocpu_base.count[idx]; 2193 spin_unlock(&memcg->pcp_counter_lock); 2194} 2195 2196static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb, 2197 unsigned long action, 2198 void *hcpu) 2199{ 2200 int cpu = (unsigned long)hcpu; 2201 struct memcg_stock_pcp *stock; 2202 struct mem_cgroup *iter; 2203 2204 if ((action == CPU_ONLINE)) { 2205 for_each_mem_cgroup(iter) 2206 synchronize_mem_cgroup_on_move(iter, cpu); 2207 return NOTIFY_OK; 2208 } 2209 2210 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) 2211 return NOTIFY_OK; 2212 2213 for_each_mem_cgroup(iter) 2214 mem_cgroup_drain_pcp_counter(iter, cpu); 2215 2216 stock = &per_cpu(memcg_stock, cpu); 2217 drain_stock(stock); 2218 return NOTIFY_OK; 2219} 2220 2221 2222/* See __mem_cgroup_try_charge() for details */ 2223enum { 2224 CHARGE_OK, /* success */ 2225 CHARGE_RETRY, /* need to retry but retry is not bad */ 2226 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */ 2227 CHARGE_WOULDBLOCK, /* __GFP_WAIT wasn't set and not enough res. */ 2228 CHARGE_OOM_DIE, /* the current task is killed because of OOM */ 2229}; 2230 2231static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2232 unsigned int nr_pages, bool oom_check) 2233{ 2234 unsigned long csize = nr_pages * PAGE_SIZE; 2235 struct mem_cgroup *mem_over_limit; 2236 struct res_counter *fail_res; 2237 unsigned long flags = 0; 2238 int ret; 2239 2240 ret = res_counter_charge(&memcg->res, csize, &fail_res); 2241 2242 if (likely(!ret)) { 2243 if (!do_swap_account) 2244 return CHARGE_OK; 2245 ret = res_counter_charge(&memcg->memsw, csize, &fail_res); 2246 if (likely(!ret)) 2247 return CHARGE_OK; 2248 2249 res_counter_uncharge(&memcg->res, csize); 2250 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); 2251 flags |= MEM_CGROUP_RECLAIM_NOSWAP; 2252 } else 2253 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); 2254 /* 2255 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch 2256 * of regular pages (CHARGE_BATCH), or a single regular page (1). 2257 * 2258 * Never reclaim on behalf of optional batching, retry with a 2259 * single page instead. 2260 */ 2261 if (nr_pages == CHARGE_BATCH) 2262 return CHARGE_RETRY; 2263 2264 if (!(gfp_mask & __GFP_WAIT)) 2265 return CHARGE_WOULDBLOCK; 2266 2267 ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags); 2268 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2269 return CHARGE_RETRY; 2270 /* 2271 * Even though the limit is exceeded at this point, reclaim 2272 * may have been able to free some pages. Retry the charge 2273 * before killing the task. 2274 * 2275 * Only for regular pages, though: huge pages are rather 2276 * unlikely to succeed so close to the limit, and we fall back 2277 * to regular pages anyway in case of failure. 2278 */ 2279 if (nr_pages == 1 && ret) 2280 return CHARGE_RETRY; 2281 2282 /* 2283 * At task move, charge accounts can be doubly counted. So, it's 2284 * better to wait until the end of task_move if something is going on.
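 *
 * For orientation, an illustrative summary of the whole retry ladder in
 * this function (it only restates the branches above and below):
 *
 *     res (+ memsw) charge succeeded        -> CHARGE_OK
 *     nr_pages == CHARGE_BATCH              -> CHARGE_RETRY (retry as 1 page)
 *     !(gfp_mask & __GFP_WAIT)              -> CHARGE_WOULDBLOCK
 *     reclaim opened enough margin          -> CHARGE_RETRY
 *     single page, reclaim made progress    -> CHARGE_RETRY
 *     account move in flight                -> CHARGE_RETRY (wait it out)
 *     !oom_check                            -> CHARGE_NOMEM
 *     mem_cgroup_handle_oom() says retry    -> CHARGE_RETRY, else CHARGE_OOM_DIE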
2285 */ 2286 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2287 return CHARGE_RETRY; 2288 2289 /* If we don't need to call the oom-killer at all, return immediately */ 2290 if (!oom_check) 2291 return CHARGE_NOMEM; 2292 /* check OOM */ 2293 if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) 2294 return CHARGE_OOM_DIE; 2295 2296 return CHARGE_RETRY; 2297} 2298 2299/* 2300 * Unlike the exported interface, an "oom" parameter is added. If oom==true, 2301 * the oom-killer can be invoked. 2302 */ 2303static int __mem_cgroup_try_charge(struct mm_struct *mm, 2304 gfp_t gfp_mask, 2305 unsigned int nr_pages, 2306 struct mem_cgroup **ptr, 2307 bool oom) 2308{ 2309 unsigned int batch = max(CHARGE_BATCH, nr_pages); 2310 int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; 2311 struct mem_cgroup *memcg = NULL; 2312 int ret; 2313 2314 /* 2315 * Unlike the global VM's OOM kill, we are not in a system-wide memory 2316 * shortage here. So, let a dying process go ahead, in addition to a 2317 * MEMDIE process. 2318 */ 2319 if (unlikely(test_thread_flag(TIF_MEMDIE) 2320 || fatal_signal_pending(current))) 2321 goto bypass; 2322 2323 /* 2324 * We always charge the cgroup the mm_struct belongs to. 2325 * The mm_struct's mem_cgroup changes on task migration if the 2326 * thread group leader migrates. It's possible that mm is not 2327 * set; if so, charge the init_mm (happens for pagecache usage). 2328 */ 2329 if (!*ptr && !mm) 2330 goto bypass; 2331again: 2332 if (*ptr) { /* css should be a valid one */ 2333 memcg = *ptr; 2334 VM_BUG_ON(css_is_removed(&memcg->css)); 2335 if (mem_cgroup_is_root(memcg)) 2336 goto done; 2337 if (nr_pages == 1 && consume_stock(memcg)) 2338 goto done; 2339 css_get(&memcg->css); 2340 } else { 2341 struct task_struct *p; 2342 2343 rcu_read_lock(); 2344 p = rcu_dereference(mm->owner); 2345 /* 2346 * Because we don't hold task_lock(), "p" can exit. 2347 * In that case, "memcg" can point to root or "p" can be NULL due 2348 * to a race with swapoff. Then, we have a small risk of mis-accounting. 2349 * But that kind of racy mis-accounting can always happen because 2350 * we don't hold cgroup_mutex(); taking it would be overkill, so we 2351 * allow that small race here. 2352 * (*) swapoff and friends will charge against the mm_struct, not against 2353 * the task_struct. So, mm->owner can be NULL. 2354 */ 2355 memcg = mem_cgroup_from_task(p); 2356 if (!memcg || mem_cgroup_is_root(memcg)) { 2357 rcu_read_unlock(); 2358 goto done; 2359 } 2360 if (nr_pages == 1 && consume_stock(memcg)) { 2361 /* 2362 * It seems dangerous to access memcg without css_get(). 2363 * But considering how consume_stock() works, it's not 2364 * necessary. If consume_stock() succeeds, some charges 2365 * from this memcg are cached on this cpu. So, we 2366 * don't need to call css_get()/css_tryget() before 2367 * calling consume_stock(). 2368 */ 2369 rcu_read_unlock(); 2370 goto done; 2371 } 2372 /* after here, we may be blocked.
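 *
 * For illustration, the usual shape of a caller of this function (this
 * mirrors mem_cgroup_charge_common() further down, sketch only):
 *
 *     ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 *     if (ret || !memcg)
 *         return ret;                 -- bypassed, or -ENOMEM
 *     __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
 *     -- or __mem_cgroup_cancel_charge(memcg, nr_pages) on an error path
 *
 * Since everything past this point can block,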
we need to get refcnt */ 2373 if (!css_tryget(&memcg->css)) { 2374 rcu_read_unlock(); 2375 goto again; 2376 } 2377 rcu_read_unlock(); 2378 } 2379 2380 do { 2381 bool oom_check; 2382 2383 /* If killed, bypass charge */ 2384 if (fatal_signal_pending(current)) { 2385 css_put(&memcg->css); 2386 goto bypass; 2387 } 2388 2389 oom_check = false; 2390 if (oom && !nr_oom_retries) { 2391 oom_check = true; 2392 nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; 2393 } 2394 2395 ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check); 2396 switch (ret) { 2397 case CHARGE_OK: 2398 break; 2399 case CHARGE_RETRY: /* not in OOM situation but retry */ 2400 batch = nr_pages; 2401 css_put(&memcg->css); 2402 memcg = NULL; 2403 goto again; 2404 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */ 2405 css_put(&memcg->css); 2406 goto nomem; 2407 case CHARGE_NOMEM: /* OOM routine works */ 2408 if (!oom) { 2409 css_put(&memcg->css); 2410 goto nomem; 2411 } 2412 /* If oom, we never return -ENOMEM */ 2413 nr_oom_retries--; 2414 break; 2415 case CHARGE_OOM_DIE: /* Killed by OOM Killer */ 2416 css_put(&memcg->css); 2417 goto bypass; 2418 } 2419 } while (ret != CHARGE_OK); 2420 2421 if (batch > nr_pages) 2422 refill_stock(memcg, batch - nr_pages); 2423 css_put(&memcg->css); 2424done: 2425 *ptr = memcg; 2426 return 0; 2427nomem: 2428 *ptr = NULL; 2429 return -ENOMEM; 2430bypass: 2431 *ptr = NULL; 2432 return 0; 2433} 2434 2435/* 2436 * Somemtimes we have to undo a charge we got by try_charge(). 2437 * This function is for that and do uncharge, put css's refcnt. 2438 * gotten by try_charge(). 2439 */ 2440static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg, 2441 unsigned int nr_pages) 2442{ 2443 if (!mem_cgroup_is_root(memcg)) { 2444 unsigned long bytes = nr_pages * PAGE_SIZE; 2445 2446 res_counter_uncharge(&memcg->res, bytes); 2447 if (do_swap_account) 2448 res_counter_uncharge(&memcg->memsw, bytes); 2449 } 2450} 2451 2452/* 2453 * A helper function to get mem_cgroup from ID. must be called under 2454 * rcu_read_lock(). The caller must check css_is_removed() or some if 2455 * it's concern. (dropping refcnt from swap can be called against removed 2456 * memcg.) 
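 *
 * Illustrative use under RCU (this is the pattern the lookup callers in
 * this file follow, e.g. try_get_mem_cgroup_from_page() below):
 *
 *     rcu_read_lock();
 *     memcg = mem_cgroup_lookup(id);
 *     if (memcg && !css_tryget(&memcg->css))
 *         memcg = NULL;          -- cgroup vanished, treat as not found
 *     rcu_read_unlock();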
2457 */ 2458static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) 2459{ 2460 struct cgroup_subsys_state *css; 2461 2462 /* ID 0 is unused ID */ 2463 if (!id) 2464 return NULL; 2465 css = css_lookup(&mem_cgroup_subsys, id); 2466 if (!css) 2467 return NULL; 2468 return container_of(css, struct mem_cgroup, css); 2469} 2470 2471struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) 2472{ 2473 struct mem_cgroup *memcg = NULL; 2474 struct page_cgroup *pc; 2475 unsigned short id; 2476 swp_entry_t ent; 2477 2478 VM_BUG_ON(!PageLocked(page)); 2479 2480 pc = lookup_page_cgroup(page); 2481 lock_page_cgroup(pc); 2482 if (PageCgroupUsed(pc)) { 2483 memcg = pc->mem_cgroup; 2484 if (memcg && !css_tryget(&memcg->css)) 2485 memcg = NULL; 2486 } else if (PageSwapCache(page)) { 2487 ent.val = page_private(page); 2488 id = lookup_swap_cgroup_id(ent); 2489 rcu_read_lock(); 2490 memcg = mem_cgroup_lookup(id); 2491 if (memcg && !css_tryget(&memcg->css)) 2492 memcg = NULL; 2493 rcu_read_unlock(); 2494 } 2495 unlock_page_cgroup(pc); 2496 return memcg; 2497} 2498 2499static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, 2500 struct page *page, 2501 unsigned int nr_pages, 2502 struct page_cgroup *pc, 2503 enum charge_type ctype) 2504{ 2505 lock_page_cgroup(pc); 2506 if (unlikely(PageCgroupUsed(pc))) { 2507 unlock_page_cgroup(pc); 2508 __mem_cgroup_cancel_charge(memcg, nr_pages); 2509 return; 2510 } 2511 /* 2512 * we don't need page_cgroup_lock about tail pages, becase they are not 2513 * accessed by any other context at this point. 2514 */ 2515 pc->mem_cgroup = memcg; 2516 /* 2517 * We access a page_cgroup asynchronously without lock_page_cgroup(). 2518 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup 2519 * is accessed after testing USED bit. To make pc->mem_cgroup visible 2520 * before USED bit, we need memory barrier here. 2521 * See mem_cgroup_add_lru_list(), etc. 2522 */ 2523 smp_wmb(); 2524 switch (ctype) { 2525 case MEM_CGROUP_CHARGE_TYPE_CACHE: 2526 case MEM_CGROUP_CHARGE_TYPE_SHMEM: 2527 SetPageCgroupCache(pc); 2528 SetPageCgroupUsed(pc); 2529 break; 2530 case MEM_CGROUP_CHARGE_TYPE_MAPPED: 2531 ClearPageCgroupCache(pc); 2532 SetPageCgroupUsed(pc); 2533 break; 2534 default: 2535 break; 2536 } 2537 2538 mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages); 2539 unlock_page_cgroup(pc); 2540 /* 2541 * "charge_statistics" updated event counter. Then, check it. 2542 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. 2543 * if they exceeds softlimit. 2544 */ 2545 memcg_check_events(memcg, page); 2546} 2547 2548#ifdef CONFIG_TRANSPARENT_HUGEPAGE 2549 2550#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\ 2551 (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION)) 2552/* 2553 * Because tail pages are not marked as "used", set it. We're under 2554 * zone->lru_lock, 'splitting on pmd' and compound_lock. 2555 * charge/uncharge will be never happen and move_account() is done under 2556 * compound_lock(), so we don't have to take care of races. 
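 *
 * For scale (illustrative numbers only; assuming 2MB THP on 4KB base
 * pages, i.e. HPAGE_PMD_NR == 512): the loop below fixes up the 511 tail
 * page_cgroups so they inherit head_pc->mem_cgroup, and the head's LRU
 * zone stat is reduced by 511 because the tails are re-added to the LRU
 * one by one through the generic split path and our LRU hooks.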
2557 */ 2558void mem_cgroup_split_huge_fixup(struct page *head) 2559{ 2560 struct page_cgroup *head_pc = lookup_page_cgroup(head); 2561 struct page_cgroup *pc; 2562 int i; 2563 2564 if (mem_cgroup_disabled()) 2565 return; 2566 for (i = 1; i < HPAGE_PMD_NR; i++) { 2567 pc = head_pc + i; 2568 pc->mem_cgroup = head_pc->mem_cgroup; 2569 smp_wmb();/* see __commit_charge() */ 2570 /* 2571 * LRU flags cannot be copied because we need to add tail 2572 * page to LRU by generic call and our hooks will be called. 2573 */ 2574 pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT; 2575 } 2576 2577 if (PageCgroupAcctLRU(head_pc)) { 2578 enum lru_list lru; 2579 struct mem_cgroup_per_zone *mz; 2580 /* 2581 * We hold lru_lock, then, reduce counter directly. 2582 */ 2583 lru = page_lru(head); 2584 mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head); 2585 MEM_CGROUP_ZSTAT(mz, lru) -= HPAGE_PMD_NR - 1; 2586 } 2587} 2588#endif 2589 2590/** 2591 * mem_cgroup_move_account - move account of the page 2592 * @page: the page 2593 * @nr_pages: number of regular pages (>1 for huge pages) 2594 * @pc: page_cgroup of the page. 2595 * @from: mem_cgroup which the page is moved from. 2596 * @to: mem_cgroup which the page is moved to. @from != @to. 2597 * @uncharge: whether we should call uncharge and css_put against @from. 2598 * 2599 * The caller must confirm following. 2600 * - page is not on LRU (isolate_page() is useful.) 2601 * - compound_lock is held when nr_pages > 1 2602 * 2603 * This function doesn't do "charge" nor css_get to new cgroup. It should be 2604 * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is 2605 * true, this function does "uncharge" from old cgroup, but it doesn't if 2606 * @uncharge is false, so a caller should do "uncharge". 2607 */ 2608static int mem_cgroup_move_account(struct page *page, 2609 unsigned int nr_pages, 2610 struct page_cgroup *pc, 2611 struct mem_cgroup *from, 2612 struct mem_cgroup *to, 2613 bool uncharge) 2614{ 2615 unsigned long flags; 2616 int ret; 2617 2618 VM_BUG_ON(from == to); 2619 VM_BUG_ON(PageLRU(page)); 2620 /* 2621 * The page is isolated from LRU. So, collapse function 2622 * will not handle this page. But page splitting can happen. 2623 * Do this check under compound_page_lock(). The caller should 2624 * hold it. 2625 */ 2626 ret = -EBUSY; 2627 if (nr_pages > 1 && !PageTransHuge(page)) 2628 goto out; 2629 2630 lock_page_cgroup(pc); 2631 2632 ret = -EINVAL; 2633 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from) 2634 goto unlock; 2635 2636 move_lock_page_cgroup(pc, &flags); 2637 2638 if (PageCgroupFileMapped(pc)) { 2639 /* Update mapped_file data for mem_cgroup */ 2640 preempt_disable(); 2641 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); 2642 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); 2643 preempt_enable(); 2644 } 2645 mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages); 2646 if (uncharge) 2647 /* This is not "cancel", but cancel_charge does all we need. */ 2648 __mem_cgroup_cancel_charge(from, nr_pages); 2649 2650 /* caller should have done css_get */ 2651 pc->mem_cgroup = to; 2652 mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages); 2653 /* 2654 * We charges against "to" which may not have any tasks. Then, "to" 2655 * can be under rmdir(). But in current implementation, caller of 2656 * this function is just force_empty() and move charge, so it's 2657 * guaranteed that "to" is never removed. So, we don't check rmdir 2658 * status here. 
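 *
 * For reference, the move-to-parent path drives this function roughly as
 * follows (sketch of mem_cgroup_move_parent() below):
 *
 *     __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
 *     if (nr_pages > 1)
 *         flags = compound_lock_irqsave(page);
 *     ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
 *     if (ret)
 *         __mem_cgroup_cancel_charge(parent, nr_pages);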
2659 */ 2660 move_unlock_page_cgroup(pc, &flags); 2661 ret = 0; 2662unlock: 2663 unlock_page_cgroup(pc); 2664 /* 2665 * check events 2666 */ 2667 memcg_check_events(to, page); 2668 memcg_check_events(from, page); 2669out: 2670 return ret; 2671} 2672 2673/* 2674 * move charges to its parent. 2675 */ 2676 2677static int mem_cgroup_move_parent(struct page *page, 2678 struct page_cgroup *pc, 2679 struct mem_cgroup *child, 2680 gfp_t gfp_mask) 2681{ 2682 struct cgroup *cg = child->css.cgroup; 2683 struct cgroup *pcg = cg->parent; 2684 struct mem_cgroup *parent; 2685 unsigned int nr_pages; 2686 unsigned long uninitialized_var(flags); 2687 int ret; 2688 2689 /* Is ROOT ? */ 2690 if (!pcg) 2691 return -EINVAL; 2692 2693 ret = -EBUSY; 2694 if (!get_page_unless_zero(page)) 2695 goto out; 2696 if (isolate_lru_page(page)) 2697 goto put; 2698 2699 nr_pages = hpage_nr_pages(page); 2700 2701 parent = mem_cgroup_from_cont(pcg); 2702 ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false); 2703 if (ret || !parent) 2704 goto put_back; 2705 2706 if (nr_pages > 1) 2707 flags = compound_lock_irqsave(page); 2708 2709 ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true); 2710 if (ret) 2711 __mem_cgroup_cancel_charge(parent, nr_pages); 2712 2713 if (nr_pages > 1) 2714 compound_unlock_irqrestore(page, flags); 2715put_back: 2716 putback_lru_page(page); 2717put: 2718 put_page(page); 2719out: 2720 return ret; 2721} 2722 2723/* 2724 * Charge the memory controller for page usage. 2725 * Return 2726 * 0 if the charge was successful 2727 * < 0 if the cgroup is over its limit 2728 */ 2729static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, 2730 gfp_t gfp_mask, enum charge_type ctype) 2731{ 2732 struct mem_cgroup *memcg = NULL; 2733 unsigned int nr_pages = 1; 2734 struct page_cgroup *pc; 2735 bool oom = true; 2736 int ret; 2737 2738 if (PageTransHuge(page)) { 2739 nr_pages <<= compound_order(page); 2740 VM_BUG_ON(!PageTransHuge(page)); 2741 /* 2742 * Never OOM-kill a process for a huge page. The 2743 * fault handler will fall back to regular pages. 2744 */ 2745 oom = false; 2746 } 2747 2748 pc = lookup_page_cgroup(page); 2749 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom); 2750 if (ret || !memcg) 2751 return ret; 2752 2753 __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype); 2754 return 0; 2755} 2756 2757int mem_cgroup_newpage_charge(struct page *page, 2758 struct mm_struct *mm, gfp_t gfp_mask) 2759{ 2760 if (mem_cgroup_disabled()) 2761 return 0; 2762 VM_BUG_ON(page_mapped(page)); 2763 VM_BUG_ON(page->mapping && !PageAnon(page)); 2764 VM_BUG_ON(!mm); 2765 return mem_cgroup_charge_common(page, mm, gfp_mask, 2766 MEM_CGROUP_CHARGE_TYPE_MAPPED); 2767} 2768 2769static void 2770__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, 2771 enum charge_type ctype); 2772 2773static void 2774__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg, 2775 enum charge_type ctype) 2776{ 2777 struct page_cgroup *pc = lookup_page_cgroup(page); 2778 /* 2779 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page 2780 * is already on LRU. It means the page may on some other page_cgroup's 2781 * LRU. Take care of it. 
2782 */ 2783 mem_cgroup_lru_del_before_commit(page); 2784 __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype); 2785 mem_cgroup_lru_add_after_commit(page); 2786 return; 2787} 2788 2789int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 2790 gfp_t gfp_mask) 2791{ 2792 struct mem_cgroup *memcg = NULL; 2793 int ret; 2794 2795 if (mem_cgroup_disabled()) 2796 return 0; 2797 if (PageCompound(page)) 2798 return 0; 2799 2800 if (unlikely(!mm)) 2801 mm = &init_mm; 2802 2803 if (page_is_file_cache(page)) { 2804 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true); 2805 if (ret || !memcg) 2806 return ret; 2807 2808 /* 2809 * FUSE reuses pages without going through the final 2810 * put that would remove them from the LRU list, make 2811 * sure that they get relinked properly. 2812 */ 2813 __mem_cgroup_commit_charge_lrucare(page, memcg, 2814 MEM_CGROUP_CHARGE_TYPE_CACHE); 2815 return ret; 2816 } 2817 /* shmem */ 2818 if (PageSwapCache(page)) { 2819 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg); 2820 if (!ret) 2821 __mem_cgroup_commit_charge_swapin(page, memcg, 2822 MEM_CGROUP_CHARGE_TYPE_SHMEM); 2823 } else 2824 ret = mem_cgroup_charge_common(page, mm, gfp_mask, 2825 MEM_CGROUP_CHARGE_TYPE_SHMEM); 2826 2827 return ret; 2828} 2829 2830/* 2831 * While swap-in, try_charge -> commit or cancel, the page is locked. 2832 * And when try_charge() successfully returns, one refcnt to memcg without 2833 * struct page_cgroup is acquired. This refcnt will be consumed by 2834 * "commit()" or removed by "cancel()" 2835 */ 2836int mem_cgroup_try_charge_swapin(struct mm_struct *mm, 2837 struct page *page, 2838 gfp_t mask, struct mem_cgroup **memcgp) 2839{ 2840 struct mem_cgroup *memcg; 2841 int ret; 2842 2843 *memcgp = NULL; 2844 2845 if (mem_cgroup_disabled()) 2846 return 0; 2847 2848 if (!do_swap_account) 2849 goto charge_cur_mm; 2850 /* 2851 * A racing thread's fault, or swapoff, may have already updated 2852 * the pte, and even removed page from swap cache: in those cases 2853 * do_swap_page()'s pte_same() test will fail; but there's also a 2854 * KSM case which does need to charge the page. 2855 */ 2856 if (!PageSwapCache(page)) 2857 goto charge_cur_mm; 2858 memcg = try_get_mem_cgroup_from_page(page); 2859 if (!memcg) 2860 goto charge_cur_mm; 2861 *memcgp = memcg; 2862 ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true); 2863 css_put(&memcg->css); 2864 return ret; 2865charge_cur_mm: 2866 if (unlikely(!mm)) 2867 mm = &init_mm; 2868 return __mem_cgroup_try_charge(mm, mask, 1, memcgp, true); 2869} 2870 2871static void 2872__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg, 2873 enum charge_type ctype) 2874{ 2875 if (mem_cgroup_disabled()) 2876 return; 2877 if (!memcg) 2878 return; 2879 cgroup_exclude_rmdir(&memcg->css); 2880 2881 __mem_cgroup_commit_charge_lrucare(page, memcg, ctype); 2882 /* 2883 * Now swap is on-memory. This means this page may be 2884 * counted both as mem and swap....double count. 2885 * Fix it by uncharging from memsw. Basically, this SwapCache is stable 2886 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page() 2887 * may call delete_from_swap_cache() before reach here. 
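 *
 * A worked example (illustrative; one page, do_swap_account enabled, the
 * page stays in the same memcg): while the page sat in swap, only memsw
 * held a one-page charge for it.  The swap-in commit above charged both
 * res and memsw again, so the data is momentarily counted twice in memsw;
 * the block below drops the stale one-page memsw charge recorded at
 * swap-out time, leaving exactly one charge in res and one in memsw.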
2888 */ 2889 if (do_swap_account && PageSwapCache(page)) { 2890 swp_entry_t ent = {.val = page_private(page)}; 2891 struct mem_cgroup *swap_memcg; 2892 unsigned short id; 2893 2894 id = swap_cgroup_record(ent, 0); 2895 rcu_read_lock(); 2896 swap_memcg = mem_cgroup_lookup(id); 2897 if (swap_memcg) { 2898 /* 2899 * The recorded memcg can be an obsolete one. So, avoid 2900 * calling css_tryget() 2901 */ 2902 if (!mem_cgroup_is_root(swap_memcg)) 2903 res_counter_uncharge(&swap_memcg->memsw, 2904 PAGE_SIZE); 2905 mem_cgroup_swap_statistics(swap_memcg, false); 2906 mem_cgroup_put(swap_memcg); 2907 } 2908 rcu_read_unlock(); 2909 } 2910 /* 2911 * At swapin, we may charge against a cgroup which has no tasks. 2912 * So, rmdir()->pre_destroy() can be called while we do this charge. 2913 * In that case, we need to call pre_destroy() again; check it here. 2914 */ 2915 cgroup_release_and_wakeup_rmdir(&memcg->css); 2916} 2917 2918void mem_cgroup_commit_charge_swapin(struct page *page, 2919 struct mem_cgroup *memcg) 2920{ 2921 __mem_cgroup_commit_charge_swapin(page, memcg, 2922 MEM_CGROUP_CHARGE_TYPE_MAPPED); 2923} 2924 2925void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg) 2926{ 2927 if (mem_cgroup_disabled()) 2928 return; 2929 if (!memcg) 2930 return; 2931 __mem_cgroup_cancel_charge(memcg, 1); 2932} 2933 2934static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg, 2935 unsigned int nr_pages, 2936 const enum charge_type ctype) 2937{ 2938 struct memcg_batch_info *batch = NULL; 2939 bool uncharge_memsw = true; 2940 2941 /* If swapout, usage of swap doesn't decrease */ 2942 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) 2943 uncharge_memsw = false; 2944 2945 batch = &current->memcg_batch; 2946 /* 2947 * Usually we do css_get() when we remember a memcg pointer. 2948 * But in this case, we keep res->usage until the end of a series of 2949 * uncharges, so it's ok to ignore the memcg's refcnt. 2950 */ 2951 if (!batch->memcg) 2952 batch->memcg = memcg; 2953 /* 2954 * do_batch > 0 when unmapping pages or inode invalidate/truncate. 2955 * In those cases, all pages freed continuously can be expected to be in 2956 * the same cgroup and we have a chance to coalesce uncharges. 2957 * But we uncharge one by one if the task was killed by OOM (TIF_MEMDIE), 2958 * because we want to uncharge as soon as possible. 2959 */ 2960 2961 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE)) 2962 goto direct_uncharge; 2963 2964 if (nr_pages > 1) 2965 goto direct_uncharge; 2966 2967 /* 2968 * In the typical case, batch->memcg == memcg. This means we can 2969 * merge a series of uncharges into a single res_counter uncharge. 2970 * If not, we uncharge the res_counter one by one.
2971 */ 2972 if (batch->memcg != memcg) 2973 goto direct_uncharge; 2974 /* remember freed charge and uncharge it later */ 2975 batch->nr_pages++; 2976 if (uncharge_memsw) 2977 batch->memsw_nr_pages++; 2978 return; 2979direct_uncharge: 2980 res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE); 2981 if (uncharge_memsw) 2982 res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE); 2983 if (unlikely(batch->memcg != memcg)) 2984 memcg_oom_recover(memcg); 2985 return; 2986} 2987 2988/* 2989 * uncharge if !page_mapped(page) 2990 */ 2991static struct mem_cgroup * 2992__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) 2993{ 2994 struct mem_cgroup *memcg = NULL; 2995 unsigned int nr_pages = 1; 2996 struct page_cgroup *pc; 2997 2998 if (mem_cgroup_disabled()) 2999 return NULL; 3000 3001 if (PageSwapCache(page)) 3002 return NULL; 3003 3004 if (PageTransHuge(page)) { 3005 nr_pages <<= compound_order(page); 3006 VM_BUG_ON(!PageTransHuge(page)); 3007 } 3008 /* 3009 * Check if our page_cgroup is valid 3010 */ 3011 pc = lookup_page_cgroup(page); 3012 if (unlikely(!PageCgroupUsed(pc))) 3013 return NULL; 3014 3015 lock_page_cgroup(pc); 3016 3017 memcg = pc->mem_cgroup; 3018 3019 if (!PageCgroupUsed(pc)) 3020 goto unlock_out; 3021 3022 switch (ctype) { 3023 case MEM_CGROUP_CHARGE_TYPE_MAPPED: 3024 case MEM_CGROUP_CHARGE_TYPE_DROP: 3025 /* See mem_cgroup_prepare_migration() */ 3026 if (page_mapped(page) || PageCgroupMigration(pc)) 3027 goto unlock_out; 3028 break; 3029 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT: 3030 if (!PageAnon(page)) { /* Shared memory */ 3031 if (page->mapping && !page_is_file_cache(page)) 3032 goto unlock_out; 3033 } else if (page_mapped(page)) /* Anon */ 3034 goto unlock_out; 3035 break; 3036 default: 3037 break; 3038 } 3039 3040 mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -nr_pages); 3041 3042 ClearPageCgroupUsed(pc); 3043 /* 3044 * pc->mem_cgroup is not cleared here. It will be accessed when it's 3045 * freed from LRU. This is safe because uncharged page is expected not 3046 * to be reused (freed soon). Exception is SwapCache, it's handled by 3047 * special functions. 3048 */ 3049 3050 unlock_page_cgroup(pc); 3051 /* 3052 * even after unlock, we have memcg->res.usage here and this memcg 3053 * will never be freed. 3054 */ 3055 memcg_check_events(memcg, page); 3056 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { 3057 mem_cgroup_swap_statistics(memcg, true); 3058 mem_cgroup_get(memcg); 3059 } 3060 if (!mem_cgroup_is_root(memcg)) 3061 mem_cgroup_do_uncharge(memcg, nr_pages, ctype); 3062 3063 return memcg; 3064 3065unlock_out: 3066 unlock_page_cgroup(pc); 3067 return NULL; 3068} 3069 3070void mem_cgroup_uncharge_page(struct page *page) 3071{ 3072 /* early check. */ 3073 if (page_mapped(page)) 3074 return; 3075 VM_BUG_ON(page->mapping && !PageAnon(page)); 3076 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED); 3077} 3078 3079void mem_cgroup_uncharge_cache_page(struct page *page) 3080{ 3081 VM_BUG_ON(page_mapped(page)); 3082 VM_BUG_ON(page->mapping); 3083 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE); 3084} 3085 3086/* 3087 * Batch_start/batch_end is called in unmap_page_range/invlidate/trucate. 3088 * In that cases, pages are freed continuously and we can expect pages 3089 * are in the same memcg. All these calls itself limits the number of 3090 * pages freed at once, then uncharge_start/end() is called properly. 
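 *
 * Typical usage by such a caller (illustrative sketch; the loop body
 * stands for whatever per-page freeing the caller actually does):
 *
 *     mem_cgroup_uncharge_start();
 *     for each page being freed:
 *         mem_cgroup_uncharge_page(page);   -- or _cache_page()
 *     mem_cgroup_uncharge_end();
 *
 * so the individual uncharges are coalesced into one res_counter update
 * per batch instead of one update per page.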
3091 * This may be called multiple (e.g. 2) times in a context, 3092 */ 3093 3094void mem_cgroup_uncharge_start(void) 3095{ 3096 current->memcg_batch.do_batch++; 3097 /* We can nest. */ 3098 if (current->memcg_batch.do_batch == 1) { 3099 current->memcg_batch.memcg = NULL; 3100 current->memcg_batch.nr_pages = 0; 3101 current->memcg_batch.memsw_nr_pages = 0; 3102 } 3103} 3104 3105void mem_cgroup_uncharge_end(void) 3106{ 3107 struct memcg_batch_info *batch = &current->memcg_batch; 3108 3109 if (!batch->do_batch) 3110 return; 3111 3112 batch->do_batch--; 3113 if (batch->do_batch) /* If stacked, do nothing. */ 3114 return; 3115 3116 if (!batch->memcg) 3117 return; 3118 /* 3119 * This "batch->memcg" is valid without any css_get/put etc... 3120 * because we hide charges behind us. 3121 */ 3122 if (batch->nr_pages) 3123 res_counter_uncharge(&batch->memcg->res, 3124 batch->nr_pages * PAGE_SIZE); 3125 if (batch->memsw_nr_pages) 3126 res_counter_uncharge(&batch->memcg->memsw, 3127 batch->memsw_nr_pages * PAGE_SIZE); 3128 memcg_oom_recover(batch->memcg); 3129 /* forget this pointer (for sanity check) */ 3130 batch->memcg = NULL; 3131} 3132 3133#ifdef CONFIG_SWAP 3134/* 3135 * called after __delete_from_swap_cache(); drops the "page" account. 3136 * The memcg information is recorded in the swap_cgroup of "ent". 3137 */ 3138void 3139mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) 3140{ 3141 struct mem_cgroup *memcg; 3142 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT; 3143 3144 if (!swapout) /* this was a swap cache but the swap is unused! */ 3145 ctype = MEM_CGROUP_CHARGE_TYPE_DROP; 3146 3147 memcg = __mem_cgroup_uncharge_common(page, ctype); 3148 3149 /* 3150 * Record the memcg information. If swapout && memcg != NULL, 3151 * mem_cgroup_get() was called in uncharge(). 3152 */ 3153 if (do_swap_account && swapout && memcg) 3154 swap_cgroup_record(ent, css_id(&memcg->css)); 3155} 3156#endif 3157 3158#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 3159/* 3160 * called from swap_entry_free(). Removes the record in swap_cgroup and 3161 * uncharges the "memsw" account. 3162 */ 3163void mem_cgroup_uncharge_swap(swp_entry_t ent) 3164{ 3165 struct mem_cgroup *memcg; 3166 unsigned short id; 3167 3168 if (!do_swap_account) 3169 return; 3170 3171 id = swap_cgroup_record(ent, 0); 3172 rcu_read_lock(); 3173 memcg = mem_cgroup_lookup(id); 3174 if (memcg) { 3175 /* 3176 * We uncharge this because the swap is freed. 3177 * This memcg can be an obsolete one, so we avoid calling css_tryget() 3178 */ 3179 if (!mem_cgroup_is_root(memcg)) 3180 res_counter_uncharge(&memcg->memsw, PAGE_SIZE); 3181 mem_cgroup_swap_statistics(memcg, false); 3182 mem_cgroup_put(memcg); 3183 } 3184 rcu_read_unlock(); 3185} 3186 3187/** 3188 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3189 * @entry: swap entry to be moved 3190 * @from: mem_cgroup which the entry is moved from 3191 * @to: mem_cgroup which the entry is moved to 3192 * @need_fixup: whether we should fixup res_counters and refcounts. 3193 * 3194 * It succeeds only when the swap_cgroup's record for this entry is the same 3195 * as the mem_cgroup's id of @from. 3196 * 3197 * Returns 0 on success, -EINVAL on failure. 3198 * 3199 * The caller must have charged to @to, IOW, called res_counter_charge() for 3200 * both res and memsw, and called css_get().
3201 */ 3202static int mem_cgroup_move_swap_account(swp_entry_t entry, 3203 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) 3204{ 3205 unsigned short old_id, new_id; 3206 3207 old_id = css_id(&from->css); 3208 new_id = css_id(&to->css); 3209 3210 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3211 mem_cgroup_swap_statistics(from, false); 3212 mem_cgroup_swap_statistics(to, true); 3213 /* 3214 * This function is only called from task migration context now. 3215 * It postpones res_counter and refcount handling till the end 3216 * of task migration(mem_cgroup_clear_mc()) for performance 3217 * improvement. But we cannot postpone mem_cgroup_get(to) 3218 * because if the process that has been moved to @to does 3219 * swap-in, the refcount of @to might be decreased to 0. 3220 */ 3221 mem_cgroup_get(to); 3222 if (need_fixup) { 3223 if (!mem_cgroup_is_root(from)) 3224 res_counter_uncharge(&from->memsw, PAGE_SIZE); 3225 mem_cgroup_put(from); 3226 /* 3227 * we charged both to->res and to->memsw, so we should 3228 * uncharge to->res. 3229 */ 3230 if (!mem_cgroup_is_root(to)) 3231 res_counter_uncharge(&to->res, PAGE_SIZE); 3232 } 3233 return 0; 3234 } 3235 return -EINVAL; 3236} 3237#else 3238static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3239 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) 3240{ 3241 return -EINVAL; 3242} 3243#endif 3244 3245/* 3246 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old 3247 * page belongs to. 3248 */ 3249int mem_cgroup_prepare_migration(struct page *page, 3250 struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask) 3251{ 3252 struct mem_cgroup *memcg = NULL; 3253 struct page_cgroup *pc; 3254 enum charge_type ctype; 3255 int ret = 0; 3256 3257 *memcgp = NULL; 3258 3259 VM_BUG_ON(PageTransHuge(page)); 3260 if (mem_cgroup_disabled()) 3261 return 0; 3262 3263 pc = lookup_page_cgroup(page); 3264 lock_page_cgroup(pc); 3265 if (PageCgroupUsed(pc)) { 3266 memcg = pc->mem_cgroup; 3267 css_get(&memcg->css); 3268 /* 3269 * At migrating an anonymous page, its mapcount goes down 3270 * to 0 and uncharge() will be called. But, even if it's fully 3271 * unmapped, migration may fail and this page has to be 3272 * charged again. We set MIGRATION flag here and delay uncharge 3273 * until end_migration() is called 3274 * 3275 * Corner Case Thinking 3276 * A) 3277 * When the old page was mapped as Anon and it's unmap-and-freed 3278 * while migration was ongoing. 3279 * If unmap finds the old page, uncharge() of it will be delayed 3280 * until end_migration(). If unmap finds a new page, it's 3281 * uncharged when it make mapcount to be 1->0. If unmap code 3282 * finds swap_migration_entry, the new page will not be mapped 3283 * and end_migration() will find it(mapcount==0). 3284 * 3285 * B) 3286 * When the old page was mapped but migraion fails, the kernel 3287 * remaps it. A charge for it is kept by MIGRATION flag even 3288 * if mapcount goes down to 0. We can do remap successfully 3289 * without charging it again. 3290 * 3291 * C) 3292 * The "old" page is under lock_page() until the end of 3293 * migration, so, the old page itself will not be swapped-out. 3294 * If the new page is swapped out before end_migraton, our 3295 * hook to usual swap-out path will catch the event. 3296 */ 3297 if (PageAnon(page)) 3298 SetPageCgroupMigration(pc); 3299 } 3300 unlock_page_cgroup(pc); 3301 /* 3302 * If the page is not charged at this point, 3303 * we return here. 
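 *
 * For orientation, the migration path is expected to use the two hooks
 * in this order (illustrative sketch only):
 *
 *     mem_cgroup_prepare_migration(page, newpage, &memcg, gfp_mask);
 *     ... copy and remap the page, which may succeed or fail ...
 *     mem_cgroup_end_migration(memcg, page, newpage, migration_ok);
 *
 * prepare charges newpage up front and marks an anon oldpage with the
 * MIGRATION flag; end_migration() later uncharges whichever page ended
 * up unused and clears the flag.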
3304 */ 3305 if (!memcg) 3306 return 0; 3307 3308 *memcgp = memcg; 3309 ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, memcgp, false); 3310 css_put(&memcg->css);/* drop extra refcnt */ 3311 if (ret || *memcgp == NULL) { 3312 if (PageAnon(page)) { 3313 lock_page_cgroup(pc); 3314 ClearPageCgroupMigration(pc); 3315 unlock_page_cgroup(pc); 3316 /* 3317 * The old page may be fully unmapped while we kept it. 3318 */ 3319 mem_cgroup_uncharge_page(page); 3320 } 3321 return -ENOMEM; 3322 } 3323 /* 3324 * We charge new page before it's used/mapped. So, even if unlock_page() 3325 * is called before end_migration, we can catch all events on this new 3326 * page. In the case new page is migrated but not remapped, new page's 3327 * mapcount will be finally 0 and we call uncharge in end_migration(). 3328 */ 3329 pc = lookup_page_cgroup(newpage); 3330 if (PageAnon(page)) 3331 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED; 3332 else if (page_is_file_cache(page)) 3333 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; 3334 else 3335 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; 3336 __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype); 3337 return ret; 3338} 3339 3340/* remove redundant charge if migration failed*/ 3341void mem_cgroup_end_migration(struct mem_cgroup *memcg, 3342 struct page *oldpage, struct page *newpage, bool migration_ok) 3343{ 3344 struct page *used, *unused; 3345 struct page_cgroup *pc; 3346 3347 if (!memcg) 3348 return; 3349 /* blocks rmdir() */ 3350 cgroup_exclude_rmdir(&memcg->css); 3351 if (!migration_ok) { 3352 used = oldpage; 3353 unused = newpage; 3354 } else { 3355 used = newpage; 3356 unused = oldpage; 3357 } 3358 /* 3359 * We disallowed uncharge of pages under migration because mapcount 3360 * of the page goes down to zero, temporarly. 3361 * Clear the flag and check the page should be charged. 3362 */ 3363 pc = lookup_page_cgroup(oldpage); 3364 lock_page_cgroup(pc); 3365 ClearPageCgroupMigration(pc); 3366 unlock_page_cgroup(pc); 3367 3368 __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE); 3369 3370 /* 3371 * If a page is a file cache, radix-tree replacement is very atomic 3372 * and we can skip this check. When it was an Anon page, its mapcount 3373 * goes down to 0. But because we added MIGRATION flage, it's not 3374 * uncharged yet. There are several case but page->mapcount check 3375 * and USED bit check in mem_cgroup_uncharge_page() will do enough 3376 * check. (see prepare_charge() also) 3377 */ 3378 if (PageAnon(used)) 3379 mem_cgroup_uncharge_page(used); 3380 /* 3381 * At migration, we may charge account against cgroup which has no 3382 * tasks. 3383 * So, rmdir()->pre_destroy() can be called while we do this charge. 3384 * In that case, we need to call pre_destroy() again. check it here. 3385 */ 3386 cgroup_release_and_wakeup_rmdir(&memcg->css); 3387} 3388 3389/* 3390 * At replace page cache, newpage is not under any memcg but it's on 3391 * LRU. So, this function doesn't touch res_counter but handles LRU 3392 * in correct way. Both pages are locked so we cannot race with uncharge. 
3393 */ 3394void mem_cgroup_replace_page_cache(struct page *oldpage, 3395 struct page *newpage) 3396{ 3397 struct mem_cgroup *memcg; 3398 struct page_cgroup *pc; 3399 struct zone *zone; 3400 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE; 3401 unsigned long flags; 3402 3403 if (mem_cgroup_disabled()) 3404 return; 3405 3406 pc = lookup_page_cgroup(oldpage); 3407 /* fix accounting on old pages */ 3408 lock_page_cgroup(pc); 3409 memcg = pc->mem_cgroup; 3410 mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1); 3411 ClearPageCgroupUsed(pc); 3412 unlock_page_cgroup(pc); 3413 3414 if (PageSwapBacked(oldpage)) 3415 type = MEM_CGROUP_CHARGE_TYPE_SHMEM; 3416 3417 zone = page_zone(newpage); 3418 pc = lookup_page_cgroup(newpage); 3419 /* 3420 * Even if newpage->mapping was NULL before starting replacement, 3421 * the newpage may be on LRU(or pagevec for LRU) already. We lock 3422 * LRU while we overwrite pc->mem_cgroup. 3423 */ 3424 spin_lock_irqsave(&zone->lru_lock, flags); 3425 if (PageLRU(newpage)) 3426 del_page_from_lru_list(zone, newpage, page_lru(newpage)); 3427 __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type); 3428 if (PageLRU(newpage)) 3429 add_page_to_lru_list(zone, newpage, page_lru(newpage)); 3430 spin_unlock_irqrestore(&zone->lru_lock, flags); 3431} 3432 3433#ifdef CONFIG_DEBUG_VM 3434static struct page_cgroup *lookup_page_cgroup_used(struct page *page) 3435{ 3436 struct page_cgroup *pc; 3437 3438 pc = lookup_page_cgroup(page); 3439 /* 3440 * Can be NULL while feeding pages into the page allocator for 3441 * the first time, i.e. during boot or memory hotplug; 3442 * or when mem_cgroup_disabled(). 3443 */ 3444 if (likely(pc) && PageCgroupUsed(pc)) 3445 return pc; 3446 return NULL; 3447} 3448 3449bool mem_cgroup_bad_page_check(struct page *page) 3450{ 3451 if (mem_cgroup_disabled()) 3452 return false; 3453 3454 return lookup_page_cgroup_used(page) != NULL; 3455} 3456 3457void mem_cgroup_print_bad_page(struct page *page) 3458{ 3459 struct page_cgroup *pc; 3460 3461 pc = lookup_page_cgroup_used(page); 3462 if (pc) { 3463 int ret = -1; 3464 char *path; 3465 3466 printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p", 3467 pc, pc->flags, pc->mem_cgroup); 3468 3469 path = kmalloc(PATH_MAX, GFP_KERNEL); 3470 if (path) { 3471 rcu_read_lock(); 3472 ret = cgroup_path(pc->mem_cgroup->css.cgroup, 3473 path, PATH_MAX); 3474 rcu_read_unlock(); 3475 } 3476 3477 printk(KERN_CONT "(%s)\n", 3478 (ret < 0) ? "cannot get the path" : path); 3479 kfree(path); 3480 } 3481} 3482#endif 3483 3484static DEFINE_MUTEX(set_limit_mutex); 3485 3486static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 3487 unsigned long long val) 3488{ 3489 int retry_count; 3490 u64 memswlimit, memlimit; 3491 int ret = 0; 3492 int children = mem_cgroup_count_children(memcg); 3493 u64 curusage, oldusage; 3494 int enlarge; 3495 3496 /* 3497 * For keeping hierarchical_reclaim simple, how long we should retry 3498 * is depends on callers. We set our retry-count to be function 3499 * of # of children which we should visit in this loop. 3500 */ 3501 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children; 3502 3503 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE); 3504 3505 enlarge = 0; 3506 while (retry_count) { 3507 if (signal_pending(current)) { 3508 ret = -EINTR; 3509 break; 3510 } 3511 /* 3512 * Rather than hide all in some function, I do this in 3513 * open coded manner. You see what this really does. 3514 * We have to guarantee memcg->res.limit < memcg->memsw.limit. 
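 *
 * Illustrative consequence for user space (example values only): with
 * both limits at 512M, raising them to 1G has to update
 * memory.memsw.limit_in_bytes first and memory.limit_in_bytes second;
 * the opposite order is rejected with -EINVAL below, because the new res
 * limit would exceed the still-smaller memsw limit.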
3515 */ 3516 mutex_lock(&set_limit_mutex); 3517 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3518 if (memswlimit < val) { 3519 ret = -EINVAL; 3520 mutex_unlock(&set_limit_mutex); 3521 break; 3522 } 3523 3524 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 3525 if (memlimit < val) 3526 enlarge = 1; 3527 3528 ret = res_counter_set_limit(&memcg->res, val); 3529 if (!ret) { 3530 if (memswlimit == val) 3531 memcg->memsw_is_minimum = true; 3532 else 3533 memcg->memsw_is_minimum = false; 3534 } 3535 mutex_unlock(&set_limit_mutex); 3536 3537 if (!ret) 3538 break; 3539 3540 mem_cgroup_reclaim(memcg, GFP_KERNEL, 3541 MEM_CGROUP_RECLAIM_SHRINK); 3542 curusage = res_counter_read_u64(&memcg->res, RES_USAGE); 3543 /* Usage is reduced ? */ 3544 if (curusage >= oldusage) 3545 retry_count--; 3546 else 3547 oldusage = curusage; 3548 } 3549 if (!ret && enlarge) 3550 memcg_oom_recover(memcg); 3551 3552 return ret; 3553} 3554 3555static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 3556 unsigned long long val) 3557{ 3558 int retry_count; 3559 u64 memlimit, memswlimit, oldusage, curusage; 3560 int children = mem_cgroup_count_children(memcg); 3561 int ret = -EBUSY; 3562 int enlarge = 0; 3563 3564 /* see mem_cgroup_resize_res_limit */ 3565 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; 3566 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 3567 while (retry_count) { 3568 if (signal_pending(current)) { 3569 ret = -EINTR; 3570 break; 3571 } 3572 /* 3573 * Rather than hide all in some function, I do this in 3574 * open coded manner. You see what this really does. 3575 * We have to guarantee memcg->res.limit < memcg->memsw.limit. 3576 */ 3577 mutex_lock(&set_limit_mutex); 3578 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 3579 if (memlimit > val) { 3580 ret = -EINVAL; 3581 mutex_unlock(&set_limit_mutex); 3582 break; 3583 } 3584 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3585 if (memswlimit < val) 3586 enlarge = 1; 3587 ret = res_counter_set_limit(&memcg->memsw, val); 3588 if (!ret) { 3589 if (memlimit == val) 3590 memcg->memsw_is_minimum = true; 3591 else 3592 memcg->memsw_is_minimum = false; 3593 } 3594 mutex_unlock(&set_limit_mutex); 3595 3596 if (!ret) 3597 break; 3598 3599 mem_cgroup_reclaim(memcg, GFP_KERNEL, 3600 MEM_CGROUP_RECLAIM_NOSWAP | 3601 MEM_CGROUP_RECLAIM_SHRINK); 3602 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 3603 /* Usage is reduced ? 
*/ 3604 if (curusage >= oldusage) 3605 retry_count--; 3606 else 3607 oldusage = curusage; 3608 } 3609 if (!ret && enlarge) 3610 memcg_oom_recover(memcg); 3611 return ret; 3612} 3613 3614unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 3615 gfp_t gfp_mask, 3616 unsigned long *total_scanned) 3617{ 3618 unsigned long nr_reclaimed = 0; 3619 struct mem_cgroup_per_zone *mz, *next_mz = NULL; 3620 unsigned long reclaimed; 3621 int loop = 0; 3622 struct mem_cgroup_tree_per_zone *mctz; 3623 unsigned long long excess; 3624 unsigned long nr_scanned; 3625 3626 if (order > 0) 3627 return 0; 3628 3629 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone)); 3630 /* 3631 * This loop can run a while, specially if mem_cgroup's continuously 3632 * keep exceeding their soft limit and putting the system under 3633 * pressure 3634 */ 3635 do { 3636 if (next_mz) 3637 mz = next_mz; 3638 else 3639 mz = mem_cgroup_largest_soft_limit_node(mctz); 3640 if (!mz) 3641 break; 3642 3643 nr_scanned = 0; 3644 reclaimed = mem_cgroup_soft_reclaim(mz->mem, zone, 3645 gfp_mask, &nr_scanned); 3646 nr_reclaimed += reclaimed; 3647 *total_scanned += nr_scanned; 3648 spin_lock(&mctz->lock); 3649 3650 /* 3651 * If we failed to reclaim anything from this memory cgroup 3652 * it is time to move on to the next cgroup 3653 */ 3654 next_mz = NULL; 3655 if (!reclaimed) { 3656 do { 3657 /* 3658 * Loop until we find yet another one. 3659 * 3660 * By the time we get the soft_limit lock 3661 * again, someone might have aded the 3662 * group back on the RB tree. Iterate to 3663 * make sure we get a different mem. 3664 * mem_cgroup_largest_soft_limit_node returns 3665 * NULL if no other cgroup is present on 3666 * the tree 3667 */ 3668 next_mz = 3669 __mem_cgroup_largest_soft_limit_node(mctz); 3670 if (next_mz == mz) 3671 css_put(&next_mz->mem->css); 3672 else /* next_mz == NULL or other memcg */ 3673 break; 3674 } while (1); 3675 } 3676 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz); 3677 excess = res_counter_soft_limit_excess(&mz->mem->res); 3678 /* 3679 * One school of thought says that we should not add 3680 * back the node to the tree if reclaim returns 0. 3681 * But our reclaim could return 0, simply because due 3682 * to priority we are exposing a smaller subset of 3683 * memory to reclaim from. Consider this as a longer 3684 * term TODO. 3685 */ 3686 /* If excess == 0, no tree ops */ 3687 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess); 3688 spin_unlock(&mctz->lock); 3689 css_put(&mz->mem->css); 3690 loop++; 3691 /* 3692 * Could not reclaim anything and there are no more 3693 * mem cgroups to try or we seem to be looping without 3694 * reclaiming anything. 3695 */ 3696 if (!nr_reclaimed && 3697 (next_mz == NULL || 3698 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3699 break; 3700 } while (!nr_reclaimed); 3701 if (next_mz) 3702 css_put(&next_mz->mem->css); 3703 return nr_reclaimed; 3704} 3705 3706/* 3707 * This routine traverse page_cgroup in given list and drop them all. 3708 * *And* this routine doesn't reclaim page itself, just removes page_cgroup. 
3709 */ 3710static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg, 3711 int node, int zid, enum lru_list lru) 3712{ 3713 struct mem_cgroup_per_zone *mz; 3714 unsigned long flags, loop; 3715 struct list_head *list; 3716 struct page *busy; 3717 struct zone *zone; 3718 int ret = 0; 3719 3720 zone = &NODE_DATA(node)->node_zones[zid]; 3721 mz = mem_cgroup_zoneinfo(memcg, node, zid); 3722 list = &mz->lruvec.lists[lru]; 3723 3724 loop = MEM_CGROUP_ZSTAT(mz, lru); 3725 /* give some margin against EBUSY etc...*/ 3726 loop += 256; 3727 busy = NULL; 3728 while (loop--) { 3729 struct page_cgroup *pc; 3730 struct page *page; 3731 3732 ret = 0; 3733 spin_lock_irqsave(&zone->lru_lock, flags); 3734 if (list_empty(list)) { 3735 spin_unlock_irqrestore(&zone->lru_lock, flags); 3736 break; 3737 } 3738 page = list_entry(list->prev, struct page, lru); 3739 if (busy == page) { 3740 list_move(&page->lru, list); 3741 busy = NULL; 3742 spin_unlock_irqrestore(&zone->lru_lock, flags); 3743 continue; 3744 } 3745 spin_unlock_irqrestore(&zone->lru_lock, flags); 3746 3747 pc = lookup_page_cgroup(page); 3748 3749 ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL); 3750 if (ret == -ENOMEM) 3751 break; 3752 3753 if (ret == -EBUSY || ret == -EINVAL) { 3754 /* found lock contention or "pc" is obsolete. */ 3755 busy = page; 3756 cond_resched(); 3757 } else 3758 busy = NULL; 3759 } 3760 3761 if (!ret && !list_empty(list)) 3762 return -EBUSY; 3763 return ret; 3764} 3765 3766/* 3767 * make mem_cgroup's charge to be 0 if there is no task. 3768 * This enables deleting this mem_cgroup. 3769 */ 3770static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all) 3771{ 3772 int ret; 3773 int node, zid, shrink; 3774 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 3775 struct cgroup *cgrp = memcg->css.cgroup; 3776 3777 css_get(&memcg->css); 3778 3779 shrink = 0; 3780 /* should free all ? */ 3781 if (free_all) 3782 goto try_to_free; 3783move_account: 3784 do { 3785 ret = -EBUSY; 3786 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children)) 3787 goto out; 3788 ret = -EINTR; 3789 if (signal_pending(current)) 3790 goto out; 3791 /* This is for making all *used* pages to be on LRU. */ 3792 lru_add_drain_all(); 3793 drain_all_stock_sync(memcg); 3794 ret = 0; 3795 mem_cgroup_start_move(memcg); 3796 for_each_node_state(node, N_HIGH_MEMORY) { 3797 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { 3798 enum lru_list l; 3799 for_each_lru(l) { 3800 ret = mem_cgroup_force_empty_list(memcg, 3801 node, zid, l); 3802 if (ret) 3803 break; 3804 } 3805 } 3806 if (ret) 3807 break; 3808 } 3809 mem_cgroup_end_move(memcg); 3810 memcg_oom_recover(memcg); 3811 /* it seems parent cgroup doesn't have enough mem */ 3812 if (ret == -ENOMEM) 3813 goto try_to_free; 3814 cond_resched(); 3815 /* "ret" should also be checked to ensure all lists are empty. */ 3816 } while (memcg->res.usage > 0 || ret); 3817out: 3818 css_put(&memcg->css); 3819 return ret; 3820 3821try_to_free: 3822 /* returns EBUSY if there is a task or if we come here twice. 
*/ 3823 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) { 3824 ret = -EBUSY; 3825 goto out; 3826 } 3827 /* we call try-to-free pages for make this cgroup empty */ 3828 lru_add_drain_all(); 3829 /* try to free all pages in this cgroup */ 3830 shrink = 1; 3831 while (nr_retries && memcg->res.usage > 0) { 3832 int progress; 3833 3834 if (signal_pending(current)) { 3835 ret = -EINTR; 3836 goto out; 3837 } 3838 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, 3839 false); 3840 if (!progress) { 3841 nr_retries--; 3842 /* maybe some writeback is necessary */ 3843 congestion_wait(BLK_RW_ASYNC, HZ/10); 3844 } 3845 3846 } 3847 lru_add_drain(); 3848 /* try move_account...there may be some *locked* pages. */ 3849 goto move_account; 3850} 3851 3852int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event) 3853{ 3854 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true); 3855} 3856 3857 3858static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft) 3859{ 3860 return mem_cgroup_from_cont(cont)->use_hierarchy; 3861} 3862 3863static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft, 3864 u64 val) 3865{ 3866 int retval = 0; 3867 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 3868 struct cgroup *parent = cont->parent; 3869 struct mem_cgroup *parent_memcg = NULL; 3870 3871 if (parent) 3872 parent_memcg = mem_cgroup_from_cont(parent); 3873 3874 cgroup_lock(); 3875 /* 3876 * If parent's use_hierarchy is set, we can't make any modifications 3877 * in the child subtrees. If it is unset, then the change can 3878 * occur, provided the current cgroup has no children. 3879 * 3880 * For the root cgroup, parent_mem is NULL, we allow value to be 3881 * set if there are no children. 3882 */ 3883 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 3884 (val == 1 || val == 0)) { 3885 if (list_empty(&cont->children)) 3886 memcg->use_hierarchy = val; 3887 else 3888 retval = -EBUSY; 3889 } else 3890 retval = -EINVAL; 3891 cgroup_unlock(); 3892 3893 return retval; 3894} 3895 3896 3897static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg, 3898 enum mem_cgroup_stat_index idx) 3899{ 3900 struct mem_cgroup *iter; 3901 long val = 0; 3902 3903 /* Per-cpu values can be negative, use a signed accumulator */ 3904 for_each_mem_cgroup_tree(iter, memcg) 3905 val += mem_cgroup_read_stat(iter, idx); 3906 3907 if (val < 0) /* race ? 
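 * (per-CPU counters are summed without locking, so a reader can transiently
 *  see a negative total; clamp it to zero rather than report garbage)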
*/ 3908 val = 0; 3909 return val; 3910} 3911 3912static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3913{ 3914 u64 val; 3915 3916 if (!mem_cgroup_is_root(memcg)) { 3917 if (!swap) 3918 return res_counter_read_u64(&memcg->res, RES_USAGE); 3919 else 3920 return res_counter_read_u64(&memcg->memsw, RES_USAGE); 3921 } 3922 3923 val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE); 3924 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS); 3925 3926 if (swap) 3927 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAPOUT); 3928 3929 return val << PAGE_SHIFT; 3930} 3931 3932static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) 3933{ 3934 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 3935 u64 val; 3936 int type, name; 3937 3938 type = MEMFILE_TYPE(cft->private); 3939 name = MEMFILE_ATTR(cft->private); 3940 switch (type) { 3941 case _MEM: 3942 if (name == RES_USAGE) 3943 val = mem_cgroup_usage(memcg, false); 3944 else 3945 val = res_counter_read_u64(&memcg->res, name); 3946 break; 3947 case _MEMSWAP: 3948 if (name == RES_USAGE) 3949 val = mem_cgroup_usage(memcg, true); 3950 else 3951 val = res_counter_read_u64(&memcg->memsw, name); 3952 break; 3953 default: 3954 BUG(); 3955 break; 3956 } 3957 return val; 3958} 3959/* 3960 * The user of this function is... 3961 * RES_LIMIT. 3962 */ 3963static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft, 3964 const char *buffer) 3965{ 3966 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 3967 int type, name; 3968 unsigned long long val; 3969 int ret; 3970 3971 type = MEMFILE_TYPE(cft->private); 3972 name = MEMFILE_ATTR(cft->private); 3973 switch (name) { 3974 case RES_LIMIT: 3975 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3976 ret = -EINVAL; 3977 break; 3978 } 3979 /* This function does all necessary parse...reuse it */ 3980 ret = res_counter_memparse_write_strategy(buffer, &val); 3981 if (ret) 3982 break; 3983 if (type == _MEM) 3984 ret = mem_cgroup_resize_limit(memcg, val); 3985 else 3986 ret = mem_cgroup_resize_memsw_limit(memcg, val); 3987 break; 3988 case RES_SOFT_LIMIT: 3989 ret = res_counter_memparse_write_strategy(buffer, &val); 3990 if (ret) 3991 break; 3992 /* 3993 * For memsw, soft limits are hard to implement in terms 3994 * of semantics, for now, we support soft limits for 3995 * control without swap 3996 */ 3997 if (type == _MEM) 3998 ret = res_counter_set_soft_limit(&memcg->res, val); 3999 else 4000 ret = -EINVAL; 4001 break; 4002 default: 4003 ret = -EINVAL; /* should be BUG() ? 
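 * (only RES_LIMIT and RES_SOFT_LIMIT are handled above; as an illustration, a
 *  write such as "echo 512M > memory.limit_in_bytes" reaches the RES_LIMIT
 *  branch via res_counter_memparse_write_strategy(), the exact path depending
 *  on where the cgroup filesystem is mounted)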
*/ 4004 break; 4005 } 4006 return ret; 4007} 4008 4009static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg, 4010 unsigned long long *mem_limit, unsigned long long *memsw_limit) 4011{ 4012 struct cgroup *cgroup; 4013 unsigned long long min_limit, min_memsw_limit, tmp; 4014 4015 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT); 4016 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 4017 cgroup = memcg->css.cgroup; 4018 if (!memcg->use_hierarchy) 4019 goto out; 4020 4021 while (cgroup->parent) { 4022 cgroup = cgroup->parent; 4023 memcg = mem_cgroup_from_cont(cgroup); 4024 if (!memcg->use_hierarchy) 4025 break; 4026 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT); 4027 min_limit = min(min_limit, tmp); 4028 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 4029 min_memsw_limit = min(min_memsw_limit, tmp); 4030 } 4031out: 4032 *mem_limit = min_limit; 4033 *memsw_limit = min_memsw_limit; 4034 return; 4035} 4036 4037static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) 4038{ 4039 struct mem_cgroup *memcg; 4040 int type, name; 4041 4042 memcg = mem_cgroup_from_cont(cont); 4043 type = MEMFILE_TYPE(event); 4044 name = MEMFILE_ATTR(event); 4045 switch (name) { 4046 case RES_MAX_USAGE: 4047 if (type == _MEM) 4048 res_counter_reset_max(&memcg->res); 4049 else 4050 res_counter_reset_max(&memcg->memsw); 4051 break; 4052 case RES_FAILCNT: 4053 if (type == _MEM) 4054 res_counter_reset_failcnt(&memcg->res); 4055 else 4056 res_counter_reset_failcnt(&memcg->memsw); 4057 break; 4058 } 4059 4060 return 0; 4061} 4062 4063static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp, 4064 struct cftype *cft) 4065{ 4066 return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate; 4067} 4068 4069#ifdef CONFIG_MMU 4070static int mem_cgroup_move_charge_write(struct cgroup *cgrp, 4071 struct cftype *cft, u64 val) 4072{ 4073 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4074 4075 if (val >= (1 << NR_MOVE_TYPE)) 4076 return -EINVAL; 4077 /* 4078 * We check this value several times in both in can_attach() and 4079 * attach(), so we need cgroup lock to prevent this value from being 4080 * inconsistent. 
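 *
 * For reference, "val" is a bitmask bounded by NR_MOVE_TYPE above: bit 0
 * selects charges of anonymous pages, bit 1 those of file pages, so for
 * example "echo 3 > memory.move_charge_at_immigrate" enables both (path
 * relative to wherever the memory cgroup hierarchy is mounted).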
4081 */ 4082 cgroup_lock(); 4083 memcg->move_charge_at_immigrate = val; 4084 cgroup_unlock(); 4085 4086 return 0; 4087} 4088#else 4089static int mem_cgroup_move_charge_write(struct cgroup *cgrp, 4090 struct cftype *cft, u64 val) 4091{ 4092 return -ENOSYS; 4093} 4094#endif 4095 4096 4097/* For read statistics */ 4098enum { 4099 MCS_CACHE, 4100 MCS_RSS, 4101 MCS_FILE_MAPPED, 4102 MCS_PGPGIN, 4103 MCS_PGPGOUT, 4104 MCS_SWAP, 4105 MCS_PGFAULT, 4106 MCS_PGMAJFAULT, 4107 MCS_INACTIVE_ANON, 4108 MCS_ACTIVE_ANON, 4109 MCS_INACTIVE_FILE, 4110 MCS_ACTIVE_FILE, 4111 MCS_UNEVICTABLE, 4112 NR_MCS_STAT, 4113}; 4114 4115struct mcs_total_stat { 4116 s64 stat[NR_MCS_STAT]; 4117}; 4118 4119struct { 4120 char *local_name; 4121 char *total_name; 4122} memcg_stat_strings[NR_MCS_STAT] = { 4123 {"cache", "total_cache"}, 4124 {"rss", "total_rss"}, 4125 {"mapped_file", "total_mapped_file"}, 4126 {"pgpgin", "total_pgpgin"}, 4127 {"pgpgout", "total_pgpgout"}, 4128 {"swap", "total_swap"}, 4129 {"pgfault", "total_pgfault"}, 4130 {"pgmajfault", "total_pgmajfault"}, 4131 {"inactive_anon", "total_inactive_anon"}, 4132 {"active_anon", "total_active_anon"}, 4133 {"inactive_file", "total_inactive_file"}, 4134 {"active_file", "total_active_file"}, 4135 {"unevictable", "total_unevictable"} 4136}; 4137 4138 4139static void 4140mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s) 4141{ 4142 s64 val; 4143 4144 /* per cpu stat */ 4145 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE); 4146 s->stat[MCS_CACHE] += val * PAGE_SIZE; 4147 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS); 4148 s->stat[MCS_RSS] += val * PAGE_SIZE; 4149 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED); 4150 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE; 4151 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN); 4152 s->stat[MCS_PGPGIN] += val; 4153 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT); 4154 s->stat[MCS_PGPGOUT] += val; 4155 if (do_swap_account) { 4156 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT); 4157 s->stat[MCS_SWAP] += val * PAGE_SIZE; 4158 } 4159 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT); 4160 s->stat[MCS_PGFAULT] += val; 4161 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT); 4162 s->stat[MCS_PGMAJFAULT] += val; 4163 4164 /* per zone stat */ 4165 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON)); 4166 s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE; 4167 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON)); 4168 s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE; 4169 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE)); 4170 s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE; 4171 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE)); 4172 s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE; 4173 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE)); 4174 s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE; 4175} 4176 4177static void 4178mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s) 4179{ 4180 struct mem_cgroup *iter; 4181 4182 for_each_mem_cgroup_tree(iter, memcg) 4183 mem_cgroup_get_local_stat(iter, s); 4184} 4185 4186#ifdef CONFIG_NUMA 4187static int mem_control_numa_stat_show(struct seq_file *m, void *arg) 4188{ 4189 int nid; 4190 unsigned long total_nr, file_nr, anon_nr, unevictable_nr; 4191 unsigned long node_nr; 4192 struct cgroup *cont = m->private; 4193 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); 4194 4195 total_nr = mem_cgroup_nr_lru_pages(mem_cont, 
LRU_ALL); 4196 seq_printf(m, "total=%lu", total_nr); 4197 for_each_node_state(nid, N_HIGH_MEMORY) { 4198 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL); 4199 seq_printf(m, " N%d=%lu", nid, node_nr); 4200 } 4201 seq_putc(m, '\n'); 4202 4203 file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE); 4204 seq_printf(m, "file=%lu", file_nr); 4205 for_each_node_state(nid, N_HIGH_MEMORY) { 4206 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, 4207 LRU_ALL_FILE); 4208 seq_printf(m, " N%d=%lu", nid, node_nr); 4209 } 4210 seq_putc(m, '\n'); 4211 4212 anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON); 4213 seq_printf(m, "anon=%lu", anon_nr); 4214 for_each_node_state(nid, N_HIGH_MEMORY) { 4215 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, 4216 LRU_ALL_ANON); 4217 seq_printf(m, " N%d=%lu", nid, node_nr); 4218 } 4219 seq_putc(m, '\n'); 4220 4221 unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE)); 4222 seq_printf(m, "unevictable=%lu", unevictable_nr); 4223 for_each_node_state(nid, N_HIGH_MEMORY) { 4224 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, 4225 BIT(LRU_UNEVICTABLE)); 4226 seq_printf(m, " N%d=%lu", nid, node_nr); 4227 } 4228 seq_putc(m, '\n'); 4229 return 0; 4230} 4231#endif /* CONFIG_NUMA */ 4232 4233static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, 4234 struct cgroup_map_cb *cb) 4235{ 4236 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); 4237 struct mcs_total_stat mystat; 4238 int i; 4239 4240 memset(&mystat, 0, sizeof(mystat)); 4241 mem_cgroup_get_local_stat(mem_cont, &mystat); 4242 4243 4244 for (i = 0; i < NR_MCS_STAT; i++) { 4245 if (i == MCS_SWAP && !do_swap_account) 4246 continue; 4247 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]); 4248 } 4249 4250 /* Hierarchical information */ 4251 { 4252 unsigned long long limit, memsw_limit; 4253 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit); 4254 cb->fill(cb, "hierarchical_memory_limit", limit); 4255 if (do_swap_account) 4256 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit); 4257 } 4258 4259 memset(&mystat, 0, sizeof(mystat)); 4260 mem_cgroup_get_total_stat(mem_cont, &mystat); 4261 for (i = 0; i < NR_MCS_STAT; i++) { 4262 if (i == MCS_SWAP && !do_swap_account) 4263 continue; 4264 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]); 4265 } 4266 4267#ifdef CONFIG_DEBUG_VM 4268 { 4269 int nid, zid; 4270 struct mem_cgroup_per_zone *mz; 4271 unsigned long recent_rotated[2] = {0, 0}; 4272 unsigned long recent_scanned[2] = {0, 0}; 4273 4274 for_each_online_node(nid) 4275 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 4276 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); 4277 4278 recent_rotated[0] += 4279 mz->reclaim_stat.recent_rotated[0]; 4280 recent_rotated[1] += 4281 mz->reclaim_stat.recent_rotated[1]; 4282 recent_scanned[0] += 4283 mz->reclaim_stat.recent_scanned[0]; 4284 recent_scanned[1] += 4285 mz->reclaim_stat.recent_scanned[1]; 4286 } 4287 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]); 4288 cb->fill(cb, "recent_rotated_file", recent_rotated[1]); 4289 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]); 4290 cb->fill(cb, "recent_scanned_file", recent_scanned[1]); 4291 } 4292#endif 4293 4294 return 0; 4295} 4296 4297static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft) 4298{ 4299 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4300 4301 return mem_cgroup_swappiness(memcg); 4302} 4303 4304static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft, 
4305 u64 val) 4306{ 4307 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4308 struct mem_cgroup *parent; 4309 4310 if (val > 100) 4311 return -EINVAL; 4312 4313 if (cgrp->parent == NULL) 4314 return -EINVAL; 4315 4316 parent = mem_cgroup_from_cont(cgrp->parent); 4317 4318 cgroup_lock(); 4319 4320 /* If under hierarchy, only empty-root can set this value */ 4321 if ((parent->use_hierarchy) || 4322 (memcg->use_hierarchy && !list_empty(&cgrp->children))) { 4323 cgroup_unlock(); 4324 return -EINVAL; 4325 } 4326 4327 memcg->swappiness = val; 4328 4329 cgroup_unlock(); 4330 4331 return 0; 4332} 4333 4334static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4335{ 4336 struct mem_cgroup_threshold_ary *t; 4337 u64 usage; 4338 int i; 4339 4340 rcu_read_lock(); 4341 if (!swap) 4342 t = rcu_dereference(memcg->thresholds.primary); 4343 else 4344 t = rcu_dereference(memcg->memsw_thresholds.primary); 4345 4346 if (!t) 4347 goto unlock; 4348 4349 usage = mem_cgroup_usage(memcg, swap); 4350 4351 /* 4352 * current_threshold points to threshold just below usage. 4353 * If it's not true, a threshold was crossed after last 4354 * call of __mem_cgroup_threshold(). 4355 */ 4356 i = t->current_threshold; 4357 4358 /* 4359 * Iterate backward over array of thresholds starting from 4360 * current_threshold and check if a threshold is crossed. 4361 * If none of thresholds below usage is crossed, we read 4362 * only one element of the array here. 4363 */ 4364 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4365 eventfd_signal(t->entries[i].eventfd, 1); 4366 4367 /* i = current_threshold + 1 */ 4368 i++; 4369 4370 /* 4371 * Iterate forward over array of thresholds starting from 4372 * current_threshold+1 and check if a threshold is crossed. 4373 * If none of thresholds above usage is crossed, we read 4374 * only one element of the array here. 
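 *
 * Worked example with made-up numbers: thresholds {4M, 8M, 16M}, usage last
 * seen just above 4M (current_threshold = 4M), new usage 12M.  The backward
 * scan finds nothing to signal, the forward scan signals the 8M eventfd and
 * stops at 16M, and current_threshold ends up pointing at 8M.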
4375 */ 4376 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4377 eventfd_signal(t->entries[i].eventfd, 1); 4378 4379 /* Update current_threshold */ 4380 t->current_threshold = i - 1; 4381unlock: 4382 rcu_read_unlock(); 4383} 4384 4385static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4386{ 4387 while (memcg) { 4388 __mem_cgroup_threshold(memcg, false); 4389 if (do_swap_account) 4390 __mem_cgroup_threshold(memcg, true); 4391 4392 memcg = parent_mem_cgroup(memcg); 4393 } 4394} 4395 4396static int compare_thresholds(const void *a, const void *b) 4397{ 4398 const struct mem_cgroup_threshold *_a = a; 4399 const struct mem_cgroup_threshold *_b = b; 4400 4401 return _a->threshold - _b->threshold; 4402} 4403 4404static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 4405{ 4406 struct mem_cgroup_eventfd_list *ev; 4407 4408 list_for_each_entry(ev, &memcg->oom_notify, list) 4409 eventfd_signal(ev->eventfd, 1); 4410 return 0; 4411} 4412 4413static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 4414{ 4415 struct mem_cgroup *iter; 4416 4417 for_each_mem_cgroup_tree(iter, memcg) 4418 mem_cgroup_oom_notify_cb(iter); 4419} 4420 4421static int mem_cgroup_usage_register_event(struct cgroup *cgrp, 4422 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) 4423{ 4424 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4425 struct mem_cgroup_thresholds *thresholds; 4426 struct mem_cgroup_threshold_ary *new; 4427 int type = MEMFILE_TYPE(cft->private); 4428 u64 threshold, usage; 4429 int i, size, ret; 4430 4431 ret = res_counter_memparse_write_strategy(args, &threshold); 4432 if (ret) 4433 return ret; 4434 4435 mutex_lock(&memcg->thresholds_lock); 4436 4437 if (type == _MEM) 4438 thresholds = &memcg->thresholds; 4439 else if (type == _MEMSWAP) 4440 thresholds = &memcg->memsw_thresholds; 4441 else 4442 BUG(); 4443 4444 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 4445 4446 /* Check if a threshold crossed before adding a new one */ 4447 if (thresholds->primary) 4448 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4449 4450 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 4451 4452 /* Allocate memory for new array of thresholds */ 4453 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 4454 GFP_KERNEL); 4455 if (!new) { 4456 ret = -ENOMEM; 4457 goto unlock; 4458 } 4459 new->size = size; 4460 4461 /* Copy thresholds (if any) to new array */ 4462 if (thresholds->primary) { 4463 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 4464 sizeof(struct mem_cgroup_threshold)); 4465 } 4466 4467 /* Add new threshold */ 4468 new->entries[size - 1].eventfd = eventfd; 4469 new->entries[size - 1].threshold = threshold; 4470 4471 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4472 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 4473 compare_thresholds, NULL); 4474 4475 /* Find current threshold */ 4476 new->current_threshold = -1; 4477 for (i = 0; i < size; i++) { 4478 if (new->entries[i].threshold < usage) { 4479 /* 4480 * new->current_threshold will not be used until 4481 * rcu_assign_pointer(), so it's safe to increment 4482 * it here. 
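 *
 * (taken together, this pass just re-establishes the invariant that
 *  current_threshold indexes the largest threshold still below usage)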
4483 */ 4484 ++new->current_threshold; 4485 } 4486 } 4487 4488 /* Free old spare buffer and save old primary buffer as spare */ 4489 kfree(thresholds->spare); 4490 thresholds->spare = thresholds->primary; 4491 4492 rcu_assign_pointer(thresholds->primary, new); 4493 4494 /* To be sure that nobody uses thresholds */ 4495 synchronize_rcu(); 4496 4497unlock: 4498 mutex_unlock(&memcg->thresholds_lock); 4499 4500 return ret; 4501} 4502 4503static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp, 4504 struct cftype *cft, struct eventfd_ctx *eventfd) 4505{ 4506 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4507 struct mem_cgroup_thresholds *thresholds; 4508 struct mem_cgroup_threshold_ary *new; 4509 int type = MEMFILE_TYPE(cft->private); 4510 u64 usage; 4511 int i, j, size; 4512 4513 mutex_lock(&memcg->thresholds_lock); 4514 if (type == _MEM) 4515 thresholds = &memcg->thresholds; 4516 else if (type == _MEMSWAP) 4517 thresholds = &memcg->memsw_thresholds; 4518 else 4519 BUG(); 4520 4521 /* 4522 * Something went wrong if we trying to unregister a threshold 4523 * if we don't have thresholds 4524 */ 4525 BUG_ON(!thresholds); 4526 4527 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 4528 4529 /* Check if a threshold crossed before removing */ 4530 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4531 4532 /* Calculate new number of threshold */ 4533 size = 0; 4534 for (i = 0; i < thresholds->primary->size; i++) { 4535 if (thresholds->primary->entries[i].eventfd != eventfd) 4536 size++; 4537 } 4538 4539 new = thresholds->spare; 4540 4541 /* Set thresholds array to NULL if we don't have thresholds */ 4542 if (!size) { 4543 kfree(new); 4544 new = NULL; 4545 goto swap_buffers; 4546 } 4547 4548 new->size = size; 4549 4550 /* Copy thresholds and find current threshold */ 4551 new->current_threshold = -1; 4552 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4553 if (thresholds->primary->entries[i].eventfd == eventfd) 4554 continue; 4555 4556 new->entries[j] = thresholds->primary->entries[i]; 4557 if (new->entries[j].threshold < usage) { 4558 /* 4559 * new->current_threshold will not be used 4560 * until rcu_assign_pointer(), so it's safe to increment 4561 * it here. 4562 */ 4563 ++new->current_threshold; 4564 } 4565 j++; 4566 } 4567 4568swap_buffers: 4569 /* Swap primary and spare array */ 4570 thresholds->spare = thresholds->primary; 4571 rcu_assign_pointer(thresholds->primary, new); 4572 4573 /* To be sure that nobody uses thresholds */ 4574 synchronize_rcu(); 4575 4576 mutex_unlock(&memcg->thresholds_lock); 4577} 4578 4579static int mem_cgroup_oom_register_event(struct cgroup *cgrp, 4580 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) 4581{ 4582 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4583 struct mem_cgroup_eventfd_list *event; 4584 int type = MEMFILE_TYPE(cft->private); 4585 4586 BUG_ON(type != _OOM_TYPE); 4587 event = kmalloc(sizeof(*event), GFP_KERNEL); 4588 if (!event) 4589 return -ENOMEM; 4590 4591 spin_lock(&memcg_oom_lock); 4592 4593 event->eventfd = eventfd; 4594 list_add(&event->list, &memcg->oom_notify); 4595 4596 /* already in OOM ? 
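 * (a listener that registers while the group is already under OOM gets an
 *  immediate notification instead of silently missing the event it is
 *  interested in)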
*/ 4597 if (atomic_read(&memcg->under_oom)) 4598 eventfd_signal(eventfd, 1); 4599 spin_unlock(&memcg_oom_lock); 4600 4601 return 0; 4602} 4603 4604static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, 4605 struct cftype *cft, struct eventfd_ctx *eventfd) 4606{ 4607 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4608 struct mem_cgroup_eventfd_list *ev, *tmp; 4609 int type = MEMFILE_TYPE(cft->private); 4610 4611 BUG_ON(type != _OOM_TYPE); 4612 4613 spin_lock(&memcg_oom_lock); 4614 4615 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4616 if (ev->eventfd == eventfd) { 4617 list_del(&ev->list); 4618 kfree(ev); 4619 } 4620 } 4621 4622 spin_unlock(&memcg_oom_lock); 4623} 4624 4625static int mem_cgroup_oom_control_read(struct cgroup *cgrp, 4626 struct cftype *cft, struct cgroup_map_cb *cb) 4627{ 4628 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4629 4630 cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable); 4631 4632 if (atomic_read(&memcg->under_oom)) 4633 cb->fill(cb, "under_oom", 1); 4634 else 4635 cb->fill(cb, "under_oom", 0); 4636 return 0; 4637} 4638 4639static int mem_cgroup_oom_control_write(struct cgroup *cgrp, 4640 struct cftype *cft, u64 val) 4641{ 4642 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4643 struct mem_cgroup *parent; 4644 4645 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4646 if (!cgrp->parent || !((val == 0) || (val == 1))) 4647 return -EINVAL; 4648 4649 parent = mem_cgroup_from_cont(cgrp->parent); 4650 4651 cgroup_lock(); 4652 /* oom-kill-disable is a flag for subhierarchy. */ 4653 if ((parent->use_hierarchy) || 4654 (memcg->use_hierarchy && !list_empty(&cgrp->children))) { 4655 cgroup_unlock(); 4656 return -EINVAL; 4657 } 4658 memcg->oom_kill_disable = val; 4659 if (!val) 4660 memcg_oom_recover(memcg); 4661 cgroup_unlock(); 4662 return 0; 4663} 4664 4665#ifdef CONFIG_NUMA 4666static const struct file_operations mem_control_numa_stat_file_operations = { 4667 .read = seq_read, 4668 .llseek = seq_lseek, 4669 .release = single_release, 4670}; 4671 4672static int mem_control_numa_stat_open(struct inode *unused, struct file *file) 4673{ 4674 struct cgroup *cont = file->f_dentry->d_parent->d_fsdata; 4675 4676 file->f_op = &mem_control_numa_stat_file_operations; 4677 return single_open(file, mem_control_numa_stat_show, cont); 4678} 4679#endif /* CONFIG_NUMA */ 4680 4681#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 4682static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss) 4683{ 4684 /* 4685 * Part of this would be better living in a separate allocation 4686 * function, leaving us with just the cgroup tree population work. 4687 * We, however, depend on state such as network's proto_list that 4688 * is only initialized after cgroup creation. 
I found the less 4689 * cumbersome way to deal with it to defer it all to populate time 4690 */ 4691 return mem_cgroup_sockets_init(cont, ss); 4692}; 4693 4694static void kmem_cgroup_destroy(struct cgroup_subsys *ss, 4695 struct cgroup *cont) 4696{ 4697 mem_cgroup_sockets_destroy(cont, ss); 4698} 4699#else 4700static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss) 4701{ 4702 return 0; 4703} 4704 4705static void kmem_cgroup_destroy(struct cgroup_subsys *ss, 4706 struct cgroup *cont) 4707{ 4708} 4709#endif 4710 4711static struct cftype mem_cgroup_files[] = { 4712 { 4713 .name = "usage_in_bytes", 4714 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4715 .read_u64 = mem_cgroup_read, 4716 .register_event = mem_cgroup_usage_register_event, 4717 .unregister_event = mem_cgroup_usage_unregister_event, 4718 }, 4719 { 4720 .name = "max_usage_in_bytes", 4721 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4722 .trigger = mem_cgroup_reset, 4723 .read_u64 = mem_cgroup_read, 4724 }, 4725 { 4726 .name = "limit_in_bytes", 4727 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4728 .write_string = mem_cgroup_write, 4729 .read_u64 = mem_cgroup_read, 4730 }, 4731 { 4732 .name = "soft_limit_in_bytes", 4733 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4734 .write_string = mem_cgroup_write, 4735 .read_u64 = mem_cgroup_read, 4736 }, 4737 { 4738 .name = "failcnt", 4739 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4740 .trigger = mem_cgroup_reset, 4741 .read_u64 = mem_cgroup_read, 4742 }, 4743 { 4744 .name = "stat", 4745 .read_map = mem_control_stat_show, 4746 }, 4747 { 4748 .name = "force_empty", 4749 .trigger = mem_cgroup_force_empty_write, 4750 }, 4751 { 4752 .name = "use_hierarchy", 4753 .write_u64 = mem_cgroup_hierarchy_write, 4754 .read_u64 = mem_cgroup_hierarchy_read, 4755 }, 4756 { 4757 .name = "swappiness", 4758 .read_u64 = mem_cgroup_swappiness_read, 4759 .write_u64 = mem_cgroup_swappiness_write, 4760 }, 4761 { 4762 .name = "move_charge_at_immigrate", 4763 .read_u64 = mem_cgroup_move_charge_read, 4764 .write_u64 = mem_cgroup_move_charge_write, 4765 }, 4766 { 4767 .name = "oom_control", 4768 .read_map = mem_cgroup_oom_control_read, 4769 .write_u64 = mem_cgroup_oom_control_write, 4770 .register_event = mem_cgroup_oom_register_event, 4771 .unregister_event = mem_cgroup_oom_unregister_event, 4772 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4773 }, 4774#ifdef CONFIG_NUMA 4775 { 4776 .name = "numa_stat", 4777 .open = mem_control_numa_stat_open, 4778 .mode = S_IRUGO, 4779 }, 4780#endif 4781}; 4782 4783#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4784static struct cftype memsw_cgroup_files[] = { 4785 { 4786 .name = "memsw.usage_in_bytes", 4787 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 4788 .read_u64 = mem_cgroup_read, 4789 .register_event = mem_cgroup_usage_register_event, 4790 .unregister_event = mem_cgroup_usage_unregister_event, 4791 }, 4792 { 4793 .name = "memsw.max_usage_in_bytes", 4794 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 4795 .trigger = mem_cgroup_reset, 4796 .read_u64 = mem_cgroup_read, 4797 }, 4798 { 4799 .name = "memsw.limit_in_bytes", 4800 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 4801 .write_string = mem_cgroup_write, 4802 .read_u64 = mem_cgroup_read, 4803 }, 4804 { 4805 .name = "memsw.failcnt", 4806 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 4807 .trigger = mem_cgroup_reset, 4808 .read_u64 = mem_cgroup_read, 4809 }, 4810}; 4811 4812static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) 4813{ 4814 if 
(!do_swap_account) 4815 return 0; 4816 return cgroup_add_files(cont, ss, memsw_cgroup_files, 4817 ARRAY_SIZE(memsw_cgroup_files)); 4818}; 4819#else 4820static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) 4821{ 4822 return 0; 4823} 4824#endif 4825 4826static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 4827{ 4828 struct mem_cgroup_per_node *pn; 4829 struct mem_cgroup_per_zone *mz; 4830 enum lru_list l; 4831 int zone, tmp = node; 4832 /* 4833 * This routine is called against possible nodes. 4834 * But it's BUG to call kmalloc() against offline node. 4835 * 4836 * TODO: this routine can waste much memory for nodes which will 4837 * never be onlined. It's better to use memory hotplug callback 4838 * function. 4839 */ 4840 if (!node_state(node, N_NORMAL_MEMORY)) 4841 tmp = -1; 4842 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4843 if (!pn) 4844 return 1; 4845 4846 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4847 mz = &pn->zoneinfo[zone]; 4848 for_each_lru(l) 4849 INIT_LIST_HEAD(&mz->lruvec.lists[l]); 4850 mz->usage_in_excess = 0; 4851 mz->on_tree = false; 4852 mz->mem = memcg; 4853 } 4854 memcg->info.nodeinfo[node] = pn; 4855 return 0; 4856} 4857 4858static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 4859{ 4860 kfree(memcg->info.nodeinfo[node]); 4861} 4862 4863static struct mem_cgroup *mem_cgroup_alloc(void) 4864{ 4865 struct mem_cgroup *mem; 4866 int size = sizeof(struct mem_cgroup); 4867 4868 /* Can be very big if MAX_NUMNODES is very big */ 4869 if (size < PAGE_SIZE) 4870 mem = kzalloc(size, GFP_KERNEL); 4871 else 4872 mem = vzalloc(size); 4873 4874 if (!mem) 4875 return NULL; 4876 4877 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4878 if (!mem->stat) 4879 goto out_free; 4880 spin_lock_init(&mem->pcp_counter_lock); 4881 return mem; 4882 4883out_free: 4884 if (size < PAGE_SIZE) 4885 kfree(mem); 4886 else 4887 vfree(mem); 4888 return NULL; 4889} 4890 4891/* 4892 * At destroying mem_cgroup, references from swap_cgroup can remain. 4893 * (scanning all at force_empty is too costly...) 4894 * 4895 * Instead of clearing all references at force_empty, we remember 4896 * the number of reference from swap_cgroup and free mem_cgroup when 4897 * it goes down to 0. 4898 * 4899 * Removal of cgroup itself succeeds regardless of refs from swap. 4900 */ 4901 4902static void __mem_cgroup_free(struct mem_cgroup *memcg) 4903{ 4904 int node; 4905 4906 mem_cgroup_remove_from_trees(memcg); 4907 free_css_id(&mem_cgroup_subsys, &memcg->css); 4908 4909 for_each_node_state(node, N_POSSIBLE) 4910 free_mem_cgroup_per_zone_info(memcg, node); 4911 4912 free_percpu(memcg->stat); 4913 if (sizeof(struct mem_cgroup) < PAGE_SIZE) 4914 kfree(memcg); 4915 else 4916 vfree(memcg); 4917} 4918 4919static void mem_cgroup_get(struct mem_cgroup *memcg) 4920{ 4921 atomic_inc(&memcg->refcnt); 4922} 4923 4924static void __mem_cgroup_put(struct mem_cgroup *memcg, int count) 4925{ 4926 if (atomic_sub_and_test(count, &memcg->refcnt)) { 4927 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 4928 __mem_cgroup_free(memcg); 4929 if (parent) 4930 mem_cgroup_put(parent); 4931 } 4932} 4933 4934static void mem_cgroup_put(struct mem_cgroup *memcg) 4935{ 4936 __mem_cgroup_put(memcg, 1); 4937} 4938 4939/* 4940 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. 
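 *
 * The walk goes through the res_counter parent pointer, which is only set up
 * when the parent has use_hierarchy enabled (see mem_cgroup_create() below),
 * so this returns NULL for the root cgroup and for non-hierarchical children.
 * A caller that wants the whole chain looks like the sketch below, with
 * do_something() as a stand-in; mem_cgroup_threshold() above is a real user:
 *
 *	for (; memcg; memcg = parent_mem_cgroup(memcg))
 *		do_something(memcg);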
4941 */ 4942struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) 4943{ 4944 if (!memcg->res.parent) 4945 return NULL; 4946 return mem_cgroup_from_res_counter(memcg->res.parent, res); 4947} 4948EXPORT_SYMBOL(parent_mem_cgroup); 4949 4950#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4951static void __init enable_swap_cgroup(void) 4952{ 4953 if (!mem_cgroup_disabled() && really_do_swap_account) 4954 do_swap_account = 1; 4955} 4956#else 4957static void __init enable_swap_cgroup(void) 4958{ 4959} 4960#endif 4961 4962static int mem_cgroup_soft_limit_tree_init(void) 4963{ 4964 struct mem_cgroup_tree_per_node *rtpn; 4965 struct mem_cgroup_tree_per_zone *rtpz; 4966 int tmp, node, zone; 4967 4968 for_each_node_state(node, N_POSSIBLE) { 4969 tmp = node; 4970 if (!node_state(node, N_NORMAL_MEMORY)) 4971 tmp = -1; 4972 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp); 4973 if (!rtpn) 4974 goto err_cleanup; 4975 4976 soft_limit_tree.rb_tree_per_node[node] = rtpn; 4977 4978 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4979 rtpz = &rtpn->rb_tree_per_zone[zone]; 4980 rtpz->rb_root = RB_ROOT; 4981 spin_lock_init(&rtpz->lock); 4982 } 4983 } 4984 return 0; 4985 4986err_cleanup: 4987 for_each_node_state(node, N_POSSIBLE) { 4988 if (!soft_limit_tree.rb_tree_per_node[node]) 4989 break; 4990 kfree(soft_limit_tree.rb_tree_per_node[node]); 4991 soft_limit_tree.rb_tree_per_node[node] = NULL; 4992 } 4993 return 1; 4994 4995} 4996 4997static struct cgroup_subsys_state * __ref 4998mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) 4999{ 5000 struct mem_cgroup *memcg, *parent; 5001 long error = -ENOMEM; 5002 int node; 5003 5004 memcg = mem_cgroup_alloc(); 5005 if (!memcg) 5006 return ERR_PTR(error); 5007 5008 for_each_node_state(node, N_POSSIBLE) 5009 if (alloc_mem_cgroup_per_zone_info(memcg, node)) 5010 goto free_out; 5011 5012 /* root ? */ 5013 if (cont->parent == NULL) { 5014 int cpu; 5015 enable_swap_cgroup(); 5016 parent = NULL; 5017 if (mem_cgroup_soft_limit_tree_init()) 5018 goto free_out; 5019 root_mem_cgroup = memcg; 5020 for_each_possible_cpu(cpu) { 5021 struct memcg_stock_pcp *stock = 5022 &per_cpu(memcg_stock, cpu); 5023 INIT_WORK(&stock->work, drain_local_stock); 5024 } 5025 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 5026 } else { 5027 parent = mem_cgroup_from_cont(cont->parent); 5028 memcg->use_hierarchy = parent->use_hierarchy; 5029 memcg->oom_kill_disable = parent->oom_kill_disable; 5030 } 5031 5032 if (parent && parent->use_hierarchy) { 5033 res_counter_init(&memcg->res, &parent->res); 5034 res_counter_init(&memcg->memsw, &parent->memsw); 5035 /* 5036 * We increment refcnt of the parent to ensure that we can 5037 * safely access it on res_counter_charge/uncharge. 5038 * This refcnt will be decremented when freeing this 5039 * mem_cgroup(see mem_cgroup_put). 
5040 */ 5041 mem_cgroup_get(parent); 5042 } else { 5043 res_counter_init(&memcg->res, NULL); 5044 res_counter_init(&memcg->memsw, NULL); 5045 } 5046 memcg->last_scanned_node = MAX_NUMNODES; 5047 INIT_LIST_HEAD(&memcg->oom_notify); 5048 5049 if (parent) 5050 memcg->swappiness = mem_cgroup_swappiness(parent); 5051 atomic_set(&memcg->refcnt, 1); 5052 memcg->move_charge_at_immigrate = 0; 5053 mutex_init(&memcg->thresholds_lock); 5054 return &memcg->css; 5055free_out: 5056 __mem_cgroup_free(memcg); 5057 return ERR_PTR(error); 5058} 5059 5060static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss, 5061 struct cgroup *cont) 5062{ 5063 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 5064 5065 return mem_cgroup_force_empty(memcg, false); 5066} 5067 5068static void mem_cgroup_destroy(struct cgroup_subsys *ss, 5069 struct cgroup *cont) 5070{ 5071 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 5072 5073 kmem_cgroup_destroy(ss, cont); 5074 5075 mem_cgroup_put(memcg); 5076} 5077 5078static int mem_cgroup_populate(struct cgroup_subsys *ss, 5079 struct cgroup *cont) 5080{ 5081 int ret; 5082 5083 ret = cgroup_add_files(cont, ss, mem_cgroup_files, 5084 ARRAY_SIZE(mem_cgroup_files)); 5085 5086 if (!ret) 5087 ret = register_memsw_files(cont, ss); 5088 5089 if (!ret) 5090 ret = register_kmem_files(cont, ss); 5091 5092 return ret; 5093} 5094 5095#ifdef CONFIG_MMU 5096/* Handlers for move charge at task migration. */ 5097#define PRECHARGE_COUNT_AT_ONCE 256 5098static int mem_cgroup_do_precharge(unsigned long count) 5099{ 5100 int ret = 0; 5101 int batch_count = PRECHARGE_COUNT_AT_ONCE; 5102 struct mem_cgroup *memcg = mc.to; 5103 5104 if (mem_cgroup_is_root(memcg)) { 5105 mc.precharge += count; 5106 /* we don't need css_get for root */ 5107 return ret; 5108 } 5109 /* try to charge at once */ 5110 if (count > 1) { 5111 struct res_counter *dummy; 5112 /* 5113 * "memcg" cannot be under rmdir() because we've already checked 5114 * by cgroup_lock_live_cgroup() that it is not removed and we 5115 * are still under the same cgroup_mutex. So we can postpone 5116 * css_get(). 5117 */ 5118 if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy)) 5119 goto one_by_one; 5120 if (do_swap_account && res_counter_charge(&memcg->memsw, 5121 PAGE_SIZE * count, &dummy)) { 5122 res_counter_uncharge(&memcg->res, PAGE_SIZE * count); 5123 goto one_by_one; 5124 } 5125 mc.precharge += count; 5126 return ret; 5127 } 5128one_by_one: 5129 /* fall back to one by one charge */ 5130 while (count--) { 5131 if (signal_pending(current)) { 5132 ret = -EINTR; 5133 break; 5134 } 5135 if (!batch_count--) { 5136 batch_count = PRECHARGE_COUNT_AT_ONCE; 5137 cond_resched(); 5138 } 5139 ret = __mem_cgroup_try_charge(NULL, 5140 GFP_KERNEL, 1, &memcg, false); 5141 if (ret || !memcg) 5142 /* mem_cgroup_clear_mc() will do uncharge later */ 5143 return -ENOMEM; 5144 mc.precharge++; 5145 } 5146 return ret; 5147} 5148 5149/** 5150 * is_target_pte_for_mc - check a pte whether it is valid for move charge 5151 * @vma: the vma the pte to be checked belongs 5152 * @addr: the address corresponding to the pte to be checked 5153 * @ptent: the pte to be checked 5154 * @target: the pointer the target page or swap ent will be stored(can be NULL) 5155 * 5156 * Returns 5157 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 5158 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 5159 * move charge. if @target is not NULL, the page is stored in target->page 5160 * with extra refcnt got(Callers should handle it). 
5161 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 5162 * target for charge migration. if @target is not NULL, the entry is stored 5163 * in target->ent. 5164 * 5165 * Called with pte lock held. 5166 */ 5167union mc_target { 5168 struct page *page; 5169 swp_entry_t ent; 5170}; 5171 5172enum mc_target_type { 5173 MC_TARGET_NONE, /* not used */ 5174 MC_TARGET_PAGE, 5175 MC_TARGET_SWAP, 5176}; 5177 5178static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5179 unsigned long addr, pte_t ptent) 5180{ 5181 struct page *page = vm_normal_page(vma, addr, ptent); 5182 5183 if (!page || !page_mapped(page)) 5184 return NULL; 5185 if (PageAnon(page)) { 5186 /* we don't move shared anon */ 5187 if (!move_anon() || page_mapcount(page) > 2) 5188 return NULL; 5189 } else if (!move_file()) 5190 /* we ignore mapcount for file pages */ 5191 return NULL; 5192 if (!get_page_unless_zero(page)) 5193 return NULL; 5194 5195 return page; 5196} 5197 5198static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5199 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5200{ 5201 int usage_count; 5202 struct page *page = NULL; 5203 swp_entry_t ent = pte_to_swp_entry(ptent); 5204 5205 if (!move_anon() || non_swap_entry(ent)) 5206 return NULL; 5207 usage_count = mem_cgroup_count_swap_user(ent, &page); 5208 if (usage_count > 1) { /* we don't move shared anon */ 5209 if (page) 5210 put_page(page); 5211 return NULL; 5212 } 5213 if (do_swap_account) 5214 entry->val = ent.val; 5215 5216 return page; 5217} 5218 5219static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5220 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5221{ 5222 struct page *page = NULL; 5223 struct inode *inode; 5224 struct address_space *mapping; 5225 pgoff_t pgoff; 5226 5227 if (!vma->vm_file) /* anonymous vma */ 5228 return NULL; 5229 if (!move_file()) 5230 return NULL; 5231 5232 inode = vma->vm_file->f_path.dentry->d_inode; 5233 mapping = vma->vm_file->f_mapping; 5234 if (pte_none(ptent)) 5235 pgoff = linear_page_index(vma, addr); 5236 else /* pte_file(ptent) is true */ 5237 pgoff = pte_to_pgoff(ptent); 5238 5239 /* page is moved even if it's not RSS of this task(page-faulted). */ 5240 page = find_get_page(mapping, pgoff); 5241 5242#ifdef CONFIG_SWAP 5243 /* shmem/tmpfs may report page out on swap: account for that too. */ 5244 if (radix_tree_exceptional_entry(page)) { 5245 swp_entry_t swap = radix_to_swp_entry(page); 5246 if (do_swap_account) 5247 *entry = swap; 5248 page = find_get_page(&swapper_space, swap.val); 5249 } 5250#endif 5251 return page; 5252} 5253 5254static int is_target_pte_for_mc(struct vm_area_struct *vma, 5255 unsigned long addr, pte_t ptent, union mc_target *target) 5256{ 5257 struct page *page = NULL; 5258 struct page_cgroup *pc; 5259 int ret = 0; 5260 swp_entry_t ent = { .val = 0 }; 5261 5262 if (pte_present(ptent)) 5263 page = mc_handle_present_pte(vma, addr, ptent); 5264 else if (is_swap_pte(ptent)) 5265 page = mc_handle_swap_pte(vma, addr, ptent, &ent); 5266 else if (pte_none(ptent) || pte_file(ptent)) 5267 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5268 5269 if (!page && !ent.val) 5270 return 0; 5271 if (page) { 5272 pc = lookup_page_cgroup(page); 5273 /* 5274 * Do only loose check w/o page_cgroup lock. 5275 * mem_cgroup_move_account() checks the pc is valid or not under 5276 * the lock. 
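 * A false positive here is harmless: it merely costs an extra precharge that
 * mem_cgroup_clear_mc() cancels later, because mem_cgroup_move_account()
 * re-checks the page_cgroup under the proper lock before moving anything.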
5277 */ 5278 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { 5279 ret = MC_TARGET_PAGE; 5280 if (target) 5281 target->page = page; 5282 } 5283 if (!ret || !target) 5284 put_page(page); 5285 } 5286 /* There is a swap entry and a page doesn't exist or isn't charged */ 5287 if (ent.val && !ret && 5288 css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) { 5289 ret = MC_TARGET_SWAP; 5290 if (target) 5291 target->ent = ent; 5292 } 5293 return ret; 5294} 5295 5296static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5297 unsigned long addr, unsigned long end, 5298 struct mm_walk *walk) 5299{ 5300 struct vm_area_struct *vma = walk->private; 5301 pte_t *pte; 5302 spinlock_t *ptl; 5303 5304 split_huge_page_pmd(walk->mm, pmd); 5305 5306 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5307 for (; addr != end; pte++, addr += PAGE_SIZE) 5308 if (is_target_pte_for_mc(vma, addr, *pte, NULL)) 5309 mc.precharge++; /* increment precharge temporarily */ 5310 pte_unmap_unlock(pte - 1, ptl); 5311 cond_resched(); 5312 5313 return 0; 5314} 5315 5316static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5317{ 5318 unsigned long precharge; 5319 struct vm_area_struct *vma; 5320 5321 down_read(&mm->mmap_sem); 5322 for (vma = mm->mmap; vma; vma = vma->vm_next) { 5323 struct mm_walk mem_cgroup_count_precharge_walk = { 5324 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5325 .mm = mm, 5326 .private = vma, 5327 }; 5328 if (is_vm_hugetlb_page(vma)) 5329 continue; 5330 walk_page_range(vma->vm_start, vma->vm_end, 5331 &mem_cgroup_count_precharge_walk); 5332 } 5333 up_read(&mm->mmap_sem); 5334 5335 precharge = mc.precharge; 5336 mc.precharge = 0; 5337 5338 return precharge; 5339} 5340 5341static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5342{ 5343 unsigned long precharge = mem_cgroup_count_precharge(mm); 5344 5345 VM_BUG_ON(mc.moving_task); 5346 mc.moving_task = current; 5347 return mem_cgroup_do_precharge(precharge); 5348} 5349 5350/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 5351static void __mem_cgroup_clear_mc(void) 5352{ 5353 struct mem_cgroup *from = mc.from; 5354 struct mem_cgroup *to = mc.to; 5355 5356 /* we must uncharge all the leftover precharges from mc.to */ 5357 if (mc.precharge) { 5358 __mem_cgroup_cancel_charge(mc.to, mc.precharge); 5359 mc.precharge = 0; 5360 } 5361 /* 5362 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5363 * we must uncharge here. 5364 */ 5365 if (mc.moved_charge) { 5366 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge); 5367 mc.moved_charge = 0; 5368 } 5369 /* we must fixup refcnts and charges */ 5370 if (mc.moved_swap) { 5371 /* uncharge swap account from the old cgroup */ 5372 if (!mem_cgroup_is_root(mc.from)) 5373 res_counter_uncharge(&mc.from->memsw, 5374 PAGE_SIZE * mc.moved_swap); 5375 __mem_cgroup_put(mc.from, mc.moved_swap); 5376 5377 if (!mem_cgroup_is_root(mc.to)) { 5378 /* 5379 * we charged both to->res and to->memsw, so we should 5380 * uncharge to->res. 5381 */ 5382 res_counter_uncharge(&mc.to->res, 5383 PAGE_SIZE * mc.moved_swap); 5384 } 5385 /* we've already done mem_cgroup_get(mc.to) */ 5386 mc.moved_swap = 0; 5387 } 5388 memcg_oom_recover(from); 5389 memcg_oom_recover(to); 5390 wake_up_all(&mc.waitq); 5391} 5392 5393static void mem_cgroup_clear_mc(void) 5394{ 5395 struct mem_cgroup *from = mc.from; 5396 5397 /* 5398 * we must clear moving_task before waking up waiters at the end of 5399 * task migration. 
5400 */ 5401 mc.moving_task = NULL; 5402 __mem_cgroup_clear_mc(); 5403 spin_lock(&mc.lock); 5404 mc.from = NULL; 5405 mc.to = NULL; 5406 spin_unlock(&mc.lock); 5407 mem_cgroup_end_move(from); 5408} 5409 5410static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5411 struct cgroup *cgroup, 5412 struct cgroup_taskset *tset) 5413{ 5414 struct task_struct *p = cgroup_taskset_first(tset); 5415 int ret = 0; 5416 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup); 5417 5418 if (memcg->move_charge_at_immigrate) { 5419 struct mm_struct *mm; 5420 struct mem_cgroup *from = mem_cgroup_from_task(p); 5421 5422 VM_BUG_ON(from == memcg); 5423 5424 mm = get_task_mm(p); 5425 if (!mm) 5426 return 0; 5427 /* We move charges only when we move a owner of the mm */ 5428 if (mm->owner == p) { 5429 VM_BUG_ON(mc.from); 5430 VM_BUG_ON(mc.to); 5431 VM_BUG_ON(mc.precharge); 5432 VM_BUG_ON(mc.moved_charge); 5433 VM_BUG_ON(mc.moved_swap); 5434 mem_cgroup_start_move(from); 5435 spin_lock(&mc.lock); 5436 mc.from = from; 5437 mc.to = memcg; 5438 spin_unlock(&mc.lock); 5439 /* We set mc.moving_task later */ 5440 5441 ret = mem_cgroup_precharge_mc(mm); 5442 if (ret) 5443 mem_cgroup_clear_mc(); 5444 } 5445 mmput(mm); 5446 } 5447 return ret; 5448} 5449 5450static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, 5451 struct cgroup *cgroup, 5452 struct cgroup_taskset *tset) 5453{ 5454 mem_cgroup_clear_mc(); 5455} 5456 5457static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 5458 unsigned long addr, unsigned long end, 5459 struct mm_walk *walk) 5460{ 5461 int ret = 0; 5462 struct vm_area_struct *vma = walk->private; 5463 pte_t *pte; 5464 spinlock_t *ptl; 5465 5466 split_huge_page_pmd(walk->mm, pmd); 5467retry: 5468 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5469 for (; addr != end; addr += PAGE_SIZE) { 5470 pte_t ptent = *(pte++); 5471 union mc_target target; 5472 int type; 5473 struct page *page; 5474 struct page_cgroup *pc; 5475 swp_entry_t ent; 5476 5477 if (!mc.precharge) 5478 break; 5479 5480 type = is_target_pte_for_mc(vma, addr, ptent, &target); 5481 switch (type) { 5482 case MC_TARGET_PAGE: 5483 page = target.page; 5484 if (isolate_lru_page(page)) 5485 goto put; 5486 pc = lookup_page_cgroup(page); 5487 if (!mem_cgroup_move_account(page, 1, pc, 5488 mc.from, mc.to, false)) { 5489 mc.precharge--; 5490 /* we uncharge from mc.from later. */ 5491 mc.moved_charge++; 5492 } 5493 putback_lru_page(page); 5494put: /* is_target_pte_for_mc() gets the page */ 5495 put_page(page); 5496 break; 5497 case MC_TARGET_SWAP: 5498 ent = target.ent; 5499 if (!mem_cgroup_move_swap_account(ent, 5500 mc.from, mc.to, false)) { 5501 mc.precharge--; 5502 /* we fixup refcnts and charges later. */ 5503 mc.moved_swap++; 5504 } 5505 break; 5506 default: 5507 break; 5508 } 5509 } 5510 pte_unmap_unlock(pte - 1, ptl); 5511 cond_resched(); 5512 5513 if (addr != end) { 5514 /* 5515 * We have consumed all precharges we got in can_attach(). 5516 * We try charge one by one, but don't do any additional 5517 * charges to mc.to if we have failed in charge once in attach() 5518 * phase. 5519 */ 5520 ret = mem_cgroup_do_precharge(1); 5521 if (!ret) 5522 goto retry; 5523 } 5524 5525 return ret; 5526} 5527 5528static void mem_cgroup_move_charge(struct mm_struct *mm) 5529{ 5530 struct vm_area_struct *vma; 5531 5532 lru_add_drain_all(); 5533retry: 5534 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 5535 /* 5536 * Someone who are holding the mmap_sem might be waiting in 5537 * waitq. 
So we cancel all extra charges, wake up all waiters, 5538 * and retry. Because we cancel precharges, we might not be able 5539 * to move enough charges, but moving charge is a best-effort 5540 * feature anyway, so it wouldn't be a big problem. 5541 */ 5542 __mem_cgroup_clear_mc(); 5543 cond_resched(); 5544 goto retry; 5545 } 5546 for (vma = mm->mmap; vma; vma = vma->vm_next) { 5547 int ret; 5548 struct mm_walk mem_cgroup_move_charge_walk = { 5549 .pmd_entry = mem_cgroup_move_charge_pte_range, 5550 .mm = mm, 5551 .private = vma, 5552 }; 5553 if (is_vm_hugetlb_page(vma)) 5554 continue; 5555 ret = walk_page_range(vma->vm_start, vma->vm_end, 5556 &mem_cgroup_move_charge_walk); 5557 if (ret) 5558 /* 5559 * means we have consumed all precharges and failed in 5560 * doing additional charge. Just abandon here. 5561 */ 5562 break; 5563 } 5564 up_read(&mm->mmap_sem); 5565} 5566 5567static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5568 struct cgroup *cont, 5569 struct cgroup_taskset *tset) 5570{ 5571 struct task_struct *p = cgroup_taskset_first(tset); 5572 struct mm_struct *mm = get_task_mm(p); 5573 5574 if (mm) { 5575 if (mc.to) 5576 mem_cgroup_move_charge(mm); 5577 put_swap_token(mm); 5578 mmput(mm); 5579 } 5580 if (mc.to) 5581 mem_cgroup_clear_mc(); 5582} 5583#else /* !CONFIG_MMU */ 5584static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5585 struct cgroup *cgroup, 5586 struct cgroup_taskset *tset) 5587{ 5588 return 0; 5589} 5590static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, 5591 struct cgroup *cgroup, 5592 struct cgroup_taskset *tset) 5593{ 5594} 5595static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5596 struct cgroup *cont, 5597 struct cgroup_taskset *tset) 5598{ 5599} 5600#endif 5601 5602struct cgroup_subsys mem_cgroup_subsys = { 5603 .name = "memory", 5604 .subsys_id = mem_cgroup_subsys_id, 5605 .create = mem_cgroup_create, 5606 .pre_destroy = mem_cgroup_pre_destroy, 5607 .destroy = mem_cgroup_destroy, 5608 .populate = mem_cgroup_populate, 5609 .can_attach = mem_cgroup_can_attach, 5610 .cancel_attach = mem_cgroup_cancel_attach, 5611 .attach = mem_cgroup_move_task, 5612 .early_init = 0, 5613 .use_id = 1, 5614}; 5615 5616#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 5617static int __init enable_swap_account(char *s) 5618{ 5619 /* consider enabled if no parameter or 1 is given */ 5620 if (!strcmp(s, "1")) 5621 really_do_swap_account = 1; 5622 else if (!strcmp(s, "0")) 5623 really_do_swap_account = 0; 5624 return 1; 5625} 5626__setup("swapaccount=", enable_swap_account); 5627 5628#endif 5629
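
/*
 * Note on the boot parameter above: "swapaccount=1" forces swap accounting on
 * and "swapaccount=0" forces it off, overriding the
 * CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED default; any other spelling leaves
 * the compiled-in default in place.
 */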