memcontrol.c revision 7a0524cfc8f9f585471a31b1282a9ce4a1a7d444
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"
#include <net/sock.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5
struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		(0)
#endif


/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAPOUT,	/* # of pages, swapped out */
	MEM_CGROUP_STAT_DATA,		/* end of data requires synchronization */
	MEM_CGROUP_ON_MOVE,		/* someone is moving account between groups */
	MEM_CGROUP_STAT_NSTATS,
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};
/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET	(128)
#define SOFTLIMIT_EVENTS_TARGET		(1024)
#define NUMAINFO_EVENTS_TARGET		(1024)

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	/* css_id of the last scanned hierarchy member */
	int position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		count[NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];

	struct zone_reclaim_stat reclaim_stat;
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
						/* use container_of	   */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
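
/*
 * The threshold structures above back the eventfd-based usage
 * notifications of the v1 memory controller. A minimal userspace sketch
 * of arming one threshold; the mount point and group name are
 * assumptions for illustration, and error handling is omitted:
 */
#if 0	/* illustrative userspace sketch, not compiled with this file */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	char cmd[64];
	uint64_t ticks;
	int efd = eventfd(0, 0);
	int ufd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
		       O_RDONLY);
	int cfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
		       O_WRONLY);

	/* format: "<event_fd> <fd of memory.usage_in_bytes> <threshold>" */
	snprintf(cmd, sizeof(cmd), "%d %d %llu", efd, ufd, 64ULL << 20);
	write(cfd, cmd, strlen(cmd));

	read(efd, &ticks, sizeof(ticks));	/* blocks until usage crosses 64M */
	printf("memory usage crossed the 64M threshold\n");
	return 0;
}
#endif
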
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that no
 * reclaim occurs from a cgroup at its low water mark; this is a feature
 * that will be implemented much later.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;
	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	bool		oom_lock;
	atomic_t	under_oom;

	atomic_t	refcnt;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long	move_charge_at_immigrate;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

#ifdef CONFIG_INET
	struct tcp_memcontrol tcp_mem;
#endif
};

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page (including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON,
					&mc.to->move_charge_at_immigrate);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE,
					&mc.to->move_charge_at_immigrate);
}
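
/*
 * move_anon() and move_file() test bits 0 and 1 of
 * move_charge_at_immigrate, which userspace configures through the
 * memory.move_charge_at_immigrate control file of the destination
 * group. A minimal sketch of enabling both charge types; the cgroup
 * path is an assumption for illustration:
 */
#if 0	/* illustrative userspace sketch, not compiled with this file */
#include <fcntl.h>
#include <unistd.h>

static void enable_charge_moving(void)
{
	/* bit 0: anonymous pages, bit 1: file pages => "3" enables both */
	int fd = open("/sys/fs/cgroup/memory/grp/memory.move_charge_at_immigrate",
		      O_WRONLY);
	write(fd, "3", 1);
	close(fd);
}
#endif
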
/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define _OOM_TYPE		(2)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for the OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)

static void mem_cgroup_get(struct mem_cgroup *memcg);
static void mem_cgroup_put(struct mem_cgroup *memcg);
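
/*
 * The MEMFILE_* macros above pack a counter type and an attribute into
 * the single cft->private integer: the type lives in the high 16 bits,
 * the attribute in the low 16. A quick round-trip, using RES_LIMIT as
 * the attribute:
 *
 *	private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);
 *	MEMFILE_TYPE(private) == _MEMSWAP;	// (private >> 16) & 0xffff
 *	MEMFILE_ATTR(private) == RES_LIMIT;	// private & 0xffff
 */
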
/* Writing them here to avoid exposing memcg's inner layout */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
#ifdef CONFIG_INET
#include <net/sock.h>
#include <net/ip.h>

static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
void sock_update_memcg(struct sock *sk)
{
	if (static_branch(&memcg_socket_limit_enabled)) {
		struct mem_cgroup *memcg;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't, however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			mem_cgroup_get(sk->sk_cgrp->memcg);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		if (!mem_cgroup_is_root(memcg)) {
			mem_cgroup_get(memcg);
			sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		mem_cgroup_put(memcg);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem.cg_proto;
}
EXPORT_SYMBOL(tcp_proto_cgroup);
#endif /* CONFIG_INET */
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */

static void drain_all_stock_async(struct mem_cgroup *memcg);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
{
	return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return mem_cgroup_zoneinfo(memcg, nid, zid);
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
	spin_unlock(&mctz->lock);
}
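
/*
 * The soft limit tree built by __mem_cgroup_insert_exceeded() above is
 * ordered by usage_in_excess, so the rightmost node is always the worst
 * offender. For example, with three groups whose usage exceeds their
 * soft limits by 4M, 8M and 16M (illustrative numbers), rb_last() in
 * __mem_cgroup_largest_soft_limit_node() below returns the 16M group,
 * and soft limit reclaim targets it first.
 */
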
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
		excess = res_counter_soft_limit_excess(&memcg->res);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node_state(node, N_POSSIBLE) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(memcg, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(memcg, mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
		!css_tryget(&mz->mem->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and do periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of value. Then, we may have a chance to implement
 * a periodic synchronization of counters in memcg's counter.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact value
 * because the accounting matters. Even if we provided a quick-and-fuzzy read,
 * we would always have to visit all online cpus and make the sum. So, for
 * now, the synchronization is not implemented as unnecessary. (it is just
 * implemented for cpu hotplug)
 *
 * If there are kernel-internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, a threshold and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 bool file, int nr_pages)
{
	preempt_disable();

	if (file)
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);

	preempt_enable();
}

unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			unsigned int lru_mask)
{
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	unsigned long ret = 0;

	mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	for_each_lru(l) {
		if (BIT(l) & lru_mask)
			ret += MEM_CGROUP_ZSTAT(mz, l);
	}
	return ret;
}

static unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			int nid, unsigned int lru_mask)
{
	u64 total = 0;
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		total += mem_cgroup_zone_nr_lru_pages(memcg,
						nid, zid, lru_mask);

	return total;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
			unsigned int lru_mask)
{
	int nid;
	u64 total = 0;

	for_each_node_state(nid, N_HIGH_MEMORY)
		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return total;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}
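
/*
 * mem_cgroup_event_ratelimit() above borrows the time_after() idiom:
 * comparing "(long)next - (long)val < 0" instead of "val > next" stays
 * correct when the unsigned event counter eventually wraps around. A
 * standalone sketch of the idiom, with assumed values for illustration:
 */
#if 0	/* illustrative sketch, not compiled with this file */
#include <assert.h>

int main(void)
{
	unsigned long val = 5;				/* counter just wrapped */
	unsigned long next = (unsigned long)-120;	/* target set before the wrap */

	/* a naive comparison is fooled by the wrap... */
	assert(!(val > next));
	/* ...while the signed difference still sees that val passed next */
	assert((long)next - (long)val < 0);
	return 0;
}
#endif
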
/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	preempt_disable();
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit, do_numainfo;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		preempt_enable();

		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	} else
		preempt_enable();
}

struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	if (!mm)
		return NULL;
	/*
	 * Because we have no locks, mm->owner may be being moved to another
	 * cgroup. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			break;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup *memcg = NULL;
	int id = 0;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		id = css_id(&prev->css);

	if (prev && prev != root)
		css_put(&prev->css);

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			return NULL;
		return root;
	}

	while (!memcg) {
		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
		struct cgroup_subsys_state *css;

		if (reclaim) {
			int nid = zone_to_nid(reclaim->zone);
			int zid = zone_idx(reclaim->zone);
			struct mem_cgroup_per_zone *mz;

			mz = mem_cgroup_zoneinfo(root, nid, zid);
			iter = &mz->reclaim_iter[reclaim->priority];
			if (prev && reclaim->generation != iter->generation)
				return NULL;
			id = iter->position;
		}

		rcu_read_lock();
		css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
		if (css) {
			if (css == &root->css || css_tryget(css))
				memcg = container_of(css,
						     struct mem_cgroup, css);
		} else
			id = 0;
		rcu_read_unlock();

		if (reclaim) {
			iter->position = id;
			if (!css)
				iter->generation++;
			else if (!prev && memcg)
				reclaim->generation = iter->generation;
		}

		if (prev && !css)
			return NULL;
	}
	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (!mm)
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!memcg))
		goto out;

	switch (idx) {
	case PGFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
		break;
	case PGMAJFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(mem_cgroup_count_vm_event);
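
/*
 * A typical use of the iterator pair defined above: walking a subtree
 * and bailing out early. Because each step holds a css reference on the
 * memcg it returned, a premature break must go through
 * mem_cgroup_iter_break() to drop that reference. A minimal sketch of
 * the pattern, mirroring what mem_cgroup_oom_lock() does further below:
 */
#if 0	/* illustrative sketch, not compiled with this file */
static bool subtree_under_oom(struct mem_cgroup *root)
{
	struct mem_cgroup *iter;
	bool ret = false;

	for_each_mem_cgroup_tree(iter, root) {
		if (atomic_read(&iter->under_oom)) {
			ret = true;
			/* exiting early: drop the reference iter still holds */
			mem_cgroup_iter_break(root, iter);
			break;
		}
	}
	return ret;
}
#endif
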
/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @memcg. This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return &zone->lruvec;

	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
	return &mz->lruvec;
}

/*
 * The following LRU functions may be used without PCG_LOCK. They are
 * called by the global LRU routines independently of memcg. What we have
 * to take care of here is the validity of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charge
 * 2. moving account
 * In the typical case, "charge" is done before add-to-lru. The exception is
 * SwapCache, which is added to the LRU before being charged.
 * If the PCG_USED bit is not set, the page_cgroup is not added to this
 * private LRU. When moving account, the page is not on the LRU. It's isolated.
 */

/**
 * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
 * @zone: zone of the page
 * @page: the page
 * @lru: current lru
 *
 * This function accounts for @page being added to @lru, and returns
 * the lruvec for the given @zone and the memcg @page is charged to.
 *
 * The callsite is then responsible for physically linking the page to
 * the returned lruvec->lists[@lru].
 */
struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
				       enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return &zone->lruvec;

	pc = lookup_page_cgroup(page);
	VM_BUG_ON(PageCgroupAcctLRU(pc));
	/*
	 * putback:				charge:
	 * SetPageLRU				SetPageCgroupUsed
	 * smp_mb				smp_mb
	 * PageCgroupUsed && add to memcg LRU	PageLRU && add to memcg LRU
	 *
	 * Ensure that one of the two sides adds the page to the memcg
	 * LRU during a race.
	 */
	smp_mb();
	/*
	 * If the page is uncharged, it may be freed soon, but it
	 * could also be swap cache (readahead, swapoff) that needs to
	 * be reclaimable in the future.  root_mem_cgroup will babysit
	 * it for the time being.
	 */
	if (PageCgroupUsed(pc)) {
		/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
		smp_rmb();
		memcg = pc->mem_cgroup;
		SetPageCgroupAcctLRU(pc);
	} else
		memcg = root_mem_cgroup;
	mz = page_cgroup_zoneinfo(memcg, page);
	/* compound_order() is stabilized through lru_lock */
	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
	return &mz->lruvec;
}

/**
 * mem_cgroup_lru_del_list - account for removing an lru page
 * @page: the page
 * @lru: target lru
 *
 * This function accounts for @page being removed from @lru.
 *
 * The callsite is then responsible for physically unlinking
 * @page->lru.
 */
void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/*
	 * root_mem_cgroup babysits uncharged LRU pages, but
	 * PageCgroupUsed is cleared when the page is about to get
	 * freed.  PageCgroupAcctLRU remembers whether the
	 * LRU-accounting happened against pc->mem_cgroup or
	 * root_mem_cgroup.
	 */
	if (TestClearPageCgroupAcctLRU(pc)) {
		VM_BUG_ON(!pc->mem_cgroup);
		memcg = pc->mem_cgroup;
	} else
		memcg = root_mem_cgroup;
	mz = page_cgroup_zoneinfo(memcg, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
}

void mem_cgroup_lru_del(struct page *page)
{
	mem_cgroup_lru_del_list(page, page_lru(page));
}

/**
 * mem_cgroup_lru_move_lists - account for moving a page between lrus
 * @zone: zone of the page
 * @page: the page
 * @from: current lru
 * @to: target lru
 *
 * This function accounts for @page being moved between the lrus @from
 * and @to, and returns the lruvec for the given @zone and the memcg
 * @page is charged to.
 *
 * The callsite is then responsible for physically relinking
 * @page->lru to the returned lruvec->lists[@to].
 */
struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
					 struct page *page,
					 enum lru_list from,
					 enum lru_list to)
{
	/* XXX: Optimize this, especially for @from == @to */
	mem_cgroup_lru_del_list(page, from);
	return mem_cgroup_lru_add_list(zone, page, to);
}

/*
 * When handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed
 * while the page is linked to an lru, because the page may be reused after
 * it's fully uncharged. To handle that, unlink the page_cgroup from the LRU
 * when charging it again. This is done under lock_page, and it is expected
 * that zone->lru_lock is never held.
 */
static void mem_cgroup_lru_del_before_commit(struct page *page)
{
	enum lru_list lru;
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/*
	 * Doing this check without taking ->lru_lock seems wrong but this
	 * is safe. Because if page_cgroup's USED bit is unset, the page
	 * will not be added to any memcg's LRU. If page_cgroup's USED bit is
	 * set, the commit after this will fail, anyway.
	 * This whole charge/uncharge is done under some mutual exclusion.
	 * So, we don't need to take care of changes in the USED bit.
	 */
	if (likely(!PageLRU(page)))
		return;

	spin_lock_irqsave(&zone->lru_lock, flags);
	lru = page_lru(page);
	/*
	 * The uncharged page could still be registered to the LRU of
	 * the stale pc->mem_cgroup.
	 *
	 * As pc->mem_cgroup is about to get overwritten, the old LRU
	 * accounting needs to be taken care of.  Let root_mem_cgroup
	 * babysit the page until the new memcg is responsible for it.
	 *
	 * The PCG_USED bit is guarded by lock_page() as the page is
	 * swapcache/pagecache.
	 */
	if (PageLRU(page) && PageCgroupAcctLRU(pc) && !PageCgroupUsed(pc)) {
		del_page_from_lru_list(zone, page, lru);
		add_page_to_lru_list(zone, page, lru);
	}
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}

static void mem_cgroup_lru_add_after_commit(struct page *page)
{
	enum lru_list lru;
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);
	/*
	 * putback:				charge:
	 * SetPageLRU				SetPageCgroupUsed
	 * smp_mb				smp_mb
	 * PageCgroupUsed && add to memcg LRU	PageLRU && add to memcg LRU
	 *
	 * Ensure that one of the two sides adds the page to the memcg
	 * LRU during a race.
	 */
	smp_mb();
	/* taking care of that the page is added to LRU while we commit it */
	if (likely(!PageLRU(page)))
		return;
	spin_lock_irqsave(&zone->lru_lock, flags);
	lru = page_lru(page);
	/*
	 * If the page is not on the LRU, someone will soon put it
	 * there.  If it is, and also already accounted for on the
	 * memcg-side, it must be on the right lruvec as setting
	 * pc->mem_cgroup and PageCgroupUsed is properly ordered.
	 * Otherwise, root_mem_cgroup has been babysitting the page
	 * during the charge.  Move it to the new memcg now.
	 */
	if (PageLRU(page) && !PageCgroupAcctLRU(pc)) {
		del_page_from_lru_list(zone, page, lru);
		add_page_to_lru_list(zone, page, lru);
	}
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}

/*
 * Checks whether the given mem is the same as, or within,
 * root_mem_cgroup's hierarchy subtree.
 */
static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
		struct mem_cgroup *memcg)
{
	if (root_memcg != memcg) {
		return (root_memcg->use_hierarchy &&
			css_is_ancestor(&memcg->css, &root_memcg->css));
	}

	return true;
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
{
	int ret;
	struct mem_cgroup *curr = NULL;
	struct task_struct *p;

	p = find_lock_task_mm(task);
	if (!p)
		return 0;
	curr = try_get_mem_cgroup_from_mm(p->mm);
	task_unlock(p);
	if (!curr)
		return 0;
	/*
	 * We should check use_hierarchy of "memcg", not "curr". Because
	 * checking use_hierarchy of "curr" here would make this function
	 * return true if hierarchy is enabled in "curr" and "curr" is a
	 * child of "memcg" in the *cgroup* hierarchy (even if use_hierarchy
	 * is disabled in "memcg").
	 */
	ret = mem_cgroup_same_or_subtree(memcg, curr);
	css_put(&curr->css);
	return ret;
}

int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	unsigned long inactive_ratio;
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);
	unsigned long inactive;
	unsigned long active;
	unsigned long gb;

	inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
						BIT(LRU_INACTIVE_ANON));
	active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
					      BIT(LRU_ACTIVE_ANON));

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	return inactive * inactive_ratio < active;
}

int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	unsigned long active;
	unsigned long inactive;
	int zid = zone_idx(zone);
	int nid = zone_to_nid(zone);

	inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
						BIT(LRU_INACTIVE_FILE));
	active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
					      BIT(LRU_ACTIVE_FILE));

	return (active > inactive);
}

struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	return &mz->reclaim_stat;
}

struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return NULL;

	pc = lookup_page_cgroup(page);
	if (!PageCgroupUsed(pc))
		return NULL;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	return &mz->reclaim_stat;
}

#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long long margin;

	margin = res_counter_margin(&memcg->res);
	if (do_swap_account)
		margin = min(margin, res_counter_margin(&memcg->memsw));
	return margin >> PAGE_SHIFT;
}
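
/*
 * Worked example for mem_cgroup_inactive_anon_is_low() above, with an
 * illustrative number: with 8GB of anonymous pages in the zone, gb = 8
 * and inactive_ratio = int_sqrt(10 * 8) = int_sqrt(80) = 8, so the
 * inactive anon list is considered low while inactive * 8 < active.
 * Below 1GB total, gb is 0 and the ratio degenerates to 1, i.e. a plain
 * inactive < active test.
 */
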
int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	struct cgroup *cgrp = memcg->css.cgroup;

	/* root ? */
	if (cgrp->parent == NULL)
		return vm_swappiness;

	return memcg->swappiness;
}

static void mem_cgroup_start_move(struct mem_cgroup *memcg)
{
	int cpu;

	get_online_cpus();
	spin_lock(&memcg->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
	memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
	spin_unlock(&memcg->pcp_counter_lock);
	put_online_cpus();

	synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *memcg)
{
	int cpu;

	if (!memcg)
		return;
	get_online_cpus();
	spin_lock(&memcg->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
	memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
	spin_unlock(&memcg->pcp_counter_lock);
	put_online_cpus();
}
/*
 * 2 routines for checking whether "mem" is under move_account() or not.
 *
 * mem_cgroup_stealed() - checking whether a cgroup is mc.from or not. This
 *			  is used for avoiding races in accounting. If true,
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checking whether a cgroup is mc.from, mc.to or
 *			  under the hierarchy of moving cgroups. This is for
 *			  waiting at high memory pressure caused by "move".
 */

static bool mem_cgroup_stealed(struct mem_cgroup *memcg)
{
	VM_BUG_ON(!rcu_read_lock_held());
	return this_cpu_read(memcg->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
}

static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_same_or_subtree(memcg, from)
		|| mem_cgroup_same_or_subtree(memcg, to);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

/**
 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct cgroup *task_cgrp;
	struct cgroup *mem_cgrp;
	/*
	 * Need a buffer in BSS, can't rely on allocations. The code relies
	 * on the assumption that OOM is serialized for the memory controller.
	 * If this assumption is broken, revisit this code.
	 */
	static char memcg_name[PATH_MAX];
	int ret;

	if (!memcg || !p)
		return;

	rcu_read_lock();

	mem_cgrp = memcg->css.cgroup;
	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		/*
		 * Unfortunately, we are unable to convert to a useful name,
		 * but we'll still print out the usage information.
		 */
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	printk(KERN_INFO "Task in %s killed", memcg_name);

	rcu_read_lock();
	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	/*
	 * Continues from above, so we don't need a KERN_ level.
	 */
	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
done:

	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->res, RES_FAILCNT));
	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
		"failcnt %llu\n",
		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
}

/*
 * This function returns the number of memcgs under the hierarchy tree.
 * Returns 1 (self count) if there are no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	u64 limit;
	u64 memsw;

	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	limit += total_swap_pages << PAGE_SHIFT;

	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	/*
	 * If memsw is finite and limits the amount of swap space available
	 * to this memcg, return that limit.
	 */
	return min(limit, memsw);
}

static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
					gfp_t gfp_mask,
					unsigned long flags)
{
	unsigned long total = 0;
	bool noswap = false;
	int loop;

	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
		noswap = true;
	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
		noswap = true;

	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
		if (loop)
			drain_all_stock_async(memcg);
		total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
		/*
		 * Allow limit shrinkers, which are triggered directly
		 * by userspace, to catch signals and stop reclaim
		 * after minimal progress, regardless of the margin.
		 */
		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
			break;
		if (mem_cgroup_margin(memcg))
			break;
		/*
		 * If nothing was reclaimed after two attempts, there
		 * may be no reclaimable pages in this hierarchy.
		 */
		if (loop && !total)
			break;
	}
	return total;
}
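
/*
 * Worked example for mem_cgroup_get_limit() above, with illustrative
 * numbers: with memory.limit_in_bytes = 1G, 4G of total swap and
 * memsw.limit_in_bytes = 2G, the first candidate is 1G + 4G = 5G, and
 * min(5G, 2G) yields 2G as the effective limit reported for this group,
 * since the memsw limit caps how much of that swap is actually usable.
 */
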
/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap : specify true here if the user wants file-only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;

}
#if MAX_NUMNODES > 1

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_HIGH_MEMORY];

	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {

		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons:
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make the LRU bad. And if several threads
 * hit their limits, they will see contention on a node. But freeing from a
 * remote node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcomed.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node(node, memcg->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(memcg->scan_nodes);
	/*
	 * We call this when we hit the limit, not when pages are added to the
	 * LRU. No LRU may hold pages because all pages are UNEVICTABLE or
	 * the memcg is too small and all pages are not on the LRU. In that
	 * case, we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
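
/*
 * The round-robin in mem_cgroup_select_victim_node() above relies on
 * next_node() returning MAX_NUMNODES past the last set node, at which
 * point the scan wraps to first_node(). The same pattern over a plain
 * bitmask, as a standalone analogy with assumed values, not kernel code:
 */
#if 0	/* illustrative sketch, not compiled with this file */
#include <stdio.h>

#define MAX_NODES 8

/* next set bit strictly after 'node', or MAX_NODES if there is none */
static int next_set(int node, unsigned mask)
{
	for (node++; node < MAX_NODES; node++)
		if (mask & (1u << node))
			return node;
	return MAX_NODES;
}

int main(void)
{
	unsigned scan_nodes = 0x16;	/* nodes 1, 2 and 4 are eligible */
	int node = 4;			/* last scanned node */

	node = next_set(node, scan_nodes);
	if (node == MAX_NODES)		/* ran off the end: wrap around */
		node = next_set(-1, scan_nodes);
	printf("next victim node: %d\n", node);	/* prints 1 */
	return 0;
}
#endif
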
1679 */ 1680 if (!nodes_empty(memcg->scan_nodes)) { 1681 for (nid = first_node(memcg->scan_nodes); 1682 nid < MAX_NUMNODES; 1683 nid = next_node(nid, memcg->scan_nodes)) { 1684 1685 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) 1686 return true; 1687 } 1688 } 1689 /* 1690 * Check rest of nodes. 1691 */ 1692 for_each_node_state(nid, N_HIGH_MEMORY) { 1693 if (node_isset(nid, memcg->scan_nodes)) 1694 continue; 1695 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) 1696 return true; 1697 } 1698 return false; 1699} 1700 1701#else 1702int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1703{ 1704 return 0; 1705} 1706 1707bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) 1708{ 1709 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap); 1710} 1711#endif 1712 1713static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, 1714 struct zone *zone, 1715 gfp_t gfp_mask, 1716 unsigned long *total_scanned) 1717{ 1718 struct mem_cgroup *victim = NULL; 1719 int total = 0; 1720 int loop = 0; 1721 unsigned long excess; 1722 unsigned long nr_scanned; 1723 struct mem_cgroup_reclaim_cookie reclaim = { 1724 .zone = zone, 1725 .priority = 0, 1726 }; 1727 1728 excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT; 1729 1730 while (1) { 1731 victim = mem_cgroup_iter(root_memcg, victim, &reclaim); 1732 if (!victim) { 1733 loop++; 1734 if (loop >= 2) { 1735 /* 1736 * If we have not been able to reclaim 1737 * anything, it might because there are 1738 * no reclaimable pages under this hierarchy 1739 */ 1740 if (!total) 1741 break; 1742 /* 1743 * We want to do more targeted reclaim. 1744 * excess >> 2 is not to excessive so as to 1745 * reclaim too much, nor too less that we keep 1746 * coming back to reclaim from this cgroup 1747 */ 1748 if (total >= (excess >> 2) || 1749 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) 1750 break; 1751 } 1752 continue; 1753 } 1754 if (!mem_cgroup_reclaimable(victim, false)) 1755 continue; 1756 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false, 1757 zone, &nr_scanned); 1758 *total_scanned += nr_scanned; 1759 if (!res_counter_soft_limit_excess(&root_memcg->res)) 1760 break; 1761 } 1762 mem_cgroup_iter_break(root_memcg, victim); 1763 return total; 1764} 1765 1766/* 1767 * Check OOM-Killer is already running under our hierarchy. 1768 * If someone is running, return false. 1769 * Has to be called with memcg_oom_lock 1770 */ 1771static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg) 1772{ 1773 struct mem_cgroup *iter, *failed = NULL; 1774 1775 for_each_mem_cgroup_tree(iter, memcg) { 1776 if (iter->oom_lock) { 1777 /* 1778 * this subtree of our hierarchy is already locked 1779 * so we cannot give a lock. 
1780 */ 1781 failed = iter; 1782 mem_cgroup_iter_break(memcg, iter); 1783 break; 1784 } else 1785 iter->oom_lock = true; 1786 } 1787 1788 if (!failed) 1789 return true; 1790 1791 /* 1792 * OK, we failed to lock the whole subtree so we have to clean up 1793 * what we set up to the failing subtree 1794 */ 1795 for_each_mem_cgroup_tree(iter, memcg) { 1796 if (iter == failed) { 1797 mem_cgroup_iter_break(memcg, iter); 1798 break; 1799 } 1800 iter->oom_lock = false; 1801 } 1802 return false; 1803} 1804 1805/* 1806 * Has to be called with memcg_oom_lock 1807 */ 1808static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1809{ 1810 struct mem_cgroup *iter; 1811 1812 for_each_mem_cgroup_tree(iter, memcg) 1813 iter->oom_lock = false; 1814 return 0; 1815} 1816 1817static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1818{ 1819 struct mem_cgroup *iter; 1820 1821 for_each_mem_cgroup_tree(iter, memcg) 1822 atomic_inc(&iter->under_oom); 1823} 1824 1825static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1826{ 1827 struct mem_cgroup *iter; 1828 1829 /* 1830 * When a new child is created while the hierarchy is under oom, 1831 * mem_cgroup_oom_lock() may not be called. We have to use 1832 * atomic_add_unless() here. 1833 */ 1834 for_each_mem_cgroup_tree(iter, memcg) 1835 atomic_add_unless(&iter->under_oom, -1, 0); 1836} 1837 1838static DEFINE_SPINLOCK(memcg_oom_lock); 1839static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1840 1841struct oom_wait_info { 1842 struct mem_cgroup *mem; 1843 wait_queue_t wait; 1844}; 1845 1846static int memcg_oom_wake_function(wait_queue_t *wait, 1847 unsigned mode, int sync, void *arg) 1848{ 1849 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg, 1850 *oom_wait_memcg; 1851 struct oom_wait_info *oom_wait_info; 1852 1853 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1854 oom_wait_memcg = oom_wait_info->mem; 1855 1856 /* 1857 * Both of oom_wait_info->mem and wake_mem are stable under us. 1858 * Then we can use css_is_ancestor without taking care of RCU. 1859 */ 1860 if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg) 1861 && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg)) 1862 return 0; 1863 return autoremove_wake_function(wait, mode, sync, arg); 1864} 1865 1866static void memcg_wakeup_oom(struct mem_cgroup *memcg) 1867{ 1868 /* for filtering, pass "memcg" as argument. */ 1869 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1870} 1871 1872static void memcg_oom_recover(struct mem_cgroup *memcg) 1873{ 1874 if (memcg && atomic_read(&memcg->under_oom)) 1875 memcg_wakeup_oom(memcg); 1876} 1877 1878/* 1879 * try to call OOM killer. returns false if we should exit memory-reclaim loop. 1880 */ 1881bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask) 1882{ 1883 struct oom_wait_info owait; 1884 bool locked, need_to_kill; 1885 1886 owait.mem = memcg; 1887 owait.wait.flags = 0; 1888 owait.wait.func = memcg_oom_wake_function; 1889 owait.wait.private = current; 1890 INIT_LIST_HEAD(&owait.wait.task_list); 1891 need_to_kill = true; 1892 mem_cgroup_mark_under_oom(memcg); 1893 1894 /* At first, try to OOM lock hierarchy under memcg.*/ 1895 spin_lock(&memcg_oom_lock); 1896 locked = mem_cgroup_oom_lock(memcg); 1897 /* 1898 * Even if signal_pending(), we can't quit charge() loop without 1899 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL 1900 * under OOM is always welcomed, use TASK_KILLABLE here. 
1901 */
1902	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1903	if (!locked || memcg->oom_kill_disable)
1904		need_to_kill = false;
1905	if (locked)
1906		mem_cgroup_oom_notify(memcg);
1907	spin_unlock(&memcg_oom_lock);
1908
1909	if (need_to_kill) {
1910		finish_wait(&memcg_oom_waitq, &owait.wait);
1911		mem_cgroup_out_of_memory(memcg, mask);
1912	} else {
1913		schedule();
1914		finish_wait(&memcg_oom_waitq, &owait.wait);
1915	}
1916	spin_lock(&memcg_oom_lock);
1917	if (locked)
1918		mem_cgroup_oom_unlock(memcg);
1919	memcg_wakeup_oom(memcg);
1920	spin_unlock(&memcg_oom_lock);
1921
1922	mem_cgroup_unmark_under_oom(memcg);
1923
1924	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1925		return false;
1926	/* Give a chance to the dying process */
1927	schedule_timeout_uninterruptible(1);
1928	return true;
1929}
1930
1931/*
1932 * Currently used to update mapped file statistics, but the routine can be
1933 * generalized to update other statistics as well.
1934 *
1935 * Notes: race conditions
1936 *
1937 * We usually use lock_page_cgroup() for accessing page_cgroup members, but
1938 * it tends to be costly. Under certain conditions, however, we don't need
1939 * to do so _always_.
1940 *
1941 * Considering "charge", lock_page_cgroup() is not required because all
1942 * file-stat operations happen after a page is attached to the radix-tree.
1943 * There is no race with "charge".
1944 *
1945 * Considering "uncharge", we know that memcg intentionally does not clear
1946 * pc->mem_cgroup at "uncharge". So, we always see a valid pc->mem_cgroup
1947 * even if we race with "uncharge". The statistics themselves are handled
1948 * properly via flags.
1949 *
1950 * Considering "move", this is the only case where we see a race. To keep
1951 * the race window small, we check the MEM_CGROUP_ON_MOVE percpu value and
1952 * detect the possibility of a race. If there is one, we take a lock.
1953 */
1954
1955void mem_cgroup_update_page_stat(struct page *page,
1956				 enum mem_cgroup_page_stat_item idx, int val)
1957{
1958	struct mem_cgroup *memcg;
1959	struct page_cgroup *pc = lookup_page_cgroup(page);
1960	bool need_unlock = false;
1961	unsigned long uninitialized_var(flags);
1962
1963	if (mem_cgroup_disabled())
1964		return;
1965
1966	rcu_read_lock();
1967	memcg = pc->mem_cgroup;
1968	if (unlikely(!memcg || !PageCgroupUsed(pc)))
1969		goto out;
1970	/* pc->mem_cgroup is unstable ? */
1971	if (unlikely(mem_cgroup_stealed(memcg)) || PageTransHuge(page)) {
1972		/* take a lock to access pc->mem_cgroup safely */
1973		move_lock_page_cgroup(pc, &flags);
1974		need_unlock = true;
1975		memcg = pc->mem_cgroup;
1976		if (!memcg || !PageCgroupUsed(pc))
1977			goto out;
1978	}
1979
1980	switch (idx) {
1981	case MEMCG_NR_FILE_MAPPED:
1982		if (val > 0)
1983			SetPageCgroupFileMapped(pc);
1984		else if (!page_mapped(page))
1985			ClearPageCgroupFileMapped(pc);
1986		idx = MEM_CGROUP_STAT_FILE_MAPPED;
1987		break;
1988	default:
1989		BUG();
1990	}
1991
1992	this_cpu_add(memcg->stat->count[idx], val);
1993
1994out:
1995	if (unlikely(need_unlock))
1996		move_unlock_page_cgroup(pc, &flags);
1997	rcu_read_unlock();
1998	return;
1999}
2000EXPORT_SYMBOL(mem_cgroup_update_page_stat);
2001
2002/*
2003 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
2004 * TODO: bigger numbers may be necessary on big irons.
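 *
 * A worked illustration (not from the original comment): with
 * CHARGE_BATCH == 32, the first single-page charge on a cpu charges
 * 32 pages to the res_counter at once and parks the 31-page surplus
 * in the per-cpu stock below:
 *
 *	charge 1 page -> res_counter += 32 pages, stock = 31 pages
 *	charge 1 page -> stock = 30 pages, res_counter untouched
 *	   (... 29 more cheap charges ...)
 *	charge 1 page -> stock empty, charge a fresh 32-page batch
 *
 * See consume_stock(), refill_stock() and __mem_cgroup_try_charge().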
2005 */ 2006#define CHARGE_BATCH 32U 2007struct memcg_stock_pcp { 2008 struct mem_cgroup *cached; /* this never be root cgroup */ 2009 unsigned int nr_pages; 2010 struct work_struct work; 2011 unsigned long flags; 2012#define FLUSHING_CACHED_CHARGE (0) 2013}; 2014static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2015static DEFINE_MUTEX(percpu_charge_mutex); 2016 2017/* 2018 * Try to consume stocked charge on this cpu. If success, one page is consumed 2019 * from local stock and true is returned. If the stock is 0 or charges from a 2020 * cgroup which is not current target, returns false. This stock will be 2021 * refilled. 2022 */ 2023static bool consume_stock(struct mem_cgroup *memcg) 2024{ 2025 struct memcg_stock_pcp *stock; 2026 bool ret = true; 2027 2028 stock = &get_cpu_var(memcg_stock); 2029 if (memcg == stock->cached && stock->nr_pages) 2030 stock->nr_pages--; 2031 else /* need to call res_counter_charge */ 2032 ret = false; 2033 put_cpu_var(memcg_stock); 2034 return ret; 2035} 2036 2037/* 2038 * Returns stocks cached in percpu to res_counter and reset cached information. 2039 */ 2040static void drain_stock(struct memcg_stock_pcp *stock) 2041{ 2042 struct mem_cgroup *old = stock->cached; 2043 2044 if (stock->nr_pages) { 2045 unsigned long bytes = stock->nr_pages * PAGE_SIZE; 2046 2047 res_counter_uncharge(&old->res, bytes); 2048 if (do_swap_account) 2049 res_counter_uncharge(&old->memsw, bytes); 2050 stock->nr_pages = 0; 2051 } 2052 stock->cached = NULL; 2053} 2054 2055/* 2056 * This must be called under preempt disabled or must be called by 2057 * a thread which is pinned to local cpu. 2058 */ 2059static void drain_local_stock(struct work_struct *dummy) 2060{ 2061 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock); 2062 drain_stock(stock); 2063 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2064} 2065 2066/* 2067 * Cache charges(val) which is from res_counter, to local per_cpu area. 2068 * This will be consumed by consume_stock() function, later. 2069 */ 2070static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2071{ 2072 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); 2073 2074 if (stock->cached != memcg) { /* reset if necessary */ 2075 drain_stock(stock); 2076 stock->cached = memcg; 2077 } 2078 stock->nr_pages += nr_pages; 2079 put_cpu_var(memcg_stock); 2080} 2081 2082/* 2083 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2084 * of the hierarchy under it. sync flag says whether we should block 2085 * until the work is done. 
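 *
 * Callers go through the wrappers below: drain_all_stock_async() on
 * reclaim paths (mutex_trylock, so concurrent drainers don't pile up
 * kworker runs) and drain_all_stock_sync() on the force_empty path.
 * E.g. force_empty in this file does:
 *
 *	lru_add_drain_all();
 *	drain_all_stock_sync(memcg);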
2086 */
2087static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2088{
2089	int cpu, curcpu;
2090
2091	/* Notify other cpus that a system-wide "drain" is running */
2092	get_online_cpus();
2093	curcpu = get_cpu();
2094	for_each_online_cpu(cpu) {
2095		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2096		struct mem_cgroup *memcg;
2097
2098		memcg = stock->cached;
2099		if (!memcg || !stock->nr_pages)
2100			continue;
2101		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2102			continue;
2103		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2104			if (cpu == curcpu)
2105				drain_local_stock(&stock->work);
2106			else
2107				schedule_work_on(cpu, &stock->work);
2108		}
2109	}
2110	put_cpu();
2111
2112	if (!sync)
2113		goto out;
2114
2115	for_each_online_cpu(cpu) {
2116		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2117		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2118			flush_work(&stock->work);
2119	}
2120out:
2121	put_online_cpus();
2122}
2123
2124/*
2125 * Tries to drain stocked charges on other cpus. This function is asynchronous
2126 * and just queues one work item per cpu to do the draining locally on each
2127 * cpu. The caller can expect some charges to come back to the res_counter
2128 * later, but cannot wait for that to happen.
2129 */
2130static void drain_all_stock_async(struct mem_cgroup *root_memcg)
2131{
2132	/*
2133	 * If someone is already draining, avoid adding more kworker runs.
2134	 */
2135	if (!mutex_trylock(&percpu_charge_mutex))
2136		return;
2137	drain_all_stock(root_memcg, false);
2138	mutex_unlock(&percpu_charge_mutex);
2139}
2140
2141/* This is a synchronous drain interface. */
2142static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
2143{
2144	/* called when force_empty is called */
2145	mutex_lock(&percpu_charge_mutex);
2146	drain_all_stock(root_memcg, true);
2147	mutex_unlock(&percpu_charge_mutex);
2148}
2149
2150/*
2151 * This function drains the percpu counter values of a DEAD cpu and
2152 * moves them into the memcg's nocpu_base counters. Note that this
2153 * function can be preempted.
2154 */
2154static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2155{
2156	int i;
2157
2158	spin_lock(&memcg->pcp_counter_lock);
2159	for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
2160		long x = per_cpu(memcg->stat->count[i], cpu);
2161
2162		per_cpu(memcg->stat->count[i], cpu) = 0;
2163		memcg->nocpu_base.count[i] += x;
2164	}
2165	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2166		unsigned long x = per_cpu(memcg->stat->events[i], cpu);
2167
2168		per_cpu(memcg->stat->events[i], cpu) = 0;
2169		memcg->nocpu_base.events[i] += x;
2170	}
2171	/* need to clear the ON_MOVE value; it works as a kind of lock.
*/
2172	per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
2173	spin_unlock(&memcg->pcp_counter_lock);
2174}
2175
2176static void synchronize_mem_cgroup_on_move(struct mem_cgroup *memcg, int cpu)
2177{
2178	int idx = MEM_CGROUP_ON_MOVE;
2179
2180	spin_lock(&memcg->pcp_counter_lock);
2181	per_cpu(memcg->stat->count[idx], cpu) = memcg->nocpu_base.count[idx];
2182	spin_unlock(&memcg->pcp_counter_lock);
2183}
2184
2185static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
2186					unsigned long action,
2187					void *hcpu)
2188{
2189	int cpu = (unsigned long)hcpu;
2190	struct memcg_stock_pcp *stock;
2191	struct mem_cgroup *iter;
2192
2193	if (action == CPU_ONLINE) {
2194		for_each_mem_cgroup(iter)
2195			synchronize_mem_cgroup_on_move(iter, cpu);
2196		return NOTIFY_OK;
2197	}
2198
2199	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2200		return NOTIFY_OK;
2201
2202	for_each_mem_cgroup(iter)
2203		mem_cgroup_drain_pcp_counter(iter, cpu);
2204
2205	stock = &per_cpu(memcg_stock, cpu);
2206	drain_stock(stock);
2207	return NOTIFY_OK;
2208}
2209
2210
2211/* See __mem_cgroup_try_charge() for details */
2212enum {
2213	CHARGE_OK,		/* success */
2214	CHARGE_RETRY,		/* need to retry but retry is not bad */
2215	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
2216	CHARGE_WOULDBLOCK,	/* __GFP_WAIT wasn't set and not enough res. */
2217	CHARGE_OOM_DIE,		/* the current is killed because of OOM */
2218};
2219
2220static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2221				unsigned int nr_pages, bool oom_check)
2222{
2223	unsigned long csize = nr_pages * PAGE_SIZE;
2224	struct mem_cgroup *mem_over_limit;
2225	struct res_counter *fail_res;
2226	unsigned long flags = 0;
2227	int ret;
2228
2229	ret = res_counter_charge(&memcg->res, csize, &fail_res);
2230
2231	if (likely(!ret)) {
2232		if (!do_swap_account)
2233			return CHARGE_OK;
2234		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
2235		if (likely(!ret))
2236			return CHARGE_OK;
2237
2238		res_counter_uncharge(&memcg->res, csize);
2239		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2240		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2241	} else
2242		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2243	/*
2244	 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
2245	 * of regular pages (CHARGE_BATCH), or a single regular page (1).
2246	 *
2247	 * Never reclaim on behalf of optional batching, retry with a
2248	 * single page instead.
2249	 */
2250	if (nr_pages == CHARGE_BATCH)
2251		return CHARGE_RETRY;
2252
2253	if (!(gfp_mask & __GFP_WAIT))
2254		return CHARGE_WOULDBLOCK;
2255
2256	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
2257	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2258		return CHARGE_RETRY;
2259	/*
2260	 * Even though the limit is exceeded at this point, reclaim
2261	 * may have been able to free some pages. Retry the charge
2262	 * before killing the task.
2263	 *
2264	 * Only for regular pages, though: huge pages are rather
2265	 * unlikely to succeed so close to the limit, and we fall back
2266	 * to regular pages anyway in case of failure.
2267	 */
2268	if (nr_pages == 1 && ret)
2269		return CHARGE_RETRY;
2270
2271	/*
2272	 * At task move, charge accounts can be doubly counted. So, it's
2273	 * better to wait until the end of task_move if something is going on.
2274	 */
2275	if (mem_cgroup_wait_acct_move(mem_over_limit))
2276		return CHARGE_RETRY;
2277
2278	/* If we don't need to call the oom-killer at all, return immediately */
2279	if (!oom_check)
2280		return CHARGE_NOMEM;
2281	/* check OOM */
2282	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
2283		return CHARGE_OOM_DIE;
2284
2285	return CHARGE_RETRY;
2286}
2287
2288/*
2289 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
2290 * the oom-killer can be invoked.
2291 */
2292static int __mem_cgroup_try_charge(struct mm_struct *mm,
2293				   gfp_t gfp_mask,
2294				   unsigned int nr_pages,
2295				   struct mem_cgroup **ptr,
2296				   bool oom)
2297{
2298	unsigned int batch = max(CHARGE_BATCH, nr_pages);
2299	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2300	struct mem_cgroup *memcg = NULL;
2301	int ret;
2302
2303	/*
2304	 * Unlike the global VM's OOM-kill, we're not in a system-wide
2305	 * memory shortage here. So, let a dying process go ahead, in
2306	 * addition to a MEMDIE process.
2307	 */
2308	if (unlikely(test_thread_flag(TIF_MEMDIE)
2309		     || fatal_signal_pending(current)))
2310		goto bypass;
2311
2312	/*
2313	 * We always charge the cgroup the mm_struct belongs to.
2314	 * The mm_struct's mem_cgroup changes on task migration if the
2315	 * thread group leader migrates. It's possible that mm is not
2316	 * set, if so charge the init_mm (happens for pagecache usage).
2317	 */
2318	if (!*ptr && !mm)
2319		goto bypass;
2320again:
2321	if (*ptr) { /* css should be a valid one */
2322		memcg = *ptr;
2323		VM_BUG_ON(css_is_removed(&memcg->css));
2324		if (mem_cgroup_is_root(memcg))
2325			goto done;
2326		if (nr_pages == 1 && consume_stock(memcg))
2327			goto done;
2328		css_get(&memcg->css);
2329	} else {
2330		struct task_struct *p;
2331
2332		rcu_read_lock();
2333		p = rcu_dereference(mm->owner);
2334		/*
2335		 * Because we don't have task_lock(), "p" can exit.
2336		 * In that case, "memcg" can point to root or "p" can be NULL
2337		 * due to a race with swapoff. Then, we have a small risk of
2338		 * mis-accounting. But such mis-accounting by races always
2339		 * happens because we don't hold cgroup_mutex(). Preventing it
2340		 * would be overkill, so we allow that small race here.
2341		 * (*) swapoff and friends charge against the mm_struct, not
2342		 * against the task_struct. So, mm->owner can be NULL.
2343		 */
2344		memcg = mem_cgroup_from_task(p);
2345		if (!memcg || mem_cgroup_is_root(memcg)) {
2346			rcu_read_unlock();
2347			goto done;
2348		}
2349		if (nr_pages == 1 && consume_stock(memcg)) {
2350			/*
2351			 * It seems dangerous to access memcg without css_get().
2352			 * But considering how consume_stock works, it's not
2353			 * necessary. If consume_stock succeeds, some charges
2354			 * from this memcg are cached on this cpu. So, we
2355			 * don't need to call css_get()/css_tryget() before
2356			 * calling consume_stock().
2357			 */
2358			rcu_read_unlock();
2359			goto done;
2360		}
2361		/* after here, we may be blocked.
we need to get a refcnt */
2362		if (!css_tryget(&memcg->css)) {
2363			rcu_read_unlock();
2364			goto again;
2365		}
2366		rcu_read_unlock();
2367	}
2368
2369	do {
2370		bool oom_check;
2371
2372		/* If killed, bypass the charge */
2373		if (fatal_signal_pending(current)) {
2374			css_put(&memcg->css);
2375			goto bypass;
2376		}
2377
2378		oom_check = false;
2379		if (oom && !nr_oom_retries) {
2380			oom_check = true;
2381			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2382		}
2383
2384		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
2385		switch (ret) {
2386		case CHARGE_OK:
2387			break;
2388		case CHARGE_RETRY: /* not in OOM situation but retry */
2389			batch = nr_pages;
2390			css_put(&memcg->css);
2391			memcg = NULL;
2392			goto again;
2393		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2394			css_put(&memcg->css);
2395			goto nomem;
2396		case CHARGE_NOMEM: /* OOM routine works */
2397			if (!oom) {
2398				css_put(&memcg->css);
2399				goto nomem;
2400			}
2401			/* If oom, we never return -ENOMEM */
2402			nr_oom_retries--;
2403			break;
2404		case CHARGE_OOM_DIE: /* Killed by the OOM Killer */
2405			css_put(&memcg->css);
2406			goto bypass;
2407		}
2408	} while (ret != CHARGE_OK);
2409
2410	if (batch > nr_pages)
2411		refill_stock(memcg, batch - nr_pages);
2412	css_put(&memcg->css);
2413done:
2414	*ptr = memcg;
2415	return 0;
2416nomem:
2417	*ptr = NULL;
2418	return -ENOMEM;
2419bypass:
2420	*ptr = NULL;
2421	return 0;
2422}
2423
2424/*
2425 * Sometimes we have to undo a charge we got by try_charge().
2426 * This function does the uncharge and puts the css refcount
2427 * obtained by try_charge().
2428 */
2429static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2430				       unsigned int nr_pages)
2431{
2432	if (!mem_cgroup_is_root(memcg)) {
2433		unsigned long bytes = nr_pages * PAGE_SIZE;
2434
2435		res_counter_uncharge(&memcg->res, bytes);
2436		if (do_swap_account)
2437			res_counter_uncharge(&memcg->memsw, bytes);
2438	}
2439}
2440
2441/*
2442 * A helper function to get a mem_cgroup from an ID. Must be called under
2443 * rcu_read_lock(). The caller must check css_is_removed() or similar if it
2444 * is a concern. (Dropping a refcnt from swap can be called against a removed
2445 * memcg.)
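 *
 * A typical caller pattern, taken from mem_cgroup_uncharge_swap()
 * further below:
 *
 *	id = swap_cgroup_record(ent, 0);
 *	rcu_read_lock();
 *	memcg = mem_cgroup_lookup(id);
 *	if (memcg) {
 *		if (!mem_cgroup_is_root(memcg))
 *			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
 *		mem_cgroup_swap_statistics(memcg, false);
 *		mem_cgroup_put(memcg);
 *	}
 *	rcu_read_unlock();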
2446 */ 2447static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) 2448{ 2449 struct cgroup_subsys_state *css; 2450 2451 /* ID 0 is unused ID */ 2452 if (!id) 2453 return NULL; 2454 css = css_lookup(&mem_cgroup_subsys, id); 2455 if (!css) 2456 return NULL; 2457 return container_of(css, struct mem_cgroup, css); 2458} 2459 2460struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) 2461{ 2462 struct mem_cgroup *memcg = NULL; 2463 struct page_cgroup *pc; 2464 unsigned short id; 2465 swp_entry_t ent; 2466 2467 VM_BUG_ON(!PageLocked(page)); 2468 2469 pc = lookup_page_cgroup(page); 2470 lock_page_cgroup(pc); 2471 if (PageCgroupUsed(pc)) { 2472 memcg = pc->mem_cgroup; 2473 if (memcg && !css_tryget(&memcg->css)) 2474 memcg = NULL; 2475 } else if (PageSwapCache(page)) { 2476 ent.val = page_private(page); 2477 id = lookup_swap_cgroup(ent); 2478 rcu_read_lock(); 2479 memcg = mem_cgroup_lookup(id); 2480 if (memcg && !css_tryget(&memcg->css)) 2481 memcg = NULL; 2482 rcu_read_unlock(); 2483 } 2484 unlock_page_cgroup(pc); 2485 return memcg; 2486} 2487 2488static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, 2489 struct page *page, 2490 unsigned int nr_pages, 2491 struct page_cgroup *pc, 2492 enum charge_type ctype) 2493{ 2494 lock_page_cgroup(pc); 2495 if (unlikely(PageCgroupUsed(pc))) { 2496 unlock_page_cgroup(pc); 2497 __mem_cgroup_cancel_charge(memcg, nr_pages); 2498 return; 2499 } 2500 /* 2501 * we don't need page_cgroup_lock about tail pages, becase they are not 2502 * accessed by any other context at this point. 2503 */ 2504 pc->mem_cgroup = memcg; 2505 /* 2506 * We access a page_cgroup asynchronously without lock_page_cgroup(). 2507 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup 2508 * is accessed after testing USED bit. To make pc->mem_cgroup visible 2509 * before USED bit, we need memory barrier here. 2510 * See mem_cgroup_add_lru_list(), etc. 2511 */ 2512 smp_wmb(); 2513 switch (ctype) { 2514 case MEM_CGROUP_CHARGE_TYPE_CACHE: 2515 case MEM_CGROUP_CHARGE_TYPE_SHMEM: 2516 SetPageCgroupCache(pc); 2517 SetPageCgroupUsed(pc); 2518 break; 2519 case MEM_CGROUP_CHARGE_TYPE_MAPPED: 2520 ClearPageCgroupCache(pc); 2521 SetPageCgroupUsed(pc); 2522 break; 2523 default: 2524 break; 2525 } 2526 2527 mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages); 2528 unlock_page_cgroup(pc); 2529 /* 2530 * "charge_statistics" updated event counter. Then, check it. 2531 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. 2532 * if they exceeds softlimit. 2533 */ 2534 memcg_check_events(memcg, page); 2535} 2536 2537#ifdef CONFIG_TRANSPARENT_HUGEPAGE 2538 2539#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\ 2540 (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION)) 2541/* 2542 * Because tail pages are not marked as "used", set it. We're under 2543 * zone->lru_lock, 'splitting on pmd' and compound_lock. 2544 * charge/uncharge will be never happen and move_account() is done under 2545 * compound_lock(), so we don't have to take care of races. 
2546 */ 2547void mem_cgroup_split_huge_fixup(struct page *head) 2548{ 2549 struct page_cgroup *head_pc = lookup_page_cgroup(head); 2550 struct page_cgroup *pc; 2551 int i; 2552 2553 if (mem_cgroup_disabled()) 2554 return; 2555 for (i = 1; i < HPAGE_PMD_NR; i++) { 2556 pc = head_pc + i; 2557 pc->mem_cgroup = head_pc->mem_cgroup; 2558 smp_wmb();/* see __commit_charge() */ 2559 /* 2560 * LRU flags cannot be copied because we need to add tail 2561 * page to LRU by generic call and our hooks will be called. 2562 */ 2563 pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT; 2564 } 2565 2566 if (PageCgroupAcctLRU(head_pc)) { 2567 enum lru_list lru; 2568 struct mem_cgroup_per_zone *mz; 2569 /* 2570 * We hold lru_lock, then, reduce counter directly. 2571 */ 2572 lru = page_lru(head); 2573 mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head); 2574 MEM_CGROUP_ZSTAT(mz, lru) -= HPAGE_PMD_NR - 1; 2575 } 2576} 2577#endif 2578 2579/** 2580 * mem_cgroup_move_account - move account of the page 2581 * @page: the page 2582 * @nr_pages: number of regular pages (>1 for huge pages) 2583 * @pc: page_cgroup of the page. 2584 * @from: mem_cgroup which the page is moved from. 2585 * @to: mem_cgroup which the page is moved to. @from != @to. 2586 * @uncharge: whether we should call uncharge and css_put against @from. 2587 * 2588 * The caller must confirm following. 2589 * - page is not on LRU (isolate_page() is useful.) 2590 * - compound_lock is held when nr_pages > 1 2591 * 2592 * This function doesn't do "charge" nor css_get to new cgroup. It should be 2593 * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is 2594 * true, this function does "uncharge" from old cgroup, but it doesn't if 2595 * @uncharge is false, so a caller should do "uncharge". 2596 */ 2597static int mem_cgroup_move_account(struct page *page, 2598 unsigned int nr_pages, 2599 struct page_cgroup *pc, 2600 struct mem_cgroup *from, 2601 struct mem_cgroup *to, 2602 bool uncharge) 2603{ 2604 unsigned long flags; 2605 int ret; 2606 2607 VM_BUG_ON(from == to); 2608 VM_BUG_ON(PageLRU(page)); 2609 /* 2610 * The page is isolated from LRU. So, collapse function 2611 * will not handle this page. But page splitting can happen. 2612 * Do this check under compound_page_lock(). The caller should 2613 * hold it. 2614 */ 2615 ret = -EBUSY; 2616 if (nr_pages > 1 && !PageTransHuge(page)) 2617 goto out; 2618 2619 lock_page_cgroup(pc); 2620 2621 ret = -EINVAL; 2622 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from) 2623 goto unlock; 2624 2625 move_lock_page_cgroup(pc, &flags); 2626 2627 if (PageCgroupFileMapped(pc)) { 2628 /* Update mapped_file data for mem_cgroup */ 2629 preempt_disable(); 2630 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); 2631 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); 2632 preempt_enable(); 2633 } 2634 mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages); 2635 if (uncharge) 2636 /* This is not "cancel", but cancel_charge does all we need. */ 2637 __mem_cgroup_cancel_charge(from, nr_pages); 2638 2639 /* caller should have done css_get */ 2640 pc->mem_cgroup = to; 2641 mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages); 2642 /* 2643 * We charges against "to" which may not have any tasks. Then, "to" 2644 * can be under rmdir(). But in current implementation, caller of 2645 * this function is just force_empty() and move charge, so it's 2646 * guaranteed that "to" is never removed. So, we don't check rmdir 2647 * status here. 
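 *
 * For background, the move-charge caller is driven from userspace via
 * cgroup v1's move_charge_at_immigrate. A sketch, with a hypothetical
 * destination path "dst" and pid (error handling omitted):
 *
 *	int mfd = open("dst/memory.move_charge_at_immigrate", O_WRONLY);
 *	int tfd = open("dst/tasks", O_WRONLY);
 *	write(mfd, "1", 1);
 *	dprintf(tfd, "%d", leader_pid);
 *
 * Writing the thread group leader's pid then moves its charged anon
 * pages into "dst" (file pages too, if bit 1 is also set), which is
 * what eventually lands in this function.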
2648 */ 2649 move_unlock_page_cgroup(pc, &flags); 2650 ret = 0; 2651unlock: 2652 unlock_page_cgroup(pc); 2653 /* 2654 * check events 2655 */ 2656 memcg_check_events(to, page); 2657 memcg_check_events(from, page); 2658out: 2659 return ret; 2660} 2661 2662/* 2663 * move charges to its parent. 2664 */ 2665 2666static int mem_cgroup_move_parent(struct page *page, 2667 struct page_cgroup *pc, 2668 struct mem_cgroup *child, 2669 gfp_t gfp_mask) 2670{ 2671 struct cgroup *cg = child->css.cgroup; 2672 struct cgroup *pcg = cg->parent; 2673 struct mem_cgroup *parent; 2674 unsigned int nr_pages; 2675 unsigned long uninitialized_var(flags); 2676 int ret; 2677 2678 /* Is ROOT ? */ 2679 if (!pcg) 2680 return -EINVAL; 2681 2682 ret = -EBUSY; 2683 if (!get_page_unless_zero(page)) 2684 goto out; 2685 if (isolate_lru_page(page)) 2686 goto put; 2687 2688 nr_pages = hpage_nr_pages(page); 2689 2690 parent = mem_cgroup_from_cont(pcg); 2691 ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false); 2692 if (ret || !parent) 2693 goto put_back; 2694 2695 if (nr_pages > 1) 2696 flags = compound_lock_irqsave(page); 2697 2698 ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true); 2699 if (ret) 2700 __mem_cgroup_cancel_charge(parent, nr_pages); 2701 2702 if (nr_pages > 1) 2703 compound_unlock_irqrestore(page, flags); 2704put_back: 2705 putback_lru_page(page); 2706put: 2707 put_page(page); 2708out: 2709 return ret; 2710} 2711 2712/* 2713 * Charge the memory controller for page usage. 2714 * Return 2715 * 0 if the charge was successful 2716 * < 0 if the cgroup is over its limit 2717 */ 2718static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, 2719 gfp_t gfp_mask, enum charge_type ctype) 2720{ 2721 struct mem_cgroup *memcg = NULL; 2722 unsigned int nr_pages = 1; 2723 struct page_cgroup *pc; 2724 bool oom = true; 2725 int ret; 2726 2727 if (PageTransHuge(page)) { 2728 nr_pages <<= compound_order(page); 2729 VM_BUG_ON(!PageTransHuge(page)); 2730 /* 2731 * Never OOM-kill a process for a huge page. The 2732 * fault handler will fall back to regular pages. 2733 */ 2734 oom = false; 2735 } 2736 2737 pc = lookup_page_cgroup(page); 2738 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom); 2739 if (ret || !memcg) 2740 return ret; 2741 2742 __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype); 2743 return 0; 2744} 2745 2746int mem_cgroup_newpage_charge(struct page *page, 2747 struct mm_struct *mm, gfp_t gfp_mask) 2748{ 2749 if (mem_cgroup_disabled()) 2750 return 0; 2751 VM_BUG_ON(page_mapped(page)); 2752 VM_BUG_ON(page->mapping && !PageAnon(page)); 2753 VM_BUG_ON(!mm); 2754 return mem_cgroup_charge_common(page, mm, gfp_mask, 2755 MEM_CGROUP_CHARGE_TYPE_MAPPED); 2756} 2757 2758static void 2759__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, 2760 enum charge_type ctype); 2761 2762static void 2763__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg, 2764 enum charge_type ctype) 2765{ 2766 struct page_cgroup *pc = lookup_page_cgroup(page); 2767 /* 2768 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page 2769 * is already on LRU. It means the page may on some other page_cgroup's 2770 * LRU. Take care of it. 
2771 */ 2772 mem_cgroup_lru_del_before_commit(page); 2773 __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype); 2774 mem_cgroup_lru_add_after_commit(page); 2775 return; 2776} 2777 2778int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 2779 gfp_t gfp_mask) 2780{ 2781 struct mem_cgroup *memcg = NULL; 2782 int ret; 2783 2784 if (mem_cgroup_disabled()) 2785 return 0; 2786 if (PageCompound(page)) 2787 return 0; 2788 2789 if (unlikely(!mm)) 2790 mm = &init_mm; 2791 2792 if (page_is_file_cache(page)) { 2793 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true); 2794 if (ret || !memcg) 2795 return ret; 2796 2797 /* 2798 * FUSE reuses pages without going through the final 2799 * put that would remove them from the LRU list, make 2800 * sure that they get relinked properly. 2801 */ 2802 __mem_cgroup_commit_charge_lrucare(page, memcg, 2803 MEM_CGROUP_CHARGE_TYPE_CACHE); 2804 return ret; 2805 } 2806 /* shmem */ 2807 if (PageSwapCache(page)) { 2808 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg); 2809 if (!ret) 2810 __mem_cgroup_commit_charge_swapin(page, memcg, 2811 MEM_CGROUP_CHARGE_TYPE_SHMEM); 2812 } else 2813 ret = mem_cgroup_charge_common(page, mm, gfp_mask, 2814 MEM_CGROUP_CHARGE_TYPE_SHMEM); 2815 2816 return ret; 2817} 2818 2819/* 2820 * While swap-in, try_charge -> commit or cancel, the page is locked. 2821 * And when try_charge() successfully returns, one refcnt to memcg without 2822 * struct page_cgroup is acquired. This refcnt will be consumed by 2823 * "commit()" or removed by "cancel()" 2824 */ 2825int mem_cgroup_try_charge_swapin(struct mm_struct *mm, 2826 struct page *page, 2827 gfp_t mask, struct mem_cgroup **memcgp) 2828{ 2829 struct mem_cgroup *memcg; 2830 int ret; 2831 2832 *memcgp = NULL; 2833 2834 if (mem_cgroup_disabled()) 2835 return 0; 2836 2837 if (!do_swap_account) 2838 goto charge_cur_mm; 2839 /* 2840 * A racing thread's fault, or swapoff, may have already updated 2841 * the pte, and even removed page from swap cache: in those cases 2842 * do_swap_page()'s pte_same() test will fail; but there's also a 2843 * KSM case which does need to charge the page. 2844 */ 2845 if (!PageSwapCache(page)) 2846 goto charge_cur_mm; 2847 memcg = try_get_mem_cgroup_from_page(page); 2848 if (!memcg) 2849 goto charge_cur_mm; 2850 *memcgp = memcg; 2851 ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true); 2852 css_put(&memcg->css); 2853 return ret; 2854charge_cur_mm: 2855 if (unlikely(!mm)) 2856 mm = &init_mm; 2857 return __mem_cgroup_try_charge(mm, mask, 1, memcgp, true); 2858} 2859 2860static void 2861__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg, 2862 enum charge_type ctype) 2863{ 2864 if (mem_cgroup_disabled()) 2865 return; 2866 if (!memcg) 2867 return; 2868 cgroup_exclude_rmdir(&memcg->css); 2869 2870 __mem_cgroup_commit_charge_lrucare(page, memcg, ctype); 2871 /* 2872 * Now swap is on-memory. This means this page may be 2873 * counted both as mem and swap....double count. 2874 * Fix it by uncharging from memsw. Basically, this SwapCache is stable 2875 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page() 2876 * may call delete_from_swap_cache() before reach here. 
2877 */ 2878 if (do_swap_account && PageSwapCache(page)) { 2879 swp_entry_t ent = {.val = page_private(page)}; 2880 struct mem_cgroup *swap_memcg; 2881 unsigned short id; 2882 2883 id = swap_cgroup_record(ent, 0); 2884 rcu_read_lock(); 2885 swap_memcg = mem_cgroup_lookup(id); 2886 if (swap_memcg) { 2887 /* 2888 * This recorded memcg can be obsolete one. So, avoid 2889 * calling css_tryget 2890 */ 2891 if (!mem_cgroup_is_root(swap_memcg)) 2892 res_counter_uncharge(&swap_memcg->memsw, 2893 PAGE_SIZE); 2894 mem_cgroup_swap_statistics(swap_memcg, false); 2895 mem_cgroup_put(swap_memcg); 2896 } 2897 rcu_read_unlock(); 2898 } 2899 /* 2900 * At swapin, we may charge account against cgroup which has no tasks. 2901 * So, rmdir()->pre_destroy() can be called while we do this charge. 2902 * In that case, we need to call pre_destroy() again. check it here. 2903 */ 2904 cgroup_release_and_wakeup_rmdir(&memcg->css); 2905} 2906 2907void mem_cgroup_commit_charge_swapin(struct page *page, 2908 struct mem_cgroup *memcg) 2909{ 2910 __mem_cgroup_commit_charge_swapin(page, memcg, 2911 MEM_CGROUP_CHARGE_TYPE_MAPPED); 2912} 2913 2914void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg) 2915{ 2916 if (mem_cgroup_disabled()) 2917 return; 2918 if (!memcg) 2919 return; 2920 __mem_cgroup_cancel_charge(memcg, 1); 2921} 2922 2923static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg, 2924 unsigned int nr_pages, 2925 const enum charge_type ctype) 2926{ 2927 struct memcg_batch_info *batch = NULL; 2928 bool uncharge_memsw = true; 2929 2930 /* If swapout, usage of swap doesn't decrease */ 2931 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) 2932 uncharge_memsw = false; 2933 2934 batch = ¤t->memcg_batch; 2935 /* 2936 * In usual, we do css_get() when we remember memcg pointer. 2937 * But in this case, we keep res->usage until end of a series of 2938 * uncharges. Then, it's ok to ignore memcg's refcnt. 2939 */ 2940 if (!batch->memcg) 2941 batch->memcg = memcg; 2942 /* 2943 * do_batch > 0 when unmapping pages or inode invalidate/truncate. 2944 * In those cases, all pages freed continuously can be expected to be in 2945 * the same cgroup and we have chance to coalesce uncharges. 2946 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE) 2947 * because we want to do uncharge as soon as possible. 2948 */ 2949 2950 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE)) 2951 goto direct_uncharge; 2952 2953 if (nr_pages > 1) 2954 goto direct_uncharge; 2955 2956 /* 2957 * In typical case, batch->memcg == mem. This means we can 2958 * merge a series of uncharges to an uncharge of res_counter. 2959 * If not, we uncharge res_counter ony by one. 
2960 */ 2961 if (batch->memcg != memcg) 2962 goto direct_uncharge; 2963 /* remember freed charge and uncharge it later */ 2964 batch->nr_pages++; 2965 if (uncharge_memsw) 2966 batch->memsw_nr_pages++; 2967 return; 2968direct_uncharge: 2969 res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE); 2970 if (uncharge_memsw) 2971 res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE); 2972 if (unlikely(batch->memcg != memcg)) 2973 memcg_oom_recover(memcg); 2974 return; 2975} 2976 2977/* 2978 * uncharge if !page_mapped(page) 2979 */ 2980static struct mem_cgroup * 2981__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) 2982{ 2983 struct mem_cgroup *memcg = NULL; 2984 unsigned int nr_pages = 1; 2985 struct page_cgroup *pc; 2986 2987 if (mem_cgroup_disabled()) 2988 return NULL; 2989 2990 if (PageSwapCache(page)) 2991 return NULL; 2992 2993 if (PageTransHuge(page)) { 2994 nr_pages <<= compound_order(page); 2995 VM_BUG_ON(!PageTransHuge(page)); 2996 } 2997 /* 2998 * Check if our page_cgroup is valid 2999 */ 3000 pc = lookup_page_cgroup(page); 3001 if (unlikely(!PageCgroupUsed(pc))) 3002 return NULL; 3003 3004 lock_page_cgroup(pc); 3005 3006 memcg = pc->mem_cgroup; 3007 3008 if (!PageCgroupUsed(pc)) 3009 goto unlock_out; 3010 3011 switch (ctype) { 3012 case MEM_CGROUP_CHARGE_TYPE_MAPPED: 3013 case MEM_CGROUP_CHARGE_TYPE_DROP: 3014 /* See mem_cgroup_prepare_migration() */ 3015 if (page_mapped(page) || PageCgroupMigration(pc)) 3016 goto unlock_out; 3017 break; 3018 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT: 3019 if (!PageAnon(page)) { /* Shared memory */ 3020 if (page->mapping && !page_is_file_cache(page)) 3021 goto unlock_out; 3022 } else if (page_mapped(page)) /* Anon */ 3023 goto unlock_out; 3024 break; 3025 default: 3026 break; 3027 } 3028 3029 mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -nr_pages); 3030 3031 ClearPageCgroupUsed(pc); 3032 /* 3033 * pc->mem_cgroup is not cleared here. It will be accessed when it's 3034 * freed from LRU. This is safe because uncharged page is expected not 3035 * to be reused (freed soon). Exception is SwapCache, it's handled by 3036 * special functions. 3037 */ 3038 3039 unlock_page_cgroup(pc); 3040 /* 3041 * even after unlock, we have memcg->res.usage here and this memcg 3042 * will never be freed. 3043 */ 3044 memcg_check_events(memcg, page); 3045 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { 3046 mem_cgroup_swap_statistics(memcg, true); 3047 mem_cgroup_get(memcg); 3048 } 3049 if (!mem_cgroup_is_root(memcg)) 3050 mem_cgroup_do_uncharge(memcg, nr_pages, ctype); 3051 3052 return memcg; 3053 3054unlock_out: 3055 unlock_page_cgroup(pc); 3056 return NULL; 3057} 3058 3059void mem_cgroup_uncharge_page(struct page *page) 3060{ 3061 /* early check. */ 3062 if (page_mapped(page)) 3063 return; 3064 if (page->mapping && !PageAnon(page)) 3065 return; 3066 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED); 3067} 3068 3069void mem_cgroup_uncharge_cache_page(struct page *page) 3070{ 3071 VM_BUG_ON(page_mapped(page)); 3072 VM_BUG_ON(page->mapping); 3073 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE); 3074} 3075 3076/* 3077 * Batch_start/batch_end is called in unmap_page_range/invlidate/trucate. 3078 * In that cases, pages are freed continuously and we can expect pages 3079 * are in the same memcg. All these calls itself limits the number of 3080 * pages freed at once, then uncharge_start/end() is called properly. 
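 *
 * The expected caller pattern is (a sketch; the real callers live in
 * the truncate/unmap paths outside this file):
 *
 *	mem_cgroup_uncharge_start();
 *	for each page in the range:
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 *
 * so that a long run of uncharges against the same memcg collapses
 * into one res_counter update in mem_cgroup_uncharge_end().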
3081 * This may be called multiple (nested) times in one context.
3082 */
3083
3084void mem_cgroup_uncharge_start(void)
3085{
3086	current->memcg_batch.do_batch++;
3087	/* We can do nest. */
3088	if (current->memcg_batch.do_batch == 1) {
3089		current->memcg_batch.memcg = NULL;
3090		current->memcg_batch.nr_pages = 0;
3091		current->memcg_batch.memsw_nr_pages = 0;
3092	}
3093}
3094
3095void mem_cgroup_uncharge_end(void)
3096{
3097	struct memcg_batch_info *batch = &current->memcg_batch;
3098
3099	if (!batch->do_batch)
3100		return;
3101
3102	batch->do_batch--;
3103	if (batch->do_batch) /* If stacked, do nothing. */
3104		return;
3105
3106	if (!batch->memcg)
3107		return;
3108	/*
3109	 * This "batch->memcg" is valid without any css_get/put etc...
3110	 * because we hide charges behind us.
3111	 */
3112	if (batch->nr_pages)
3113		res_counter_uncharge(&batch->memcg->res,
3114				     batch->nr_pages * PAGE_SIZE);
3115	if (batch->memsw_nr_pages)
3116		res_counter_uncharge(&batch->memcg->memsw,
3117				     batch->memsw_nr_pages * PAGE_SIZE);
3118	memcg_oom_recover(batch->memcg);
3119	/* forget this pointer (for sanity check) */
3120	batch->memcg = NULL;
3121}
3122
3123#ifdef CONFIG_SWAP
3124/*
3125 * Called after __delete_from_swap_cache(); drops the "page" account.
3126 * The memcg information is recorded in the swap_cgroup of "ent".
3127 */
3128void
3129mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
3130{
3131	struct mem_cgroup *memcg;
3132	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
3133
3134	if (!swapout) /* this was a swap cache but the swap is unused ! */
3135		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
3136
3137	memcg = __mem_cgroup_uncharge_common(page, ctype);
3138
3139	/*
3140	 * record memcg information; if swapout && memcg != NULL,
3141	 * mem_cgroup_get() was called in uncharge().
3142	 */
3143	if (do_swap_account && swapout && memcg)
3144		swap_cgroup_record(ent, css_id(&memcg->css));
3145}
3146#endif
3147
3148#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3149/*
3150 * Called from swap_entry_free(). Removes the record in swap_cgroup and
3151 * uncharges the "memsw" account.
3152 */
3153void mem_cgroup_uncharge_swap(swp_entry_t ent)
3154{
3155	struct mem_cgroup *memcg;
3156	unsigned short id;
3157
3158	if (!do_swap_account)
3159		return;
3160
3161	id = swap_cgroup_record(ent, 0);
3162	rcu_read_lock();
3163	memcg = mem_cgroup_lookup(id);
3164	if (memcg) {
3165		/*
3166		 * We uncharge this because the swap is freed. This memcg
3167		 * can be an obsolete one, so we avoid calling css_tryget().
3168		 */
3169		if (!mem_cgroup_is_root(memcg))
3170			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
3171		mem_cgroup_swap_statistics(memcg, false);
3172		mem_cgroup_put(memcg);
3173	}
3174	rcu_read_unlock();
3175}
3176
3177/**
3178 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3179 * @entry: swap entry to be moved
3180 * @from: mem_cgroup which the entry is moved from
3181 * @to: mem_cgroup which the entry is moved to
3182 * @need_fixup: whether we should fixup res_counters and refcounts.
3183 *
3184 * It succeeds only when the swap_cgroup's record for this entry is the same
3185 * as the mem_cgroup's id of @from.
3186 *
3187 * Returns 0 on success, -EINVAL on failure.
3188 *
3189 * The caller must have charged to @to, IOW, called res_counter_charge() about
3190 * both res and memsw, and called css_get().
3191 */ 3192static int mem_cgroup_move_swap_account(swp_entry_t entry, 3193 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) 3194{ 3195 unsigned short old_id, new_id; 3196 3197 old_id = css_id(&from->css); 3198 new_id = css_id(&to->css); 3199 3200 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3201 mem_cgroup_swap_statistics(from, false); 3202 mem_cgroup_swap_statistics(to, true); 3203 /* 3204 * This function is only called from task migration context now. 3205 * It postpones res_counter and refcount handling till the end 3206 * of task migration(mem_cgroup_clear_mc()) for performance 3207 * improvement. But we cannot postpone mem_cgroup_get(to) 3208 * because if the process that has been moved to @to does 3209 * swap-in, the refcount of @to might be decreased to 0. 3210 */ 3211 mem_cgroup_get(to); 3212 if (need_fixup) { 3213 if (!mem_cgroup_is_root(from)) 3214 res_counter_uncharge(&from->memsw, PAGE_SIZE); 3215 mem_cgroup_put(from); 3216 /* 3217 * we charged both to->res and to->memsw, so we should 3218 * uncharge to->res. 3219 */ 3220 if (!mem_cgroup_is_root(to)) 3221 res_counter_uncharge(&to->res, PAGE_SIZE); 3222 } 3223 return 0; 3224 } 3225 return -EINVAL; 3226} 3227#else 3228static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3229 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) 3230{ 3231 return -EINVAL; 3232} 3233#endif 3234 3235/* 3236 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old 3237 * page belongs to. 3238 */ 3239int mem_cgroup_prepare_migration(struct page *page, 3240 struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask) 3241{ 3242 struct mem_cgroup *memcg = NULL; 3243 struct page_cgroup *pc; 3244 enum charge_type ctype; 3245 int ret = 0; 3246 3247 *memcgp = NULL; 3248 3249 VM_BUG_ON(PageTransHuge(page)); 3250 if (mem_cgroup_disabled()) 3251 return 0; 3252 3253 pc = lookup_page_cgroup(page); 3254 lock_page_cgroup(pc); 3255 if (PageCgroupUsed(pc)) { 3256 memcg = pc->mem_cgroup; 3257 css_get(&memcg->css); 3258 /* 3259 * At migrating an anonymous page, its mapcount goes down 3260 * to 0 and uncharge() will be called. But, even if it's fully 3261 * unmapped, migration may fail and this page has to be 3262 * charged again. We set MIGRATION flag here and delay uncharge 3263 * until end_migration() is called 3264 * 3265 * Corner Case Thinking 3266 * A) 3267 * When the old page was mapped as Anon and it's unmap-and-freed 3268 * while migration was ongoing. 3269 * If unmap finds the old page, uncharge() of it will be delayed 3270 * until end_migration(). If unmap finds a new page, it's 3271 * uncharged when it make mapcount to be 1->0. If unmap code 3272 * finds swap_migration_entry, the new page will not be mapped 3273 * and end_migration() will find it(mapcount==0). 3274 * 3275 * B) 3276 * When the old page was mapped but migraion fails, the kernel 3277 * remaps it. A charge for it is kept by MIGRATION flag even 3278 * if mapcount goes down to 0. We can do remap successfully 3279 * without charging it again. 3280 * 3281 * C) 3282 * The "old" page is under lock_page() until the end of 3283 * migration, so, the old page itself will not be swapped-out. 3284 * If the new page is swapped out before end_migraton, our 3285 * hook to usual swap-out path will catch the event. 3286 */ 3287 if (PageAnon(page)) 3288 SetPageCgroupMigration(pc); 3289 } 3290 unlock_page_cgroup(pc); 3291 /* 3292 * If the page is not charged at this point, 3293 * we return here. 
3294 */ 3295 if (!memcg) 3296 return 0; 3297 3298 *memcgp = memcg; 3299 ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, memcgp, false); 3300 css_put(&memcg->css);/* drop extra refcnt */ 3301 if (ret || *memcgp == NULL) { 3302 if (PageAnon(page)) { 3303 lock_page_cgroup(pc); 3304 ClearPageCgroupMigration(pc); 3305 unlock_page_cgroup(pc); 3306 /* 3307 * The old page may be fully unmapped while we kept it. 3308 */ 3309 mem_cgroup_uncharge_page(page); 3310 } 3311 return -ENOMEM; 3312 } 3313 /* 3314 * We charge new page before it's used/mapped. So, even if unlock_page() 3315 * is called before end_migration, we can catch all events on this new 3316 * page. In the case new page is migrated but not remapped, new page's 3317 * mapcount will be finally 0 and we call uncharge in end_migration(). 3318 */ 3319 pc = lookup_page_cgroup(newpage); 3320 if (PageAnon(page)) 3321 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED; 3322 else if (page_is_file_cache(page)) 3323 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; 3324 else 3325 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; 3326 __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype); 3327 return ret; 3328} 3329 3330/* remove redundant charge if migration failed*/ 3331void mem_cgroup_end_migration(struct mem_cgroup *memcg, 3332 struct page *oldpage, struct page *newpage, bool migration_ok) 3333{ 3334 struct page *used, *unused; 3335 struct page_cgroup *pc; 3336 3337 if (!memcg) 3338 return; 3339 /* blocks rmdir() */ 3340 cgroup_exclude_rmdir(&memcg->css); 3341 if (!migration_ok) { 3342 used = oldpage; 3343 unused = newpage; 3344 } else { 3345 used = newpage; 3346 unused = oldpage; 3347 } 3348 /* 3349 * We disallowed uncharge of pages under migration because mapcount 3350 * of the page goes down to zero, temporarly. 3351 * Clear the flag and check the page should be charged. 3352 */ 3353 pc = lookup_page_cgroup(oldpage); 3354 lock_page_cgroup(pc); 3355 ClearPageCgroupMigration(pc); 3356 unlock_page_cgroup(pc); 3357 3358 __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE); 3359 3360 /* 3361 * If a page is a file cache, radix-tree replacement is very atomic 3362 * and we can skip this check. When it was an Anon page, its mapcount 3363 * goes down to 0. But because we added MIGRATION flage, it's not 3364 * uncharged yet. There are several case but page->mapcount check 3365 * and USED bit check in mem_cgroup_uncharge_page() will do enough 3366 * check. (see prepare_charge() also) 3367 */ 3368 if (PageAnon(used)) 3369 mem_cgroup_uncharge_page(used); 3370 /* 3371 * At migration, we may charge account against cgroup which has no 3372 * tasks. 3373 * So, rmdir()->pre_destroy() can be called while we do this charge. 3374 * In that case, we need to call pre_destroy() again. check it here. 3375 */ 3376 cgroup_release_and_wakeup_rmdir(&memcg->css); 3377} 3378 3379/* 3380 * At replace page cache, newpage is not under any memcg but it's on 3381 * LRU. So, this function doesn't touch res_counter but handles LRU 3382 * in correct way. Both pages are locked so we cannot race with uncharge. 
3383 */ 3384void mem_cgroup_replace_page_cache(struct page *oldpage, 3385 struct page *newpage) 3386{ 3387 struct mem_cgroup *memcg; 3388 struct page_cgroup *pc; 3389 struct zone *zone; 3390 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE; 3391 unsigned long flags; 3392 3393 if (mem_cgroup_disabled()) 3394 return; 3395 3396 pc = lookup_page_cgroup(oldpage); 3397 /* fix accounting on old pages */ 3398 lock_page_cgroup(pc); 3399 memcg = pc->mem_cgroup; 3400 mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1); 3401 ClearPageCgroupUsed(pc); 3402 unlock_page_cgroup(pc); 3403 3404 if (PageSwapBacked(oldpage)) 3405 type = MEM_CGROUP_CHARGE_TYPE_SHMEM; 3406 3407 zone = page_zone(newpage); 3408 pc = lookup_page_cgroup(newpage); 3409 /* 3410 * Even if newpage->mapping was NULL before starting replacement, 3411 * the newpage may be on LRU(or pagevec for LRU) already. We lock 3412 * LRU while we overwrite pc->mem_cgroup. 3413 */ 3414 spin_lock_irqsave(&zone->lru_lock, flags); 3415 if (PageLRU(newpage)) 3416 del_page_from_lru_list(zone, newpage, page_lru(newpage)); 3417 __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type); 3418 if (PageLRU(newpage)) 3419 add_page_to_lru_list(zone, newpage, page_lru(newpage)); 3420 spin_unlock_irqrestore(&zone->lru_lock, flags); 3421} 3422 3423#ifdef CONFIG_DEBUG_VM 3424static struct page_cgroup *lookup_page_cgroup_used(struct page *page) 3425{ 3426 struct page_cgroup *pc; 3427 3428 pc = lookup_page_cgroup(page); 3429 /* 3430 * Can be NULL while feeding pages into the page allocator for 3431 * the first time, i.e. during boot or memory hotplug; 3432 * or when mem_cgroup_disabled(). 3433 */ 3434 if (likely(pc) && PageCgroupUsed(pc)) 3435 return pc; 3436 return NULL; 3437} 3438 3439bool mem_cgroup_bad_page_check(struct page *page) 3440{ 3441 if (mem_cgroup_disabled()) 3442 return false; 3443 3444 return lookup_page_cgroup_used(page) != NULL; 3445} 3446 3447void mem_cgroup_print_bad_page(struct page *page) 3448{ 3449 struct page_cgroup *pc; 3450 3451 pc = lookup_page_cgroup_used(page); 3452 if (pc) { 3453 int ret = -1; 3454 char *path; 3455 3456 printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p", 3457 pc, pc->flags, pc->mem_cgroup); 3458 3459 path = kmalloc(PATH_MAX, GFP_KERNEL); 3460 if (path) { 3461 rcu_read_lock(); 3462 ret = cgroup_path(pc->mem_cgroup->css.cgroup, 3463 path, PATH_MAX); 3464 rcu_read_unlock(); 3465 } 3466 3467 printk(KERN_CONT "(%s)\n", 3468 (ret < 0) ? "cannot get the path" : path); 3469 kfree(path); 3470 } 3471} 3472#endif 3473 3474static DEFINE_MUTEX(set_limit_mutex); 3475 3476static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 3477 unsigned long long val) 3478{ 3479 int retry_count; 3480 u64 memswlimit, memlimit; 3481 int ret = 0; 3482 int children = mem_cgroup_count_children(memcg); 3483 u64 curusage, oldusage; 3484 int enlarge; 3485 3486 /* 3487 * For keeping hierarchical_reclaim simple, how long we should retry 3488 * is depends on callers. We set our retry-count to be function 3489 * of # of children which we should visit in this loop. 3490 */ 3491 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children; 3492 3493 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE); 3494 3495 enlarge = 0; 3496 while (retry_count) { 3497 if (signal_pending(current)) { 3498 ret = -EINTR; 3499 break; 3500 } 3501 /* 3502 * Rather than hide all in some function, I do this in 3503 * open coded manner. You see what this really does. 3504 * We have to guarantee memcg->res.limit < memcg->memsw.limit. 
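 *
 * The user-visible consequence (illustrative values): to grow both
 * limits, memsw must be raised first; the opposite order would
 * transiently violate limit <= memsw.limit and the check below
 * returns -EINVAL. E.g., from userspace:
 *
 *	int fd;
 *	fd = open(".../memory.memsw.limit_in_bytes", O_WRONLY);
 *	write(fd, "2G", 2);
 *	fd = open(".../memory.limit_in_bytes", O_WRONLY);
 *	write(fd, "1G", 2);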
3505 */ 3506 mutex_lock(&set_limit_mutex); 3507 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3508 if (memswlimit < val) { 3509 ret = -EINVAL; 3510 mutex_unlock(&set_limit_mutex); 3511 break; 3512 } 3513 3514 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 3515 if (memlimit < val) 3516 enlarge = 1; 3517 3518 ret = res_counter_set_limit(&memcg->res, val); 3519 if (!ret) { 3520 if (memswlimit == val) 3521 memcg->memsw_is_minimum = true; 3522 else 3523 memcg->memsw_is_minimum = false; 3524 } 3525 mutex_unlock(&set_limit_mutex); 3526 3527 if (!ret) 3528 break; 3529 3530 mem_cgroup_reclaim(memcg, GFP_KERNEL, 3531 MEM_CGROUP_RECLAIM_SHRINK); 3532 curusage = res_counter_read_u64(&memcg->res, RES_USAGE); 3533 /* Usage is reduced ? */ 3534 if (curusage >= oldusage) 3535 retry_count--; 3536 else 3537 oldusage = curusage; 3538 } 3539 if (!ret && enlarge) 3540 memcg_oom_recover(memcg); 3541 3542 return ret; 3543} 3544 3545static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 3546 unsigned long long val) 3547{ 3548 int retry_count; 3549 u64 memlimit, memswlimit, oldusage, curusage; 3550 int children = mem_cgroup_count_children(memcg); 3551 int ret = -EBUSY; 3552 int enlarge = 0; 3553 3554 /* see mem_cgroup_resize_res_limit */ 3555 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; 3556 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 3557 while (retry_count) { 3558 if (signal_pending(current)) { 3559 ret = -EINTR; 3560 break; 3561 } 3562 /* 3563 * Rather than hide all in some function, I do this in 3564 * open coded manner. You see what this really does. 3565 * We have to guarantee memcg->res.limit < memcg->memsw.limit. 3566 */ 3567 mutex_lock(&set_limit_mutex); 3568 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 3569 if (memlimit > val) { 3570 ret = -EINVAL; 3571 mutex_unlock(&set_limit_mutex); 3572 break; 3573 } 3574 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3575 if (memswlimit < val) 3576 enlarge = 1; 3577 ret = res_counter_set_limit(&memcg->memsw, val); 3578 if (!ret) { 3579 if (memlimit == val) 3580 memcg->memsw_is_minimum = true; 3581 else 3582 memcg->memsw_is_minimum = false; 3583 } 3584 mutex_unlock(&set_limit_mutex); 3585 3586 if (!ret) 3587 break; 3588 3589 mem_cgroup_reclaim(memcg, GFP_KERNEL, 3590 MEM_CGROUP_RECLAIM_NOSWAP | 3591 MEM_CGROUP_RECLAIM_SHRINK); 3592 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 3593 /* Usage is reduced ? 
*/ 3594 if (curusage >= oldusage) 3595 retry_count--; 3596 else 3597 oldusage = curusage; 3598 } 3599 if (!ret && enlarge) 3600 memcg_oom_recover(memcg); 3601 return ret; 3602} 3603 3604unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 3605 gfp_t gfp_mask, 3606 unsigned long *total_scanned) 3607{ 3608 unsigned long nr_reclaimed = 0; 3609 struct mem_cgroup_per_zone *mz, *next_mz = NULL; 3610 unsigned long reclaimed; 3611 int loop = 0; 3612 struct mem_cgroup_tree_per_zone *mctz; 3613 unsigned long long excess; 3614 unsigned long nr_scanned; 3615 3616 if (order > 0) 3617 return 0; 3618 3619 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone)); 3620 /* 3621 * This loop can run a while, specially if mem_cgroup's continuously 3622 * keep exceeding their soft limit and putting the system under 3623 * pressure 3624 */ 3625 do { 3626 if (next_mz) 3627 mz = next_mz; 3628 else 3629 mz = mem_cgroup_largest_soft_limit_node(mctz); 3630 if (!mz) 3631 break; 3632 3633 nr_scanned = 0; 3634 reclaimed = mem_cgroup_soft_reclaim(mz->mem, zone, 3635 gfp_mask, &nr_scanned); 3636 nr_reclaimed += reclaimed; 3637 *total_scanned += nr_scanned; 3638 spin_lock(&mctz->lock); 3639 3640 /* 3641 * If we failed to reclaim anything from this memory cgroup 3642 * it is time to move on to the next cgroup 3643 */ 3644 next_mz = NULL; 3645 if (!reclaimed) { 3646 do { 3647 /* 3648 * Loop until we find yet another one. 3649 * 3650 * By the time we get the soft_limit lock 3651 * again, someone might have aded the 3652 * group back on the RB tree. Iterate to 3653 * make sure we get a different mem. 3654 * mem_cgroup_largest_soft_limit_node returns 3655 * NULL if no other cgroup is present on 3656 * the tree 3657 */ 3658 next_mz = 3659 __mem_cgroup_largest_soft_limit_node(mctz); 3660 if (next_mz == mz) 3661 css_put(&next_mz->mem->css); 3662 else /* next_mz == NULL or other memcg */ 3663 break; 3664 } while (1); 3665 } 3666 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz); 3667 excess = res_counter_soft_limit_excess(&mz->mem->res); 3668 /* 3669 * One school of thought says that we should not add 3670 * back the node to the tree if reclaim returns 0. 3671 * But our reclaim could return 0, simply because due 3672 * to priority we are exposing a smaller subset of 3673 * memory to reclaim from. Consider this as a longer 3674 * term TODO. 3675 */ 3676 /* If excess == 0, no tree ops */ 3677 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess); 3678 spin_unlock(&mctz->lock); 3679 css_put(&mz->mem->css); 3680 loop++; 3681 /* 3682 * Could not reclaim anything and there are no more 3683 * mem cgroups to try or we seem to be looping without 3684 * reclaiming anything. 3685 */ 3686 if (!nr_reclaimed && 3687 (next_mz == NULL || 3688 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3689 break; 3690 } while (!nr_reclaimed); 3691 if (next_mz) 3692 css_put(&next_mz->mem->css); 3693 return nr_reclaimed; 3694} 3695 3696/* 3697 * This routine traverse page_cgroup in given list and drop them all. 3698 * *And* this routine doesn't reclaim page itself, just removes page_cgroup. 
3699 */ 3700static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg, 3701 int node, int zid, enum lru_list lru) 3702{ 3703 struct mem_cgroup_per_zone *mz; 3704 unsigned long flags, loop; 3705 struct list_head *list; 3706 struct page *busy; 3707 struct zone *zone; 3708 int ret = 0; 3709 3710 zone = &NODE_DATA(node)->node_zones[zid]; 3711 mz = mem_cgroup_zoneinfo(memcg, node, zid); 3712 list = &mz->lruvec.lists[lru]; 3713 3714 loop = MEM_CGROUP_ZSTAT(mz, lru); 3715 /* give some margin against EBUSY etc...*/ 3716 loop += 256; 3717 busy = NULL; 3718 while (loop--) { 3719 struct page_cgroup *pc; 3720 struct page *page; 3721 3722 ret = 0; 3723 spin_lock_irqsave(&zone->lru_lock, flags); 3724 if (list_empty(list)) { 3725 spin_unlock_irqrestore(&zone->lru_lock, flags); 3726 break; 3727 } 3728 page = list_entry(list->prev, struct page, lru); 3729 if (busy == page) { 3730 list_move(&page->lru, list); 3731 busy = NULL; 3732 spin_unlock_irqrestore(&zone->lru_lock, flags); 3733 continue; 3734 } 3735 spin_unlock_irqrestore(&zone->lru_lock, flags); 3736 3737 pc = lookup_page_cgroup(page); 3738 3739 ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL); 3740 if (ret == -ENOMEM) 3741 break; 3742 3743 if (ret == -EBUSY || ret == -EINVAL) { 3744 /* found lock contention or "pc" is obsolete. */ 3745 busy = page; 3746 cond_resched(); 3747 } else 3748 busy = NULL; 3749 } 3750 3751 if (!ret && !list_empty(list)) 3752 return -EBUSY; 3753 return ret; 3754} 3755 3756/* 3757 * make mem_cgroup's charge to be 0 if there is no task. 3758 * This enables deleting this mem_cgroup. 3759 */ 3760static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all) 3761{ 3762 int ret; 3763 int node, zid, shrink; 3764 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 3765 struct cgroup *cgrp = memcg->css.cgroup; 3766 3767 css_get(&memcg->css); 3768 3769 shrink = 0; 3770 /* should free all ? */ 3771 if (free_all) 3772 goto try_to_free; 3773move_account: 3774 do { 3775 ret = -EBUSY; 3776 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children)) 3777 goto out; 3778 ret = -EINTR; 3779 if (signal_pending(current)) 3780 goto out; 3781 /* This is for making all *used* pages to be on LRU. */ 3782 lru_add_drain_all(); 3783 drain_all_stock_sync(memcg); 3784 ret = 0; 3785 mem_cgroup_start_move(memcg); 3786 for_each_node_state(node, N_HIGH_MEMORY) { 3787 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { 3788 enum lru_list l; 3789 for_each_lru(l) { 3790 ret = mem_cgroup_force_empty_list(memcg, 3791 node, zid, l); 3792 if (ret) 3793 break; 3794 } 3795 } 3796 if (ret) 3797 break; 3798 } 3799 mem_cgroup_end_move(memcg); 3800 memcg_oom_recover(memcg); 3801 /* it seems parent cgroup doesn't have enough mem */ 3802 if (ret == -ENOMEM) 3803 goto try_to_free; 3804 cond_resched(); 3805 /* "ret" should also be checked to ensure all lists are empty. */ 3806 } while (memcg->res.usage > 0 || ret); 3807out: 3808 css_put(&memcg->css); 3809 return ret; 3810 3811try_to_free: 3812 /* returns EBUSY if there is a task or if we come here twice. 
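 *
 * This path is also reached from userspace via the memory.force_empty
 * file, typically just before rmdir of the group. Writing any value,
 * e.g. "0", ends up in mem_cgroup_force_empty_write() below:
 *
 *	int fd = open("<group>/memory.force_empty", O_WRONLY);
 *	write(fd, "0", 1);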
*/
3813 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3814 ret = -EBUSY;
3815 goto out;
3816 }
3817 /* we call try-to-free pages to make this cgroup empty */
3818 lru_add_drain_all();
3819 /* try to free all pages in this cgroup */
3820 shrink = 1;
3821 while (nr_retries && memcg->res.usage > 0) {
3822 int progress;
3823
3824 if (signal_pending(current)) {
3825 ret = -EINTR;
3826 goto out;
3827 }
3828 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
3829 false);
3830 if (!progress) {
3831 nr_retries--;
3832 /* maybe some writeback is necessary */
3833 congestion_wait(BLK_RW_ASYNC, HZ/10);
3834 }
3835
3836 }
3837 lru_add_drain();
3838 /* try move_account...there may be some *locked* pages. */
3839 goto move_account;
3840}
3841
3842int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3843{
3844 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3845}
3846
3847
3848static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3849{
3850 return mem_cgroup_from_cont(cont)->use_hierarchy;
3851}
3852
3853static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3854 u64 val)
3855{
3856 int retval = 0;
3857 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3858 struct cgroup *parent = cont->parent;
3859 struct mem_cgroup *parent_memcg = NULL;
3860
3861 if (parent)
3862 parent_memcg = mem_cgroup_from_cont(parent);
3863
3864 cgroup_lock();
3865 /*
3866 * If the parent's use_hierarchy is set, we can't make any modifications
3867 * in the child subtrees. If it is unset, then the change can
3868 * occur, provided the current cgroup has no children.
3869 *
3870 * For the root cgroup, parent_memcg is NULL; we allow the value to
3871 * be set if there are no children.
3872 */
3873 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3874 (val == 1 || val == 0)) {
3875 if (list_empty(&cont->children))
3876 memcg->use_hierarchy = val;
3877 else
3878 retval = -EBUSY;
3879 } else
3880 retval = -EINVAL;
3881 cgroup_unlock();
3882
3883 return retval;
3884}
3885
3886
3887static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
3888 enum mem_cgroup_stat_index idx)
3889{
3890 struct mem_cgroup *iter;
3891 long val = 0;
3892
3893 /* Per-cpu values can be negative, use a signed accumulator */
3894 for_each_mem_cgroup_tree(iter, memcg)
3895 val += mem_cgroup_read_stat(iter, idx);
3896
3897 if (val < 0) /* race ?
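 * A per-cpu counter can be transiently negative when this unlocked read
 * races with an update, so clamp to zero rather than return a bogus
 * huge value after conversion to unsigned long.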
*/ 3898 val = 0; 3899 return val; 3900} 3901 3902static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3903{ 3904 u64 val; 3905 3906 if (!mem_cgroup_is_root(memcg)) { 3907 if (!swap) 3908 return res_counter_read_u64(&memcg->res, RES_USAGE); 3909 else 3910 return res_counter_read_u64(&memcg->memsw, RES_USAGE); 3911 } 3912 3913 val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE); 3914 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS); 3915 3916 if (swap) 3917 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAPOUT); 3918 3919 return val << PAGE_SHIFT; 3920} 3921 3922static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) 3923{ 3924 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 3925 u64 val; 3926 int type, name; 3927 3928 type = MEMFILE_TYPE(cft->private); 3929 name = MEMFILE_ATTR(cft->private); 3930 switch (type) { 3931 case _MEM: 3932 if (name == RES_USAGE) 3933 val = mem_cgroup_usage(memcg, false); 3934 else 3935 val = res_counter_read_u64(&memcg->res, name); 3936 break; 3937 case _MEMSWAP: 3938 if (name == RES_USAGE) 3939 val = mem_cgroup_usage(memcg, true); 3940 else 3941 val = res_counter_read_u64(&memcg->memsw, name); 3942 break; 3943 default: 3944 BUG(); 3945 break; 3946 } 3947 return val; 3948} 3949/* 3950 * The user of this function is... 3951 * RES_LIMIT. 3952 */ 3953static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft, 3954 const char *buffer) 3955{ 3956 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 3957 int type, name; 3958 unsigned long long val; 3959 int ret; 3960 3961 type = MEMFILE_TYPE(cft->private); 3962 name = MEMFILE_ATTR(cft->private); 3963 switch (name) { 3964 case RES_LIMIT: 3965 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3966 ret = -EINVAL; 3967 break; 3968 } 3969 /* This function does all necessary parse...reuse it */ 3970 ret = res_counter_memparse_write_strategy(buffer, &val); 3971 if (ret) 3972 break; 3973 if (type == _MEM) 3974 ret = mem_cgroup_resize_limit(memcg, val); 3975 else 3976 ret = mem_cgroup_resize_memsw_limit(memcg, val); 3977 break; 3978 case RES_SOFT_LIMIT: 3979 ret = res_counter_memparse_write_strategy(buffer, &val); 3980 if (ret) 3981 break; 3982 /* 3983 * For memsw, soft limits are hard to implement in terms 3984 * of semantics, for now, we support soft limits for 3985 * control without swap 3986 */ 3987 if (type == _MEM) 3988 ret = res_counter_set_soft_limit(&memcg->res, val); 3989 else 3990 ret = -EINVAL; 3991 break; 3992 default: 3993 ret = -EINVAL; /* should be BUG() ? 
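 * Probably not: reaching this default case only means userspace wrote
 * to a control file whose attribute is not handled here, so -EINVAL is
 * the safer answer.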
*/
3994 break;
3995 }
3996 return ret;
3997}
3998
3999static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
4000 unsigned long long *mem_limit, unsigned long long *memsw_limit)
4001{
4002 struct cgroup *cgroup;
4003 unsigned long long min_limit, min_memsw_limit, tmp;
4004
4005 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4006 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4007 cgroup = memcg->css.cgroup;
4008 if (!memcg->use_hierarchy)
4009 goto out;
4010
4011 while (cgroup->parent) {
4012 cgroup = cgroup->parent;
4013 memcg = mem_cgroup_from_cont(cgroup);
4014 if (!memcg->use_hierarchy)
4015 break;
4016 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
4017 min_limit = min(min_limit, tmp);
4018 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4019 min_memsw_limit = min(min_memsw_limit, tmp);
4020 }
4021out:
4022 *mem_limit = min_limit;
4023 *memsw_limit = min_memsw_limit;
4024 return;
4025}
4026
4027static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
4028{
4029 struct mem_cgroup *memcg;
4030 int type, name;
4031
4032 memcg = mem_cgroup_from_cont(cont);
4033 type = MEMFILE_TYPE(event);
4034 name = MEMFILE_ATTR(event);
4035 switch (name) {
4036 case RES_MAX_USAGE:
4037 if (type == _MEM)
4038 res_counter_reset_max(&memcg->res);
4039 else
4040 res_counter_reset_max(&memcg->memsw);
4041 break;
4042 case RES_FAILCNT:
4043 if (type == _MEM)
4044 res_counter_reset_failcnt(&memcg->res);
4045 else
4046 res_counter_reset_failcnt(&memcg->memsw);
4047 break;
4048 }
4049
4050 return 0;
4051}
4052
4053static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
4054 struct cftype *cft)
4055{
4056 return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
4057}
4058
4059#ifdef CONFIG_MMU
4060static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4061 struct cftype *cft, u64 val)
4062{
4063 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4064
4065 if (val >= (1 << NR_MOVE_TYPE))
4066 return -EINVAL;
4067 /*
4068 * We check this value several times in can_attach() and attach(),
4069 * so we need the cgroup lock to prevent the value from becoming
4070 * inconsistent.
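 * The value is a bitmask: bit 0 selects the task's anonymous pages (and
 * their swap entries), bit 1 additionally selects mapped file pages. A
 * sketch of enabling both from userspace (path illustrative, error
 * handling omitted):
 *
 *	int fd = open("/cgroup/memory/dst/memory.move_charge_at_immigrate",
 *		      O_WRONLY);
 *	write(fd, "3", 1);
 *	close(fd);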
4071 */ 4072 cgroup_lock(); 4073 memcg->move_charge_at_immigrate = val; 4074 cgroup_unlock(); 4075 4076 return 0; 4077} 4078#else 4079static int mem_cgroup_move_charge_write(struct cgroup *cgrp, 4080 struct cftype *cft, u64 val) 4081{ 4082 return -ENOSYS; 4083} 4084#endif 4085 4086 4087/* For read statistics */ 4088enum { 4089 MCS_CACHE, 4090 MCS_RSS, 4091 MCS_FILE_MAPPED, 4092 MCS_PGPGIN, 4093 MCS_PGPGOUT, 4094 MCS_SWAP, 4095 MCS_PGFAULT, 4096 MCS_PGMAJFAULT, 4097 MCS_INACTIVE_ANON, 4098 MCS_ACTIVE_ANON, 4099 MCS_INACTIVE_FILE, 4100 MCS_ACTIVE_FILE, 4101 MCS_UNEVICTABLE, 4102 NR_MCS_STAT, 4103}; 4104 4105struct mcs_total_stat { 4106 s64 stat[NR_MCS_STAT]; 4107}; 4108 4109struct { 4110 char *local_name; 4111 char *total_name; 4112} memcg_stat_strings[NR_MCS_STAT] = { 4113 {"cache", "total_cache"}, 4114 {"rss", "total_rss"}, 4115 {"mapped_file", "total_mapped_file"}, 4116 {"pgpgin", "total_pgpgin"}, 4117 {"pgpgout", "total_pgpgout"}, 4118 {"swap", "total_swap"}, 4119 {"pgfault", "total_pgfault"}, 4120 {"pgmajfault", "total_pgmajfault"}, 4121 {"inactive_anon", "total_inactive_anon"}, 4122 {"active_anon", "total_active_anon"}, 4123 {"inactive_file", "total_inactive_file"}, 4124 {"active_file", "total_active_file"}, 4125 {"unevictable", "total_unevictable"} 4126}; 4127 4128 4129static void 4130mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s) 4131{ 4132 s64 val; 4133 4134 /* per cpu stat */ 4135 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE); 4136 s->stat[MCS_CACHE] += val * PAGE_SIZE; 4137 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS); 4138 s->stat[MCS_RSS] += val * PAGE_SIZE; 4139 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED); 4140 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE; 4141 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN); 4142 s->stat[MCS_PGPGIN] += val; 4143 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT); 4144 s->stat[MCS_PGPGOUT] += val; 4145 if (do_swap_account) { 4146 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT); 4147 s->stat[MCS_SWAP] += val * PAGE_SIZE; 4148 } 4149 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT); 4150 s->stat[MCS_PGFAULT] += val; 4151 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT); 4152 s->stat[MCS_PGMAJFAULT] += val; 4153 4154 /* per zone stat */ 4155 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON)); 4156 s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE; 4157 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON)); 4158 s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE; 4159 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE)); 4160 s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE; 4161 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE)); 4162 s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE; 4163 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE)); 4164 s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE; 4165} 4166 4167static void 4168mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s) 4169{ 4170 struct mem_cgroup *iter; 4171 4172 for_each_mem_cgroup_tree(iter, memcg) 4173 mem_cgroup_get_local_stat(iter, s); 4174} 4175 4176#ifdef CONFIG_NUMA 4177static int mem_control_numa_stat_show(struct seq_file *m, void *arg) 4178{ 4179 int nid; 4180 unsigned long total_nr, file_nr, anon_nr, unevictable_nr; 4181 unsigned long node_nr; 4182 struct cgroup *cont = m->private; 4183 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); 4184 4185 total_nr = mem_cgroup_nr_lru_pages(mem_cont, 
LRU_ALL); 4186 seq_printf(m, "total=%lu", total_nr); 4187 for_each_node_state(nid, N_HIGH_MEMORY) { 4188 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL); 4189 seq_printf(m, " N%d=%lu", nid, node_nr); 4190 } 4191 seq_putc(m, '\n'); 4192 4193 file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE); 4194 seq_printf(m, "file=%lu", file_nr); 4195 for_each_node_state(nid, N_HIGH_MEMORY) { 4196 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, 4197 LRU_ALL_FILE); 4198 seq_printf(m, " N%d=%lu", nid, node_nr); 4199 } 4200 seq_putc(m, '\n'); 4201 4202 anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON); 4203 seq_printf(m, "anon=%lu", anon_nr); 4204 for_each_node_state(nid, N_HIGH_MEMORY) { 4205 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, 4206 LRU_ALL_ANON); 4207 seq_printf(m, " N%d=%lu", nid, node_nr); 4208 } 4209 seq_putc(m, '\n'); 4210 4211 unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE)); 4212 seq_printf(m, "unevictable=%lu", unevictable_nr); 4213 for_each_node_state(nid, N_HIGH_MEMORY) { 4214 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, 4215 BIT(LRU_UNEVICTABLE)); 4216 seq_printf(m, " N%d=%lu", nid, node_nr); 4217 } 4218 seq_putc(m, '\n'); 4219 return 0; 4220} 4221#endif /* CONFIG_NUMA */ 4222 4223static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, 4224 struct cgroup_map_cb *cb) 4225{ 4226 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); 4227 struct mcs_total_stat mystat; 4228 int i; 4229 4230 memset(&mystat, 0, sizeof(mystat)); 4231 mem_cgroup_get_local_stat(mem_cont, &mystat); 4232 4233 4234 for (i = 0; i < NR_MCS_STAT; i++) { 4235 if (i == MCS_SWAP && !do_swap_account) 4236 continue; 4237 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]); 4238 } 4239 4240 /* Hierarchical information */ 4241 { 4242 unsigned long long limit, memsw_limit; 4243 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit); 4244 cb->fill(cb, "hierarchical_memory_limit", limit); 4245 if (do_swap_account) 4246 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit); 4247 } 4248 4249 memset(&mystat, 0, sizeof(mystat)); 4250 mem_cgroup_get_total_stat(mem_cont, &mystat); 4251 for (i = 0; i < NR_MCS_STAT; i++) { 4252 if (i == MCS_SWAP && !do_swap_account) 4253 continue; 4254 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]); 4255 } 4256 4257#ifdef CONFIG_DEBUG_VM 4258 { 4259 int nid, zid; 4260 struct mem_cgroup_per_zone *mz; 4261 unsigned long recent_rotated[2] = {0, 0}; 4262 unsigned long recent_scanned[2] = {0, 0}; 4263 4264 for_each_online_node(nid) 4265 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 4266 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); 4267 4268 recent_rotated[0] += 4269 mz->reclaim_stat.recent_rotated[0]; 4270 recent_rotated[1] += 4271 mz->reclaim_stat.recent_rotated[1]; 4272 recent_scanned[0] += 4273 mz->reclaim_stat.recent_scanned[0]; 4274 recent_scanned[1] += 4275 mz->reclaim_stat.recent_scanned[1]; 4276 } 4277 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]); 4278 cb->fill(cb, "recent_rotated_file", recent_rotated[1]); 4279 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]); 4280 cb->fill(cb, "recent_scanned_file", recent_scanned[1]); 4281 } 4282#endif 4283 4284 return 0; 4285} 4286 4287static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft) 4288{ 4289 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4290 4291 return mem_cgroup_swappiness(memcg); 4292} 4293 4294static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft, 
4295 u64 val)
4296{
4297 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4298 struct mem_cgroup *parent;
4299
4300 if (val > 100)
4301 return -EINVAL;
4302
4303 if (cgrp->parent == NULL)
4304 return -EINVAL;
4305
4306 parent = mem_cgroup_from_cont(cgrp->parent);
4307
4308 cgroup_lock();
4309
4310 /* Under hierarchy, only a root with no children can set this value */
4311 if ((parent->use_hierarchy) ||
4312 (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4313 cgroup_unlock();
4314 return -EINVAL;
4315 }
4316
4317 memcg->swappiness = val;
4318
4319 cgroup_unlock();
4320
4321 return 0;
4322}
4323
4324static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4325{
4326 struct mem_cgroup_threshold_ary *t;
4327 u64 usage;
4328 int i;
4329
4330 rcu_read_lock();
4331 if (!swap)
4332 t = rcu_dereference(memcg->thresholds.primary);
4333 else
4334 t = rcu_dereference(memcg->memsw_thresholds.primary);
4335
4336 if (!t)
4337 goto unlock;
4338
4339 usage = mem_cgroup_usage(memcg, swap);
4340
4341 /*
4342 * current_threshold points to the threshold just below usage.
4343 * If that no longer holds, a threshold was crossed after the
4344 * last call of __mem_cgroup_threshold().
4345 */
4346 i = t->current_threshold;
4347
4348 /*
4349 * Iterate backward over the array of thresholds starting from
4350 * current_threshold and check if a threshold was crossed.
4351 * If none of the thresholds below usage was crossed, we read
4352 * only one element of the array here.
4353 */
4354 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4355 eventfd_signal(t->entries[i].eventfd, 1);
4356
4357 /* i = current_threshold + 1 */
4358 i++;
4359
4360 /*
4361 * Iterate forward over the array of thresholds starting from
4362 * current_threshold+1 and check if a threshold was crossed.
4363 * If none of the thresholds above usage was crossed, we read
4364 * only one element of the array here.
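 * The eventfd signalled here is one that userspace registered through
 * cgroup.event_control. A minimal sketch of a listener, assuming
 * <sys/eventfd.h> and a 64M threshold (paths illustrative, error
 * handling omitted):
 *
 *	int efd = eventfd(0, 0);
 *	int ufd = open("/cgroup/memory/grp/memory.usage_in_bytes", O_RDONLY);
 *	int cfd = open("/cgroup/memory/grp/cgroup.event_control", O_WRONLY);
 *	char buf[64];
 *	uint64_t ticks;
 *
 *	snprintf(buf, sizeof(buf), "%d %d %llu", efd, ufd, 64ULL << 20);
 *	write(cfd, buf, strlen(buf));
 *	read(efd, &ticks, sizeof(ticks));	(blocks until 64M is crossed)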
*/
4366 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4367 eventfd_signal(t->entries[i].eventfd, 1);
4368
4369 /* Update current_threshold */
4370 t->current_threshold = i - 1;
4371unlock:
4372 rcu_read_unlock();
4373}
4374
4375static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4376{
4377 while (memcg) {
4378 __mem_cgroup_threshold(memcg, false);
4379 if (do_swap_account)
4380 __mem_cgroup_threshold(memcg, true);
4381
4382 memcg = parent_mem_cgroup(memcg);
4383 }
4384}
4385
4386static int compare_thresholds(const void *a, const void *b)
4387{
4388 const struct mem_cgroup_threshold *_a = a;
4389 const struct mem_cgroup_threshold *_b = b;
4390
4391 return (_a->threshold > _b->threshold) - (_a->threshold < _b->threshold); /* avoid u64 -> int truncation */
4392}
4393
4394static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4395{
4396 struct mem_cgroup_eventfd_list *ev;
4397
4398 list_for_each_entry(ev, &memcg->oom_notify, list)
4399 eventfd_signal(ev->eventfd, 1);
4400 return 0;
4401}
4402
4403static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4404{
4405 struct mem_cgroup *iter;
4406
4407 for_each_mem_cgroup_tree(iter, memcg)
4408 mem_cgroup_oom_notify_cb(iter);
4409}
4410
4411static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
4412 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4413{
4414 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4415 struct mem_cgroup_thresholds *thresholds;
4416 struct mem_cgroup_threshold_ary *new;
4417 int type = MEMFILE_TYPE(cft->private);
4418 u64 threshold, usage;
4419 int i, size, ret;
4420
4421 ret = res_counter_memparse_write_strategy(args, &threshold);
4422 if (ret)
4423 return ret;
4424
4425 mutex_lock(&memcg->thresholds_lock);
4426
4427 if (type == _MEM)
4428 thresholds = &memcg->thresholds;
4429 else if (type == _MEMSWAP)
4430 thresholds = &memcg->memsw_thresholds;
4431 else
4432 BUG();
4433
4434 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4435
4436 /* Check if a threshold was crossed before adding a new one */
4437 if (thresholds->primary)
4438 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4439
4440 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4441
4442 /* Allocate memory for the new array of thresholds */
4443 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
4444 GFP_KERNEL);
4445 if (!new) {
4446 ret = -ENOMEM;
4447 goto unlock;
4448 }
4449 new->size = size;
4450
4451 /* Copy thresholds (if any) to the new array */
4452 if (thresholds->primary) {
4453 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4454 sizeof(struct mem_cgroup_threshold));
4455 }
4456
4457 /* Add the new threshold */
4458 new->entries[size - 1].eventfd = eventfd;
4459 new->entries[size - 1].threshold = threshold;
4460
4461 /* Sort the thresholds. Registering a new threshold isn't time-critical */
4462 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4463 compare_thresholds, NULL);
4464
4465 /* Find the current threshold */
4466 new->current_threshold = -1;
4467 for (i = 0; i < size; i++) {
4468 if (new->entries[i].threshold < usage) {
4469 /*
4470 * new->current_threshold will not be used until
4471 * rcu_assign_pointer(), so it's safe to increment
4472 * it here.
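 * (Readers keep seeing the old primary array until the
 * rcu_assign_pointer() below publishes the new one.)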
*/
4474 ++new->current_threshold;
4475 }
4476 }
4477
4478 /* Free the old spare buffer and save the old primary buffer as spare */
4479 kfree(thresholds->spare);
4480 thresholds->spare = thresholds->primary;
4481
4482 rcu_assign_pointer(thresholds->primary, new);
4483
4484 /* Make sure nobody still uses the old thresholds array */
4485 synchronize_rcu();
4486
4487unlock:
4488 mutex_unlock(&memcg->thresholds_lock);
4489
4490 return ret;
4491}
4492
4493static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
4494 struct cftype *cft, struct eventfd_ctx *eventfd)
4495{
4496 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4497 struct mem_cgroup_thresholds *thresholds;
4498 struct mem_cgroup_threshold_ary *new;
4499 int type = MEMFILE_TYPE(cft->private);
4500 u64 usage;
4501 int i, j, size;
4502
4503 mutex_lock(&memcg->thresholds_lock);
4504 if (type == _MEM)
4505 thresholds = &memcg->thresholds;
4506 else if (type == _MEMSWAP)
4507 thresholds = &memcg->memsw_thresholds;
4508 else
4509 BUG();
4510
4511 /*
4512 * Something went wrong if we are trying to unregister a threshold
4513 * when no thresholds are registered; bail out instead of
4514 * dereferencing a NULL primary array below.
4515 */
4516 if (!thresholds->primary)
4517 goto unlock;
4518 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4519 /* Check if a threshold was crossed before removing */
4520 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4521
4522 /* Calculate the new number of thresholds */
4523 size = 0;
4524 for (i = 0; i < thresholds->primary->size; i++) {
4525 if (thresholds->primary->entries[i].eventfd != eventfd)
4526 size++;
4527 }
4528
4529 new = thresholds->spare;
4530
4531 /* Set the thresholds array to NULL if no thresholds remain */
4532 if (!size) {
4533 kfree(new);
4534 new = NULL;
4535 goto swap_buffers;
4536 }
4537
4538 new->size = size;
4539
4540 /* Copy thresholds and find the current threshold */
4541 new->current_threshold = -1;
4542 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4543 if (thresholds->primary->entries[i].eventfd == eventfd)
4544 continue;
4545
4546 new->entries[j] = thresholds->primary->entries[i];
4547 if (new->entries[j].threshold < usage) {
4548 /*
4549 * new->current_threshold will not be used
4550 * until rcu_assign_pointer(), so it's safe to increment
4551 * it here.
4552 */
4553 ++new->current_threshold;
4554 }
4555 j++;
4556 }
4557
4558swap_buffers:
4559 /* Swap the primary and spare arrays */
4560 thresholds->spare = thresholds->primary;
4561 rcu_assign_pointer(thresholds->primary, new);
4562
4563 /* Make sure nobody still uses the old thresholds array */
4564 synchronize_rcu();
4565unlock:
4566 mutex_unlock(&memcg->thresholds_lock);
4567}
4568
4569static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4570 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4571{
4572 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4573 struct mem_cgroup_eventfd_list *event;
4574 int type = MEMFILE_TYPE(cft->private);
4575
4576 BUG_ON(type != _OOM_TYPE);
4577 event = kmalloc(sizeof(*event), GFP_KERNEL);
4578 if (!event)
4579 return -ENOMEM;
4580
4581 spin_lock(&memcg_oom_lock);
4582
4583 event->eventfd = eventfd;
4584 list_add(&event->list, &memcg->oom_notify);
4585
4586 /* already in OOM ?
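 * If so, signal the new listener immediately so it does not miss the
 * OOM that is already in progress. Registration mirrors the usage
 * thresholds: userspace writes "<event_fd> <fd of memory.oom_control>"
 * to cgroup.event_control.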
*/ 4587 if (atomic_read(&memcg->under_oom)) 4588 eventfd_signal(eventfd, 1); 4589 spin_unlock(&memcg_oom_lock); 4590 4591 return 0; 4592} 4593 4594static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, 4595 struct cftype *cft, struct eventfd_ctx *eventfd) 4596{ 4597 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4598 struct mem_cgroup_eventfd_list *ev, *tmp; 4599 int type = MEMFILE_TYPE(cft->private); 4600 4601 BUG_ON(type != _OOM_TYPE); 4602 4603 spin_lock(&memcg_oom_lock); 4604 4605 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4606 if (ev->eventfd == eventfd) { 4607 list_del(&ev->list); 4608 kfree(ev); 4609 } 4610 } 4611 4612 spin_unlock(&memcg_oom_lock); 4613} 4614 4615static int mem_cgroup_oom_control_read(struct cgroup *cgrp, 4616 struct cftype *cft, struct cgroup_map_cb *cb) 4617{ 4618 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4619 4620 cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable); 4621 4622 if (atomic_read(&memcg->under_oom)) 4623 cb->fill(cb, "under_oom", 1); 4624 else 4625 cb->fill(cb, "under_oom", 0); 4626 return 0; 4627} 4628 4629static int mem_cgroup_oom_control_write(struct cgroup *cgrp, 4630 struct cftype *cft, u64 val) 4631{ 4632 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4633 struct mem_cgroup *parent; 4634 4635 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4636 if (!cgrp->parent || !((val == 0) || (val == 1))) 4637 return -EINVAL; 4638 4639 parent = mem_cgroup_from_cont(cgrp->parent); 4640 4641 cgroup_lock(); 4642 /* oom-kill-disable is a flag for subhierarchy. */ 4643 if ((parent->use_hierarchy) || 4644 (memcg->use_hierarchy && !list_empty(&cgrp->children))) { 4645 cgroup_unlock(); 4646 return -EINVAL; 4647 } 4648 memcg->oom_kill_disable = val; 4649 if (!val) 4650 memcg_oom_recover(memcg); 4651 cgroup_unlock(); 4652 return 0; 4653} 4654 4655#ifdef CONFIG_NUMA 4656static const struct file_operations mem_control_numa_stat_file_operations = { 4657 .read = seq_read, 4658 .llseek = seq_lseek, 4659 .release = single_release, 4660}; 4661 4662static int mem_control_numa_stat_open(struct inode *unused, struct file *file) 4663{ 4664 struct cgroup *cont = file->f_dentry->d_parent->d_fsdata; 4665 4666 file->f_op = &mem_control_numa_stat_file_operations; 4667 return single_open(file, mem_control_numa_stat_show, cont); 4668} 4669#endif /* CONFIG_NUMA */ 4670 4671#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 4672static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss) 4673{ 4674 /* 4675 * Part of this would be better living in a separate allocation 4676 * function, leaving us with just the cgroup tree population work. 4677 * We, however, depend on state such as network's proto_list that 4678 * is only initialized after cgroup creation. 
I found the least
4679 * cumbersome way to deal with it is to defer it all to populate time.
4680 */
4681 return mem_cgroup_sockets_init(cont, ss);
4682}
4683
4684static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
4685 struct cgroup *cont)
4686{
4687 mem_cgroup_sockets_destroy(cont, ss);
4688}
4689#else
4690static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
4691{
4692 return 0;
4693}
4694
4695static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
4696 struct cgroup *cont)
4697{
4698}
4699#endif
4700
4701static struct cftype mem_cgroup_files[] = {
4702 {
4703 .name = "usage_in_bytes",
4704 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4705 .read_u64 = mem_cgroup_read,
4706 .register_event = mem_cgroup_usage_register_event,
4707 .unregister_event = mem_cgroup_usage_unregister_event,
4708 },
4709 {
4710 .name = "max_usage_in_bytes",
4711 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4712 .trigger = mem_cgroup_reset,
4713 .read_u64 = mem_cgroup_read,
4714 },
4715 {
4716 .name = "limit_in_bytes",
4717 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4718 .write_string = mem_cgroup_write,
4719 .read_u64 = mem_cgroup_read,
4720 },
4721 {
4722 .name = "soft_limit_in_bytes",
4723 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4724 .write_string = mem_cgroup_write,
4725 .read_u64 = mem_cgroup_read,
4726 },
4727 {
4728 .name = "failcnt",
4729 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4730 .trigger = mem_cgroup_reset,
4731 .read_u64 = mem_cgroup_read,
4732 },
4733 {
4734 .name = "stat",
4735 .read_map = mem_control_stat_show,
4736 },
4737 {
4738 .name = "force_empty",
4739 .trigger = mem_cgroup_force_empty_write,
4740 },
4741 {
4742 .name = "use_hierarchy",
4743 .write_u64 = mem_cgroup_hierarchy_write,
4744 .read_u64 = mem_cgroup_hierarchy_read,
4745 },
4746 {
4747 .name = "swappiness",
4748 .read_u64 = mem_cgroup_swappiness_read,
4749 .write_u64 = mem_cgroup_swappiness_write,
4750 },
4751 {
4752 .name = "move_charge_at_immigrate",
4753 .read_u64 = mem_cgroup_move_charge_read,
4754 .write_u64 = mem_cgroup_move_charge_write,
4755 },
4756 {
4757 .name = "oom_control",
4758 .read_map = mem_cgroup_oom_control_read,
4759 .write_u64 = mem_cgroup_oom_control_write,
4760 .register_event = mem_cgroup_oom_register_event,
4761 .unregister_event = mem_cgroup_oom_unregister_event,
4762 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4763 },
4764#ifdef CONFIG_NUMA
4765 {
4766 .name = "numa_stat",
4767 .open = mem_control_numa_stat_open,
4768 .mode = S_IRUGO,
4769 },
4770#endif
4771};
4772
4773#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4774static struct cftype memsw_cgroup_files[] = {
4775 {
4776 .name = "memsw.usage_in_bytes",
4777 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4778 .read_u64 = mem_cgroup_read,
4779 .register_event = mem_cgroup_usage_register_event,
4780 .unregister_event = mem_cgroup_usage_unregister_event,
4781 },
4782 {
4783 .name = "memsw.max_usage_in_bytes",
4784 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4785 .trigger = mem_cgroup_reset,
4786 .read_u64 = mem_cgroup_read,
4787 },
4788 {
4789 .name = "memsw.limit_in_bytes",
4790 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4791 .write_string = mem_cgroup_write,
4792 .read_u64 = mem_cgroup_read,
4793 },
4794 {
4795 .name = "memsw.failcnt",
4796 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4797 .trigger = mem_cgroup_reset,
4798 .read_u64 = mem_cgroup_read,
4799 },
4800};
4801
4802static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4803{
4804 if
(!do_swap_account)
4805 return 0;
4806 return cgroup_add_files(cont, ss, memsw_cgroup_files,
4807 ARRAY_SIZE(memsw_cgroup_files));
4808}
4809#else
4810static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4811{
4812 return 0;
4813}
4814#endif
4815
4816static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4817{
4818 struct mem_cgroup_per_node *pn;
4819 struct mem_cgroup_per_zone *mz;
4820 enum lru_list l;
4821 int zone, tmp = node;
4822 /*
4823 * This routine is called against possible nodes, but it's a BUG to
4824 * call kmalloc() against an offline node.
4825 *
4826 * TODO: this routine can waste a lot of memory for nodes which will
4827 * never be onlined. It would be better to use a memory hotplug
4828 * callback function.
4829 */
4830 if (!node_state(node, N_NORMAL_MEMORY))
4831 tmp = -1;
4832 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4833 if (!pn)
4834 return 1;
4835
4836 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4837 mz = &pn->zoneinfo[zone];
4838 for_each_lru(l)
4839 INIT_LIST_HEAD(&mz->lruvec.lists[l]);
4840 mz->usage_in_excess = 0;
4841 mz->on_tree = false;
4842 mz->mem = memcg;
4843 }
4844 memcg->info.nodeinfo[node] = pn;
4845 return 0;
4846}
4847
4848static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4849{
4850 kfree(memcg->info.nodeinfo[node]);
4851}
4852
4853static struct mem_cgroup *mem_cgroup_alloc(void)
4854{
4855 struct mem_cgroup *mem;
4856 int size = sizeof(struct mem_cgroup);
4857
4858 /* Can be very big if MAX_NUMNODES is very big */
4859 if (size < PAGE_SIZE)
4860 mem = kzalloc(size, GFP_KERNEL);
4861 else
4862 mem = vzalloc(size);
4863
4864 if (!mem)
4865 return NULL;
4866
4867 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4868 if (!mem->stat)
4869 goto out_free;
4870 spin_lock_init(&mem->pcp_counter_lock);
4871 return mem;
4872
4873out_free:
4874 if (size < PAGE_SIZE)
4875 kfree(mem);
4876 else
4877 vfree(mem);
4878 return NULL;
4879}
4880
4881/*
4882 * When a mem_cgroup is destroyed, references from swap_cgroup can remain
4883 * (scanning everything at force_empty is too costly...).
4884 *
4885 * Instead of clearing all references at force_empty, we remember
4886 * the number of references from swap_cgroup and free the mem_cgroup when
4887 * it goes down to 0.
4888 *
4889 * Removal of the cgroup itself succeeds regardless of refs from swap.
4890 */
4891
4892static void __mem_cgroup_free(struct mem_cgroup *memcg)
4893{
4894 int node;
4895
4896 mem_cgroup_remove_from_trees(memcg);
4897 free_css_id(&mem_cgroup_subsys, &memcg->css);
4898
4899 for_each_node_state(node, N_POSSIBLE)
4900 free_mem_cgroup_per_zone_info(memcg, node);
4901
4902 free_percpu(memcg->stat);
4903 if (sizeof(struct mem_cgroup) < PAGE_SIZE)
4904 kfree(memcg);
4905 else
4906 vfree(memcg);
4907}
4908
4909static void mem_cgroup_get(struct mem_cgroup *memcg)
4910{
4911 atomic_inc(&memcg->refcnt);
4912}
4913
4914static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
4915{
4916 if (atomic_sub_and_test(count, &memcg->refcnt)) {
4917 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4918 __mem_cgroup_free(memcg);
4919 if (parent)
4920 mem_cgroup_put(parent);
4921 }
4922}
4923
4924static void mem_cgroup_put(struct mem_cgroup *memcg)
4925{
4926 __mem_cgroup_put(memcg, 1);
4927}
4928
4929/*
4930 * Returns the parent mem_cgroup in the memcg hierarchy, provided hierarchy is enabled.
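 * The parent is recovered from the res_counter parent pointer, which
 * mem_cgroup_create() wires up only when the parent has use_hierarchy
 * set; otherwise NULL is returned.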
4931 */ 4932struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) 4933{ 4934 if (!memcg->res.parent) 4935 return NULL; 4936 return mem_cgroup_from_res_counter(memcg->res.parent, res); 4937} 4938EXPORT_SYMBOL(parent_mem_cgroup); 4939 4940#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4941static void __init enable_swap_cgroup(void) 4942{ 4943 if (!mem_cgroup_disabled() && really_do_swap_account) 4944 do_swap_account = 1; 4945} 4946#else 4947static void __init enable_swap_cgroup(void) 4948{ 4949} 4950#endif 4951 4952static int mem_cgroup_soft_limit_tree_init(void) 4953{ 4954 struct mem_cgroup_tree_per_node *rtpn; 4955 struct mem_cgroup_tree_per_zone *rtpz; 4956 int tmp, node, zone; 4957 4958 for_each_node_state(node, N_POSSIBLE) { 4959 tmp = node; 4960 if (!node_state(node, N_NORMAL_MEMORY)) 4961 tmp = -1; 4962 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp); 4963 if (!rtpn) 4964 return 1; 4965 4966 soft_limit_tree.rb_tree_per_node[node] = rtpn; 4967 4968 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4969 rtpz = &rtpn->rb_tree_per_zone[zone]; 4970 rtpz->rb_root = RB_ROOT; 4971 spin_lock_init(&rtpz->lock); 4972 } 4973 } 4974 return 0; 4975} 4976 4977static struct cgroup_subsys_state * __ref 4978mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) 4979{ 4980 struct mem_cgroup *memcg, *parent; 4981 long error = -ENOMEM; 4982 int node; 4983 4984 memcg = mem_cgroup_alloc(); 4985 if (!memcg) 4986 return ERR_PTR(error); 4987 4988 for_each_node_state(node, N_POSSIBLE) 4989 if (alloc_mem_cgroup_per_zone_info(memcg, node)) 4990 goto free_out; 4991 4992 /* root ? */ 4993 if (cont->parent == NULL) { 4994 int cpu; 4995 enable_swap_cgroup(); 4996 parent = NULL; 4997 if (mem_cgroup_soft_limit_tree_init()) 4998 goto free_out; 4999 root_mem_cgroup = memcg; 5000 for_each_possible_cpu(cpu) { 5001 struct memcg_stock_pcp *stock = 5002 &per_cpu(memcg_stock, cpu); 5003 INIT_WORK(&stock->work, drain_local_stock); 5004 } 5005 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 5006 } else { 5007 parent = mem_cgroup_from_cont(cont->parent); 5008 memcg->use_hierarchy = parent->use_hierarchy; 5009 memcg->oom_kill_disable = parent->oom_kill_disable; 5010 } 5011 5012 if (parent && parent->use_hierarchy) { 5013 res_counter_init(&memcg->res, &parent->res); 5014 res_counter_init(&memcg->memsw, &parent->memsw); 5015 /* 5016 * We increment refcnt of the parent to ensure that we can 5017 * safely access it on res_counter_charge/uncharge. 5018 * This refcnt will be decremented when freeing this 5019 * mem_cgroup(see mem_cgroup_put). 
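 * The matching put happens in __mem_cgroup_put(): once this child's
 * last reference is dropped, the child is freed and the parent
 * reference is released.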
*/
5021 mem_cgroup_get(parent);
5022 } else {
5023 res_counter_init(&memcg->res, NULL);
5024 res_counter_init(&memcg->memsw, NULL);
5025 }
5026 memcg->last_scanned_node = MAX_NUMNODES;
5027 INIT_LIST_HEAD(&memcg->oom_notify);
5028
5029 if (parent)
5030 memcg->swappiness = mem_cgroup_swappiness(parent);
5031 atomic_set(&memcg->refcnt, 1);
5032 memcg->move_charge_at_immigrate = 0;
5033 mutex_init(&memcg->thresholds_lock);
5034 return &memcg->css;
5035free_out:
5036 __mem_cgroup_free(memcg);
5037 return ERR_PTR(error);
5038}
5039
5040static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
5041 struct cgroup *cont)
5042{
5043 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
5044
5045 return mem_cgroup_force_empty(memcg, false);
5046}
5047
5048static void mem_cgroup_destroy(struct cgroup_subsys *ss,
5049 struct cgroup *cont)
5050{
5051 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
5052
5053 kmem_cgroup_destroy(ss, cont);
5054
5055 mem_cgroup_put(memcg);
5056}
5057
5058static int mem_cgroup_populate(struct cgroup_subsys *ss,
5059 struct cgroup *cont)
5060{
5061 int ret;
5062
5063 ret = cgroup_add_files(cont, ss, mem_cgroup_files,
5064 ARRAY_SIZE(mem_cgroup_files));
5065
5066 if (!ret)
5067 ret = register_memsw_files(cont, ss);
5068
5069 if (!ret)
5070 ret = register_kmem_files(cont, ss);
5071
5072 return ret;
5073}
5074
5075#ifdef CONFIG_MMU
5076/* Handlers for move charge at task migration. */
5077#define PRECHARGE_COUNT_AT_ONCE 256
5078static int mem_cgroup_do_precharge(unsigned long count)
5079{
5080 int ret = 0;
5081 int batch_count = PRECHARGE_COUNT_AT_ONCE;
5082 struct mem_cgroup *memcg = mc.to;
5083
5084 if (mem_cgroup_is_root(memcg)) {
5085 mc.precharge += count;
5086 /* we don't need css_get for root */
5087 return ret;
5088 }
5089 /* try to charge everything at once */
5090 if (count > 1) {
5091 struct res_counter *dummy;
5092 /*
5093 * "memcg" cannot be under rmdir() because we've already checked
5094 * by cgroup_lock_live_cgroup() that it is not removed and we
5095 * are still under the same cgroup_mutex. So we can postpone
5096 * css_get().
5097 */
5098 if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
5099 goto one_by_one;
5100 if (do_swap_account && res_counter_charge(&memcg->memsw,
5101 PAGE_SIZE * count, &dummy)) {
5102 res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
5103 goto one_by_one;
5104 }
5105 mc.precharge += count;
5106 return ret;
5107 }
5108one_by_one:
5109 /* fall back to charging one by one */
5110 while (count--) {
5111 if (signal_pending(current)) {
5112 ret = -EINTR;
5113 break;
5114 }
5115 if (!batch_count--) {
5116 batch_count = PRECHARGE_COUNT_AT_ONCE;
5117 cond_resched();
5118 }
5119 ret = __mem_cgroup_try_charge(NULL,
5120 GFP_KERNEL, 1, &memcg, false);
5121 if (ret || !memcg)
5122 /* mem_cgroup_clear_mc() will do uncharge later */
5123 return -ENOMEM;
5124 mc.precharge++;
5125 }
5126 return ret;
5127}
5128
5129/**
5130 * is_target_pte_for_mc - check whether a pte is a valid target for move charge
5131 * @vma: the vma the pte to be checked belongs to
5132 * @addr: the address corresponding to the pte to be checked
5133 * @ptent: the pte to be checked
5134 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5135 *
5136 * Returns
5137 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
5138 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5139 * move charge. If @target is not NULL, the page is stored in target->page
5140 * with an extra refcount taken (callers must handle it).
5141 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 5142 * target for charge migration. if @target is not NULL, the entry is stored 5143 * in target->ent. 5144 * 5145 * Called with pte lock held. 5146 */ 5147union mc_target { 5148 struct page *page; 5149 swp_entry_t ent; 5150}; 5151 5152enum mc_target_type { 5153 MC_TARGET_NONE, /* not used */ 5154 MC_TARGET_PAGE, 5155 MC_TARGET_SWAP, 5156}; 5157 5158static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5159 unsigned long addr, pte_t ptent) 5160{ 5161 struct page *page = vm_normal_page(vma, addr, ptent); 5162 5163 if (!page || !page_mapped(page)) 5164 return NULL; 5165 if (PageAnon(page)) { 5166 /* we don't move shared anon */ 5167 if (!move_anon() || page_mapcount(page) > 2) 5168 return NULL; 5169 } else if (!move_file()) 5170 /* we ignore mapcount for file pages */ 5171 return NULL; 5172 if (!get_page_unless_zero(page)) 5173 return NULL; 5174 5175 return page; 5176} 5177 5178static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5179 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5180{ 5181 int usage_count; 5182 struct page *page = NULL; 5183 swp_entry_t ent = pte_to_swp_entry(ptent); 5184 5185 if (!move_anon() || non_swap_entry(ent)) 5186 return NULL; 5187 usage_count = mem_cgroup_count_swap_user(ent, &page); 5188 if (usage_count > 1) { /* we don't move shared anon */ 5189 if (page) 5190 put_page(page); 5191 return NULL; 5192 } 5193 if (do_swap_account) 5194 entry->val = ent.val; 5195 5196 return page; 5197} 5198 5199static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5200 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5201{ 5202 struct page *page = NULL; 5203 struct inode *inode; 5204 struct address_space *mapping; 5205 pgoff_t pgoff; 5206 5207 if (!vma->vm_file) /* anonymous vma */ 5208 return NULL; 5209 if (!move_file()) 5210 return NULL; 5211 5212 inode = vma->vm_file->f_path.dentry->d_inode; 5213 mapping = vma->vm_file->f_mapping; 5214 if (pte_none(ptent)) 5215 pgoff = linear_page_index(vma, addr); 5216 else /* pte_file(ptent) is true */ 5217 pgoff = pte_to_pgoff(ptent); 5218 5219 /* page is moved even if it's not RSS of this task(page-faulted). */ 5220 page = find_get_page(mapping, pgoff); 5221 5222#ifdef CONFIG_SWAP 5223 /* shmem/tmpfs may report page out on swap: account for that too. */ 5224 if (radix_tree_exceptional_entry(page)) { 5225 swp_entry_t swap = radix_to_swp_entry(page); 5226 if (do_swap_account) 5227 *entry = swap; 5228 page = find_get_page(&swapper_space, swap.val); 5229 } 5230#endif 5231 return page; 5232} 5233 5234static int is_target_pte_for_mc(struct vm_area_struct *vma, 5235 unsigned long addr, pte_t ptent, union mc_target *target) 5236{ 5237 struct page *page = NULL; 5238 struct page_cgroup *pc; 5239 int ret = 0; 5240 swp_entry_t ent = { .val = 0 }; 5241 5242 if (pte_present(ptent)) 5243 page = mc_handle_present_pte(vma, addr, ptent); 5244 else if (is_swap_pte(ptent)) 5245 page = mc_handle_swap_pte(vma, addr, ptent, &ent); 5246 else if (pte_none(ptent) || pte_file(ptent)) 5247 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5248 5249 if (!page && !ent.val) 5250 return 0; 5251 if (page) { 5252 pc = lookup_page_cgroup(page); 5253 /* 5254 * Do only loose check w/o page_cgroup lock. 5255 * mem_cgroup_move_account() checks the pc is valid or not under 5256 * the lock. 
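 * A false positive here is harmless: mem_cgroup_move_account()
 * re-checks the pc under lock_page_cgroup() and fails the move if it
 * lost the race.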
5257 */ 5258 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { 5259 ret = MC_TARGET_PAGE; 5260 if (target) 5261 target->page = page; 5262 } 5263 if (!ret || !target) 5264 put_page(page); 5265 } 5266 /* There is a swap entry and a page doesn't exist or isn't charged */ 5267 if (ent.val && !ret && 5268 css_id(&mc.from->css) == lookup_swap_cgroup(ent)) { 5269 ret = MC_TARGET_SWAP; 5270 if (target) 5271 target->ent = ent; 5272 } 5273 return ret; 5274} 5275 5276static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5277 unsigned long addr, unsigned long end, 5278 struct mm_walk *walk) 5279{ 5280 struct vm_area_struct *vma = walk->private; 5281 pte_t *pte; 5282 spinlock_t *ptl; 5283 5284 split_huge_page_pmd(walk->mm, pmd); 5285 5286 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5287 for (; addr != end; pte++, addr += PAGE_SIZE) 5288 if (is_target_pte_for_mc(vma, addr, *pte, NULL)) 5289 mc.precharge++; /* increment precharge temporarily */ 5290 pte_unmap_unlock(pte - 1, ptl); 5291 cond_resched(); 5292 5293 return 0; 5294} 5295 5296static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5297{ 5298 unsigned long precharge; 5299 struct vm_area_struct *vma; 5300 5301 down_read(&mm->mmap_sem); 5302 for (vma = mm->mmap; vma; vma = vma->vm_next) { 5303 struct mm_walk mem_cgroup_count_precharge_walk = { 5304 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5305 .mm = mm, 5306 .private = vma, 5307 }; 5308 if (is_vm_hugetlb_page(vma)) 5309 continue; 5310 walk_page_range(vma->vm_start, vma->vm_end, 5311 &mem_cgroup_count_precharge_walk); 5312 } 5313 up_read(&mm->mmap_sem); 5314 5315 precharge = mc.precharge; 5316 mc.precharge = 0; 5317 5318 return precharge; 5319} 5320 5321static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5322{ 5323 unsigned long precharge = mem_cgroup_count_precharge(mm); 5324 5325 VM_BUG_ON(mc.moving_task); 5326 mc.moving_task = current; 5327 return mem_cgroup_do_precharge(precharge); 5328} 5329 5330/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 5331static void __mem_cgroup_clear_mc(void) 5332{ 5333 struct mem_cgroup *from = mc.from; 5334 struct mem_cgroup *to = mc.to; 5335 5336 /* we must uncharge all the leftover precharges from mc.to */ 5337 if (mc.precharge) { 5338 __mem_cgroup_cancel_charge(mc.to, mc.precharge); 5339 mc.precharge = 0; 5340 } 5341 /* 5342 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5343 * we must uncharge here. 5344 */ 5345 if (mc.moved_charge) { 5346 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge); 5347 mc.moved_charge = 0; 5348 } 5349 /* we must fixup refcnts and charges */ 5350 if (mc.moved_swap) { 5351 /* uncharge swap account from the old cgroup */ 5352 if (!mem_cgroup_is_root(mc.from)) 5353 res_counter_uncharge(&mc.from->memsw, 5354 PAGE_SIZE * mc.moved_swap); 5355 __mem_cgroup_put(mc.from, mc.moved_swap); 5356 5357 if (!mem_cgroup_is_root(mc.to)) { 5358 /* 5359 * we charged both to->res and to->memsw, so we should 5360 * uncharge to->res. 5361 */ 5362 res_counter_uncharge(&mc.to->res, 5363 PAGE_SIZE * mc.moved_swap); 5364 } 5365 /* we've already done mem_cgroup_get(mc.to) */ 5366 mc.moved_swap = 0; 5367 } 5368 memcg_oom_recover(from); 5369 memcg_oom_recover(to); 5370 wake_up_all(&mc.waitq); 5371} 5372 5373static void mem_cgroup_clear_mc(void) 5374{ 5375 struct mem_cgroup *from = mc.from; 5376 5377 /* 5378 * we must clear moving_task before waking up waiters at the end of 5379 * task migration. 
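 * Waiters re-test mc.moving_task after prepare_to_wait(), so clearing
 * it before __mem_cgroup_clear_mc() wakes them ensures no waiter goes
 * back to sleep on an already-finished migration.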
5380 */
5381 mc.moving_task = NULL;
5382 __mem_cgroup_clear_mc();
5383 spin_lock(&mc.lock);
5384 mc.from = NULL;
5385 mc.to = NULL;
5386 spin_unlock(&mc.lock);
5387 mem_cgroup_end_move(from);
5388}
5389
5390static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5391 struct cgroup *cgroup,
5392 struct cgroup_taskset *tset)
5393{
5394 struct task_struct *p = cgroup_taskset_first(tset);
5395 int ret = 0;
5396 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
5397
5398 if (memcg->move_charge_at_immigrate) {
5399 struct mm_struct *mm;
5400 struct mem_cgroup *from = mem_cgroup_from_task(p);
5401
5402 VM_BUG_ON(from == memcg);
5403
5404 mm = get_task_mm(p);
5405 if (!mm)
5406 return 0;
5407 /* We move charges only when we move an owner of the mm */
5408 if (mm->owner == p) {
5409 VM_BUG_ON(mc.from);
5410 VM_BUG_ON(mc.to);
5411 VM_BUG_ON(mc.precharge);
5412 VM_BUG_ON(mc.moved_charge);
5413 VM_BUG_ON(mc.moved_swap);
5414 mem_cgroup_start_move(from);
5415 spin_lock(&mc.lock);
5416 mc.from = from;
5417 mc.to = memcg;
5418 spin_unlock(&mc.lock);
5419 /* We set mc.moving_task later */
5420
5421 ret = mem_cgroup_precharge_mc(mm);
5422 if (ret)
5423 mem_cgroup_clear_mc();
5424 }
5425 mmput(mm);
5426 }
5427 return ret;
5428}
5429
5430static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5431 struct cgroup *cgroup,
5432 struct cgroup_taskset *tset)
5433{
5434 mem_cgroup_clear_mc();
5435}
5436
5437static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5438 unsigned long addr, unsigned long end,
5439 struct mm_walk *walk)
5440{
5441 int ret = 0;
5442 struct vm_area_struct *vma = walk->private;
5443 pte_t *pte;
5444 spinlock_t *ptl;
5445
5446 split_huge_page_pmd(walk->mm, pmd);
5447retry:
5448 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5449 for (; addr != end; addr += PAGE_SIZE) {
5450 pte_t ptent = *(pte++);
5451 union mc_target target;
5452 int type;
5453 struct page *page;
5454 struct page_cgroup *pc;
5455 swp_entry_t ent;
5456
5457 if (!mc.precharge)
5458 break;
5459
5460 type = is_target_pte_for_mc(vma, addr, ptent, &target);
5461 switch (type) {
5462 case MC_TARGET_PAGE:
5463 page = target.page;
5464 if (isolate_lru_page(page))
5465 goto put;
5466 pc = lookup_page_cgroup(page);
5467 if (!mem_cgroup_move_account(page, 1, pc,
5468 mc.from, mc.to, false)) {
5469 mc.precharge--;
5470 /* we uncharge from mc.from later. */
5471 mc.moved_charge++;
5472 }
5473 putback_lru_page(page);
5474put: /* is_target_pte_for_mc() gets the page */
5475 put_page(page);
5476 break;
5477 case MC_TARGET_SWAP:
5478 ent = target.ent;
5479 if (!mem_cgroup_move_swap_account(ent,
5480 mc.from, mc.to, false)) {
5481 mc.precharge--;
5482 /* we fix up refcnts and charges later. */
5483 mc.moved_swap++;
5484 }
5485 break;
5486 default:
5487 break;
5488 }
5489 }
5490 pte_unmap_unlock(pte - 1, ptl);
5491 cond_resched();
5492
5493 if (addr != end) {
5494 /*
5495 * We have consumed all the precharges we got in can_attach().
5496 * We try to charge one by one, but we don't do any additional
5497 * charges to mc.to once charging has failed in the attach()
5498 * phase.
5499 */
5500 ret = mem_cgroup_do_precharge(1);
5501 if (!ret)
5502 goto retry;
5503 }
5504
5505 return ret;
5506}
5507
5508static void mem_cgroup_move_charge(struct mm_struct *mm)
5509{
5510 struct vm_area_struct *vma;
5511
5512 lru_add_drain_all();
5513retry:
5514 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5515 /*
5516 * Someone who is holding the mmap_sem might be waiting on the
5517 * waitq.
So we cancel all extra charges, wake up all waiters, 5518 * and retry. Because we cancel precharges, we might not be able 5519 * to move enough charges, but moving charge is a best-effort 5520 * feature anyway, so it wouldn't be a big problem. 5521 */ 5522 __mem_cgroup_clear_mc(); 5523 cond_resched(); 5524 goto retry; 5525 } 5526 for (vma = mm->mmap; vma; vma = vma->vm_next) { 5527 int ret; 5528 struct mm_walk mem_cgroup_move_charge_walk = { 5529 .pmd_entry = mem_cgroup_move_charge_pte_range, 5530 .mm = mm, 5531 .private = vma, 5532 }; 5533 if (is_vm_hugetlb_page(vma)) 5534 continue; 5535 ret = walk_page_range(vma->vm_start, vma->vm_end, 5536 &mem_cgroup_move_charge_walk); 5537 if (ret) 5538 /* 5539 * means we have consumed all precharges and failed in 5540 * doing additional charge. Just abandon here. 5541 */ 5542 break; 5543 } 5544 up_read(&mm->mmap_sem); 5545} 5546 5547static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5548 struct cgroup *cont, 5549 struct cgroup_taskset *tset) 5550{ 5551 struct task_struct *p = cgroup_taskset_first(tset); 5552 struct mm_struct *mm = get_task_mm(p); 5553 5554 if (mm) { 5555 if (mc.to) 5556 mem_cgroup_move_charge(mm); 5557 put_swap_token(mm); 5558 mmput(mm); 5559 } 5560 if (mc.to) 5561 mem_cgroup_clear_mc(); 5562} 5563#else /* !CONFIG_MMU */ 5564static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5565 struct cgroup *cgroup, 5566 struct cgroup_taskset *tset) 5567{ 5568 return 0; 5569} 5570static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, 5571 struct cgroup *cgroup, 5572 struct cgroup_taskset *tset) 5573{ 5574} 5575static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5576 struct cgroup *cont, 5577 struct cgroup_taskset *tset) 5578{ 5579} 5580#endif 5581 5582struct cgroup_subsys mem_cgroup_subsys = { 5583 .name = "memory", 5584 .subsys_id = mem_cgroup_subsys_id, 5585 .create = mem_cgroup_create, 5586 .pre_destroy = mem_cgroup_pre_destroy, 5587 .destroy = mem_cgroup_destroy, 5588 .populate = mem_cgroup_populate, 5589 .can_attach = mem_cgroup_can_attach, 5590 .cancel_attach = mem_cgroup_cancel_attach, 5591 .attach = mem_cgroup_move_task, 5592 .early_init = 0, 5593 .use_id = 1, 5594}; 5595 5596#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 5597static int __init enable_swap_account(char *s) 5598{ 5599 /* consider enabled if no parameter or 1 is given */ 5600 if (!strcmp(s, "1")) 5601 really_do_swap_account = 1; 5602 else if (!strcmp(s, "0")) 5603 really_do_swap_account = 0; 5604 return 1; 5605} 5606__setup("swapaccount=", enable_swap_account); 5607 5608#endif 5609