memcontrol.c revision f53d7ce32e13dbd09573b176e6521a04c2c77803
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"
#include <net/sock.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5
struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		(0)
#endif


/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
	MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
	MEM_CGROUP_ON_MOVE,	/* someone is moving account between groups */
	MEM_CGROUP_STAT_NSTATS,
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};
/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events, which is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET (128)
#define SOFTLIMIT_EVENTS_TARGET (1024)
#define NUMAINFO_EVENTS_TARGET	(1024)
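
/*
 * Illustrative example (not from the original source): with
 * THRESHOLDS_EVENTS_TARGET == 128, the per-cpu event counter must advance
 * by at least 128 pagein/pageout events before mem_cgroup_threshold() is
 * run again, while soft limit and NUMA info updates use the coarser
 * 1024-event targets.  See mem_cgroup_event_ratelimit() below.
 */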

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	/* css_id of the last scanned hierarchy member */
	int position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		count[NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];

	struct zone_reclaim_stat reclaim_stat;
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
						/* use container_of	   */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to the threshold just below usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;
	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	bool		oom_lock;
	atomic_t	under_oom;

	atomic_t	refcnt;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long 	move_charge_at_immigrate;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

#ifdef CONFIG_INET
	struct tcp_memcontrol tcp_mem;
#endif
};

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON,
					&mc.to->move_charge_at_immigrate);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE,
					&mc.to->move_charge_at_immigrate);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define _OOM_TYPE		(2)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
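
/*
 * Illustrative example (not from the original source): for the mem+swap
 * limit file, cft->private would be built as
 *
 *	MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) == (1 << 16) | RES_LIMIT
 *
 * and the handlers recover the two halves with
 * MEMFILE_TYPE() == _MEMSWAP and MEMFILE_ATTR() == RES_LIMIT.
 */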

/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)

static void mem_cgroup_get(struct mem_cgroup *memcg);
static void mem_cgroup_put(struct mem_cgroup *memcg);

/* Writing them here to avoid exposing memcg's inner layout */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
#ifdef CONFIG_INET
#include <net/sock.h>
#include <net/ip.h>

static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
void sock_update_memcg(struct sock *sk)
{
	if (static_branch(&memcg_socket_limit_enabled)) {
		struct mem_cgroup *memcg;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't, however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			mem_cgroup_get(sk->sk_cgrp->memcg);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		if (!mem_cgroup_is_root(memcg)) {
			mem_cgroup_get(memcg);
			sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		mem_cgroup_put(memcg);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem.cg_proto;
}
EXPORT_SYMBOL(tcp_proto_cgroup);
#endif /* CONFIG_INET */
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */

static void drain_all_stock_async(struct mem_cgroup *memcg);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
{
	return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return mem_cgroup_zoneinfo(memcg, nid, zid);
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
	spin_unlock(&mctz->lock);
}


static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
		excess = res_counter_soft_limit_excess(&memcg->res);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node_state(node, N_POSSIBLE) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(memcg, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(memcg, mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
		!css_tryget(&mz->mem->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and periodic
 * synchronization to implement "quick" reads. There is a trade-off between
 * the cost of reading and the precision of the value. We may get a chance
 * to implement a similar periodic synchronization of the counters in memcg.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact
 * value because of that accounting. Even if we provided a quick-and-fuzzy
 * read, we would always have to visit all online cpus and compute the sum.
 * So, for now, the extra synchronization is not implemented (it is just
 * implemented for cpu hotplug).
 *
 * If there are kernel-internal users which could make use of a not-exact
 * value, and reading all cpu values becomes a performance bottleneck in
 * some common workload, thresholds and synchronization as in vmstat[]
 * should be implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}

void mem_cgroup_pgfault(struct mem_cgroup *memcg, int val)
{
	this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
}

void mem_cgroup_pgmajfault(struct mem_cgroup *memcg, int val)
{
	this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 bool file, int nr_pages)
{
	preempt_disable();

	if (file)
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);

	preempt_enable();
}
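
/*
 * Illustrative example (not from the original source): charging a 2MB THP
 * page (512 base pages, assuming 4K pages) calls the function above with
 * nr_pages == 512, so the CACHE/RSS counter moves by 512 pages while
 * PGPGIN is bumped only once; EVENTS_COUNT still advances by the full 512
 * for the event ratelimiting below.
 */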

unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			unsigned int lru_mask)
{
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	unsigned long ret = 0;

	mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	for_each_lru(l) {
		if (BIT(l) & lru_mask)
			ret += MEM_CGROUP_ZSTAT(mz, l);
	}
	return ret;
}

static unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			int nid, unsigned int lru_mask)
{
	u64 total = 0;
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		total += mem_cgroup_zone_nr_lru_pages(memcg,
						nid, zid, lru_mask);

	return total;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
			unsigned int lru_mask)
{
	int nid;
	u64 total = 0;

	for_each_node_state(nid, N_HIGH_MEMORY)
		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return total;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}
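
/*
 * Illustrative example (not from the original source): if the percpu event
 * count has reached 130 while the threshold target is still 128, the
 * signed difference 128 - 130 is negative, so the target fires and is
 * rearmed at 130 + THRESHOLDS_EVENTS_TARGET.  As with time_after(), the
 * signed comparison keeps this correct across counter wraparound.
 */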

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	preempt_disable();
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit, do_numainfo;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		preempt_enable();

		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	} else
		preempt_enable();
}

struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	if (!mm)
		return NULL;
	/*
	 * Because we have no locks, mm->owner may be being moved to another
	 * cgroup. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			break;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup *memcg = NULL;
	int id = 0;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		id = css_id(&prev->css);

	if (prev && prev != root)
		css_put(&prev->css);

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			return NULL;
		return root;
	}

	while (!memcg) {
		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
		struct cgroup_subsys_state *css;

		if (reclaim) {
			int nid = zone_to_nid(reclaim->zone);
			int zid = zone_idx(reclaim->zone);
			struct mem_cgroup_per_zone *mz;

			mz = mem_cgroup_zoneinfo(root, nid, zid);
			iter = &mz->reclaim_iter[reclaim->priority];
			if (prev && reclaim->generation != iter->generation)
				return NULL;
			id = iter->position;
		}

		rcu_read_lock();
		css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
		if (css) {
			if (css == &root->css || css_tryget(css))
				memcg = container_of(css,
						     struct mem_cgroup, css);
		} else
			id = 0;
		rcu_read_unlock();

		if (reclaim) {
			iter->position = id;
			if (!css)
				iter->generation++;
			else if (!prev && memcg)
				reclaim->generation = iter->generation;
		}

		if (prev && !css)
			return NULL;
	}
	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
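
/*
 * Illustrative usage sketch (not from the original source): a walk that
 * bails out early must drop the iterator's css reference itself, e.g.
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * where "some_condition" is a hypothetical predicate; this is the pattern
 * mem_cgroup_oom_lock() below uses.
 */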

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (!mm)
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!memcg))
		goto out;

	switch (idx) {
	case PGMAJFAULT:
		mem_cgroup_pgmajfault(memcg, 1);
		break;
	case PGFAULT:
		mem_cgroup_pgfault(memcg, 1);
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(mem_cgroup_count_vm_event);

/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @memcg.  This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return &zone->lruvec;

	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
	return &mz->lruvec;
}

/*
 * The following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by routines of the global LRU independently from
 * memcg. What we have to take care of here is the validity of
 * pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charging
 * 2. moving accounting
 * In the typical case, "charge" is done before add-to-lru. The exception is
 * SwapCache, which is added to the LRU before being charged.
 * If the PCG_USED bit is not set, the page_cgroup is not added to this
 * private LRU. When moving accounting, the page is not on the LRU. It's
 * isolated.
 */

/**
 * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
 * @zone: zone of the page
 * @page: the page
 * @lru: current lru
 *
 * This function accounts for @page being added to @lru, and returns
 * the lruvec for the given @zone and the memcg @page is charged to.
 *
 * The callsite is then responsible for physically linking the page to
 * the returned lruvec->lists[@lru].
 */
struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
				       enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return &zone->lruvec;

	pc = lookup_page_cgroup(page);
	VM_BUG_ON(PageCgroupAcctLRU(pc));
	/*
	 * putback:				charge:
	 * SetPageLRU				SetPageCgroupUsed
	 * smp_mb				smp_mb
	 * PageCgroupUsed && add to memcg LRU	PageLRU && add to memcg LRU
	 *
	 * Ensure that one of the two sides adds the page to the memcg
	 * LRU during a race.
	 */
	smp_mb();
	/*
	 * If the page is uncharged, it may be freed soon, but it
	 * could also be swap cache (readahead, swapoff) that needs to
	 * be reclaimable in the future.  root_mem_cgroup will babysit
	 * it for the time being.
	 */
	if (PageCgroupUsed(pc)) {
		/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
		smp_rmb();
		memcg = pc->mem_cgroup;
		SetPageCgroupAcctLRU(pc);
	} else
		memcg = root_mem_cgroup;
	mz = page_cgroup_zoneinfo(memcg, page);
	/* compound_order() is stabilized through lru_lock */
	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
	return &mz->lruvec;
}

/**
 * mem_cgroup_lru_del_list - account for removing an lru page
 * @page: the page
 * @lru: target lru
 *
 * This function accounts for @page being removed from @lru.
 *
 * The callsite is then responsible for physically unlinking
 * @page->lru.
 */
void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/*
	 * root_mem_cgroup babysits uncharged LRU pages, but
	 * PageCgroupUsed is cleared when the page is about to get
	 * freed.  PageCgroupAcctLRU remembers whether the
	 * LRU-accounting happened against pc->mem_cgroup or
	 * root_mem_cgroup.
	 */
	if (TestClearPageCgroupAcctLRU(pc)) {
		VM_BUG_ON(!pc->mem_cgroup);
		memcg = pc->mem_cgroup;
	} else
		memcg = root_mem_cgroup;
	mz = page_cgroup_zoneinfo(memcg, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
}

void mem_cgroup_lru_del(struct page *page)
{
	mem_cgroup_lru_del_list(page, page_lru(page));
}

/**
 * mem_cgroup_lru_move_lists - account for moving a page between lrus
 * @zone: zone of the page
 * @page: the page
 * @from: current lru
 * @to: target lru
 *
 * This function accounts for @page being moved between the lrus @from
 * and @to, and returns the lruvec for the given @zone and the memcg
 * @page is charged to.
 *
 * The callsite is then responsible for physically relinking
 * @page->lru to the returned lruvec->lists[@to].
 */
struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
					 struct page *page,
					 enum lru_list from,
					 enum lru_list to)
{
	/* XXX: Optimize this, especially for @from == @to */
	mem_cgroup_lru_del_list(page, from);
	return mem_cgroup_lru_add_list(zone, page, to);
}

/*
 * When handling SwapCache and other FUSE stuff, pc->mem_cgroup may be
 * changed while it's linked to the lru, because the page may be reused after
 * it's fully uncharged. To handle that, unlink the page_cgroup from the LRU
 * when charging it again. This is done under lock_page(), and it is expected
 * that zone->lru_lock is never held.
 */
static void mem_cgroup_lru_del_before_commit(struct page *page)
{
	enum lru_list lru;
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/*
	 * Doing this check without taking ->lru_lock seems wrong but this
	 * is safe. Because if page_cgroup's USED bit is unset, the page
	 * will not be added to any memcg's LRU. If page_cgroup's USED bit is
	 * set, the commit after this will fail, anyway.
	 * All of this charge/uncharge is done under some mutual exclusion.
	 * So, we don't need to take care of changes in the USED bit.
	 */
	if (likely(!PageLRU(page)))
		return;

	spin_lock_irqsave(&zone->lru_lock, flags);
	lru = page_lru(page);
	/*
	 * The uncharged page could still be registered to the LRU of
	 * the stale pc->mem_cgroup.
	 *
	 * As pc->mem_cgroup is about to get overwritten, the old LRU
	 * accounting needs to be taken care of.  Let root_mem_cgroup
	 * babysit the page until the new memcg is responsible for it.
	 *
	 * The PCG_USED bit is guarded by lock_page() as the page is
	 * swapcache/pagecache.
	 */
	if (PageLRU(page) && PageCgroupAcctLRU(pc) && !PageCgroupUsed(pc)) {
		del_page_from_lru_list(zone, page, lru);
		add_page_to_lru_list(zone, page, lru);
	}
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}

static void mem_cgroup_lru_add_after_commit(struct page *page)
{
	enum lru_list lru;
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);
	/*
	 * putback:				charge:
	 * SetPageLRU				SetPageCgroupUsed
	 * smp_mb				smp_mb
	 * PageCgroupUsed && add to memcg LRU	PageLRU && add to memcg LRU
	 *
	 * Ensure that one of the two sides adds the page to the memcg
	 * LRU during a race.
	 */
	smp_mb();
	/* taking care of that the page is added to LRU while we commit it */
	if (likely(!PageLRU(page)))
		return;
	spin_lock_irqsave(&zone->lru_lock, flags);
	lru = page_lru(page);
	/*
	 * If the page is not on the LRU, someone will soon put it
	 * there.  If it is, and also already accounted for on the
	 * memcg-side, it must be on the right lruvec as setting
	 * pc->mem_cgroup and PageCgroupUsed is properly ordered.
	 * Otherwise, root_mem_cgroup has been babysitting the page
	 * during the charge.  Move it to the new memcg now.
	 */
	if (PageLRU(page) && !PageCgroupAcctLRU(pc)) {
		del_page_from_lru_list(zone, page, lru);
		add_page_to_lru_list(zone, page, lru);
	}
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}

/*
 * Checks whether the given memcg is the same as root_memcg or within
 * root_memcg's hierarchy subtree
 */
static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
		struct mem_cgroup *memcg)
{
	if (root_memcg != memcg) {
		return (root_memcg->use_hierarchy &&
			css_is_ancestor(&memcg->css, &root_memcg->css));
	}

	return true;
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
{
	int ret;
	struct mem_cgroup *curr = NULL;
	struct task_struct *p;

	p = find_lock_task_mm(task);
	if (!p)
		return 0;
	curr = try_get_mem_cgroup_from_mm(p->mm);
	task_unlock(p);
	if (!curr)
		return 0;
	/*
	 * We should check use_hierarchy of "memcg", not "curr". Checking
	 * use_hierarchy of "curr" here would make this function return true
	 * if hierarchy is enabled in "curr" and "curr" is a child of "memcg"
	 * in the *cgroup* hierarchy (even if use_hierarchy is disabled in
	 * "memcg").
	 */
	ret = mem_cgroup_same_or_subtree(memcg, curr);
	css_put(&curr->css);
	return ret;
}

int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	unsigned long inactive_ratio;
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);
	unsigned long inactive;
	unsigned long active;
	unsigned long gb;

	inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
						BIT(LRU_INACTIVE_ANON));
	active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
					      BIT(LRU_ACTIVE_ANON));

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	return inactive * inactive_ratio < active;
}
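
/*
 * Illustrative example (not from the original source): with 4GB of anon
 * pages on the zone's LRUs, gb == 4 and inactive_ratio == int_sqrt(40),
 * i.e. 6, so the inactive anon list is considered low only while
 * inactive * 6 < active.
 */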

int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	unsigned long active;
	unsigned long inactive;
	int zid = zone_idx(zone);
	int nid = zone_to_nid(zone);

	inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
						BIT(LRU_INACTIVE_FILE));
	active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
					      BIT(LRU_ACTIVE_FILE));

	return (active > inactive);
}

struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	return &mz->reclaim_stat;
}

struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return NULL;

	pc = lookup_page_cgroup(page);
	if (!PageCgroupUsed(pc))
		return NULL;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	return &mz->reclaim_stat;
}

#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long long margin;

	margin = res_counter_margin(&memcg->res);
	if (do_swap_account)
		margin = min(margin, res_counter_margin(&memcg->memsw));
	return margin >> PAGE_SHIFT;
}
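
/*
 * Illustrative example (not from the original source): with a 100MB limit
 * and 80MB of usage, res_counter_margin() yields 20MB and the function
 * above returns 5120 pages (assuming 4K pages); with swap accounting
 * enabled, the smaller of the memory and mem+swap margins wins.
 */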

int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	struct cgroup *cgrp = memcg->css.cgroup;

	/* root ? */
	if (cgrp->parent == NULL)
		return vm_swappiness;

	return memcg->swappiness;
}

static void mem_cgroup_start_move(struct mem_cgroup *memcg)
{
	int cpu;

	get_online_cpus();
	spin_lock(&memcg->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
	memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
	spin_unlock(&memcg->pcp_counter_lock);
	put_online_cpus();

	synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *memcg)
{
	int cpu;

	if (!memcg)
		return;
	get_online_cpus();
	spin_lock(&memcg->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
	memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
	spin_unlock(&memcg->pcp_counter_lock);
	put_online_cpus();
}
/*
 * 2 routines for checking whether "memcg" is under move_account() or not.
 *
 * mem_cgroup_stealed() - checks whether a cgroup is mc.from. This is used
 *			  for avoiding races in accounting. If true,
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checks whether a cgroup is mc.from, mc.to, or
 *			  under the hierarchy of moving cgroups. This is for
 *			  waiting at high memory pressure caused by "move".
 */

static bool mem_cgroup_stealed(struct mem_cgroup *memcg)
{
	VM_BUG_ON(!rcu_read_lock_held());
	return this_cpu_read(memcg->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
}

static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_same_or_subtree(memcg, from)
		|| mem_cgroup_same_or_subtree(memcg, to);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

/**
 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct cgroup *task_cgrp;
	struct cgroup *mem_cgrp;
	/*
	 * Need a buffer in BSS, can't rely on allocations. The code relies
	 * on the assumption that OOM is serialized for memory controller.
	 * If this assumption is broken, revisit this code.
	 */
	static char memcg_name[PATH_MAX];
	int ret;

	if (!memcg || !p)
		return;


	rcu_read_lock();

	mem_cgrp = memcg->css.cgroup;
	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		/*
		 * Unfortunately, we are unable to convert to a useful name,
		 * but we'll still print out the usage information.
		 */
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	printk(KERN_INFO "Task in %s killed", memcg_name);

	rcu_read_lock();
	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	/*
	 * Continues from above, so we don't need a KERN_ level
	 */
	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
done:

	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->res, RES_FAILCNT));
	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
		"failcnt %llu\n",
		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
}

/*
 * This function returns the number of memcgs under the hierarchy tree.
 * Returns 1 (self count) if there are no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	u64 limit;
	u64 memsw;

	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	limit += total_swap_pages << PAGE_SHIFT;

	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	/*
	 * If memsw is finite and limits the amount of swap space available
	 * to this memcg, return that limit.
	 */
	return min(limit, memsw);
}
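
/*
 * Illustrative example (not from the original source): with a 1GB memory
 * limit, 512MB of total swap and an effectively unlimited memsw counter,
 * the function above returns 1.5GB; if the memsw limit were set to 1.2GB
 * instead, that tighter 1.2GB bound would be returned.
 */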

static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
					gfp_t gfp_mask,
					unsigned long flags)
{
	unsigned long total = 0;
	bool noswap = false;
	int loop;

	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
		noswap = true;
	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
		noswap = true;

	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
		if (loop)
			drain_all_stock_async(memcg);
		total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
		/*
		 * Allow limit shrinkers, which are triggered directly
		 * by userspace, to catch signals and stop reclaim
		 * after minimal progress, regardless of the margin.
		 */
		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
			break;
		if (mem_cgroup_margin(memcg))
			break;
		/*
		 * If nothing was reclaimed after two attempts, there
		 * may be no reclaimable pages in this hierarchy.
		 */
		if (loop && !total)
			break;
	}
	return total;
}

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the user wants file-only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;

}
#if MAX_NUMNODES > 1

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_HIGH_MEMORY];

	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {

		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is O.K. Reclaiming
 * memory from the current node has both pros and cons.
 *
 * Freeing memory from the current node means freeing memory from a node
 * which we'll use or have used. So, it may make the LRU bad. And if several
 * threads hit limits, they will see contention on a node. But freeing from a
 * remote node means more costs for memory reclaim because of memory latency.
 *
 * For now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node(node, memcg->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(memcg->scan_nodes);
	/*
	 * We call this when we hit the limit, not when pages are added to the
	 * LRU. No LRU may hold pages because all pages are UNEVICTABLE, or
	 * the memcg is too small and all pages are not on the LRU. In that
	 * case, we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}

/*
 * Check all nodes for whether they contain reclaimable pages or not.
 * For a quick scan, we make use of scan_nodes. This allows us to skip
 * unused nodes. But scan_nodes is lazily updated and may not contain
 * enough new information. We need to double check.
 */
bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
{
	int nid;

	/*
	 * quick check...making use of scan_node.
	 * We can skip unused nodes.
	 */
	if (!nodes_empty(memcg->scan_nodes)) {
		for (nid = first_node(memcg->scan_nodes);
		     nid < MAX_NUMNODES;
		     nid = next_node(nid, memcg->scan_nodes)) {

			if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
				return true;
		}
	}
	/*
	 * Check rest of nodes.
	 */
	for_each_node_state(nid, N_HIGH_MEMORY) {
		if (node_isset(nid, memcg->scan_nodes))
			continue;
		if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
			return true;
	}
	return false;
}

#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}

bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
{
	return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   struct zone *zone,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = 0,
	};

	excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we
				 * don't reclaim too much, nor so little that
				 * we keep coming back to reclaim from this
				 * cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		if (!mem_cgroup_reclaimable(victim, false))
			continue;
		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
						     zone, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!res_counter_soft_limit_excess(&root_memcg->res))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 * Has to be called with memcg_oom_lock held.
 */
static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot take the lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (!failed)
		return true;

	/*
	 * OK, we failed to lock the whole subtree, so we have to clean up
	 * what we set up, up to the failing subtree
	 */
	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter == failed) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
		iter->oom_lock = false;
	}
	return false;
}

/*
 * Has to be called with memcg_oom_lock held.
 */
static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	return 0;
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		atomic_inc(&iter->under_oom);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. We have to use
	 * atomic_add_unless() here.
	 */
	for_each_mem_cgroup_tree(iter, memcg)
		atomic_add_unless(&iter->under_oom, -1, 0);
}

static DEFINE_SPINLOCK(memcg_oom_lock);
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *mem;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg,
			  *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->mem;

1866	/*
1867	 * Both oom_wait_info->mem and wake_memcg are stable under us.
1868	 * Then we can use css_is_ancestor() without taking care of RCU.
1869	 */
1870	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
1871		&& !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
1872		return 0;
1873	return autoremove_wake_function(wait, mode, sync, arg);
1874}
1875
1876static void memcg_wakeup_oom(struct mem_cgroup *memcg)
1877{
1878	/* for filtering, pass "memcg" as argument. */
1879	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1880}
1881
1882static void memcg_oom_recover(struct mem_cgroup *memcg)
1883{
1884	if (memcg && atomic_read(&memcg->under_oom))
1885		memcg_wakeup_oom(memcg);
1886}
1887
1888/*
1889 * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1890 */
1891bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask)
1892{
1893	struct oom_wait_info owait;
1894	bool locked, need_to_kill;
1895
1896	owait.mem = memcg;
1897	owait.wait.flags = 0;
1898	owait.wait.func = memcg_oom_wake_function;
1899	owait.wait.private = current;
1900	INIT_LIST_HEAD(&owait.wait.task_list);
1901	need_to_kill = true;
1902	mem_cgroup_mark_under_oom(memcg);
1903
1904	/* At first, try to take the OOM lock on the hierarchy under memcg. */
1905	spin_lock(&memcg_oom_lock);
1906	locked = mem_cgroup_oom_lock(memcg);
1907	/*
1908	 * Even if signal_pending(), we can't quit the charge() loop without
1909	 * accounting. So, UNINTERRUPTIBLE would be appropriate. But SIGKILL
1910	 * under OOM should always be honored, so use TASK_KILLABLE here.
1911	 */
1912	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1913	if (!locked || memcg->oom_kill_disable)
1914		need_to_kill = false;
1915	if (locked)
1916		mem_cgroup_oom_notify(memcg);
1917	spin_unlock(&memcg_oom_lock);
1918
1919	if (need_to_kill) {
1920		finish_wait(&memcg_oom_waitq, &owait.wait);
1921		mem_cgroup_out_of_memory(memcg, mask);
1922	} else {
1923		schedule();
1924		finish_wait(&memcg_oom_waitq, &owait.wait);
1925	}
1926	spin_lock(&memcg_oom_lock);
1927	if (locked)
1928		mem_cgroup_oom_unlock(memcg);
1929	memcg_wakeup_oom(memcg);
1930	spin_unlock(&memcg_oom_lock);
1931
1932	mem_cgroup_unmark_under_oom(memcg);
1933
1934	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1935		return false;
1936	/* Give a chance to the dying process */
1937	schedule_timeout_uninterruptible(1);
1938	return true;
1939}
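
/*
 * The protocol above, in short: mark the hierarchy under_oom, try to take
 * the hierarchical OOM lock, then either invoke the OOM killer (as the lock
 * holder, with oom_kill_disable unset) or sleep on memcg_oom_waitq until a
 * lock holder wakes the hierarchy up again.
 */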
1940
1941/*
1942 * Currently used to update mapped file statistics, but the routine can be
1943 * generalized to update other statistics as well.
1944 *
1945 * Notes: Race condition
1946 *
1947 * We usually use page_cgroup_lock() for accessing page_cgroup members, but
1948 * it tends to be costly. Under some conditions, however, we don't need
1949 * to do so _always_.
1950 *
1951 * Considering "charge", lock_page_cgroup() is not required because all
1952 * file-stat operations happen after a page is attached to the radix-tree.
1953 * There is no race with "charge".
1954 *
1955 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
1956 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
1957 * if there is a race with "uncharge". The statistics themselves are properly
1958 * handled by flags.
1959 *
1960 * Considering "move", this is the only case where we see a race. To make the
1961 * race window small, we check the MEM_CGROUP_ON_MOVE percpu value and detect
1962 * the possibility of a race condition. If there is one, we take a lock.
1963 */
1964
1965void mem_cgroup_update_page_stat(struct page *page,
1966				 enum mem_cgroup_page_stat_item idx, int val)
1967{
1968	struct mem_cgroup *memcg;
1969	struct page_cgroup *pc = lookup_page_cgroup(page);
1970	bool need_unlock = false;
1971	unsigned long uninitialized_var(flags);
1972
1973	if (unlikely(!pc))
1974		return;
1975
1976	rcu_read_lock();
1977	memcg = pc->mem_cgroup;
1978	if (unlikely(!memcg || !PageCgroupUsed(pc)))
1979		goto out;
1980	/* is pc->mem_cgroup unstable? */
1981	if (unlikely(mem_cgroup_stealed(memcg)) || PageTransHuge(page)) {
1982		/* take a lock to access pc->mem_cgroup safely */
1983		move_lock_page_cgroup(pc, &flags);
1984		need_unlock = true;
1985		memcg = pc->mem_cgroup;
1986		if (!memcg || !PageCgroupUsed(pc))
1987			goto out;
1988	}
1989
1990	switch (idx) {
1991	case MEMCG_NR_FILE_MAPPED:
1992		if (val > 0)
1993			SetPageCgroupFileMapped(pc);
1994		else if (!page_mapped(page))
1995			ClearPageCgroupFileMapped(pc);
1996		idx = MEM_CGROUP_STAT_FILE_MAPPED;
1997		break;
1998	default:
1999		BUG();
2000	}
2001
2002	this_cpu_add(memcg->stat->count[idx], val);
2003
2004out:
2005	if (unlikely(need_unlock))
2006		move_unlock_page_cgroup(pc, &flags);
2007	rcu_read_unlock();
2008	return;
2009}
2010EXPORT_SYMBOL(mem_cgroup_update_page_stat);
2011
2012/*
2013 * size of first charge trial. "32" comes from vmscan.c's magic value.
2014 * TODO: it may be necessary to use bigger numbers on big iron.
2015 */
2016#define CHARGE_BATCH	32U
2017struct memcg_stock_pcp {
2018	struct mem_cgroup *cached; /* this is never the root cgroup */
2019	unsigned int nr_pages;
2020	struct work_struct work;
2021	unsigned long flags;
2022#define FLUSHING_CACHED_CHARGE	(0)
2023};
2024static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2025static DEFINE_MUTEX(percpu_charge_mutex);
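
/*
 * The stock works as a small per-cpu cache of pre-charged pages:
 * __mem_cgroup_try_charge() charges a whole CHARGE_BATCH against the
 * res_counter at once and parks the surplus here, so that later single-page
 * charges on this cpu can avoid the shared res_counter. A rough sketch of
 * the fast path (illustrative only):
 *
 *	if (nr_pages == 1 && consume_stock(memcg))
 *		return 0;	- served from the local stock
 *	... charge a whole batch against the res_counter, then:
 *	refill_stock(memcg, batch - nr_pages);
 */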
2026
2027/*
2028 * Try to consume stocked charge on this cpu. On success, one page is consumed
2029 * from the local stock and true is returned. If the stock is 0 or holds charges
2030 * from a cgroup which is not the current target, false is returned. The stock
2031 * will be refilled later.
2032 */
2033static bool consume_stock(struct mem_cgroup *memcg)
2034{
2035	struct memcg_stock_pcp *stock;
2036	bool ret = true;
2037
2038	stock = &get_cpu_var(memcg_stock);
2039	if (memcg == stock->cached && stock->nr_pages)
2040		stock->nr_pages--;
2041	else /* need to call res_counter_charge */
2042		ret = false;
2043	put_cpu_var(memcg_stock);
2044	return ret;
2045}
2046
2047/*
2048 * Return the charges stocked in the percpu cache to the res_counter and reset the cached information.
2049 */
2050static void drain_stock(struct memcg_stock_pcp *stock)
2051{
2052	struct mem_cgroup *old = stock->cached;
2053
2054	if (stock->nr_pages) {
2055		unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2056
2057		res_counter_uncharge(&old->res, bytes);
2058		if (do_swap_account)
2059			res_counter_uncharge(&old->memsw, bytes);
2060		stock->nr_pages = 0;
2061	}
2062	stock->cached = NULL;
2063}
2064
2065/*
2066 * This must be called with preemption disabled, or by
2067 * a thread which is pinned to the local cpu.
2068 */
2069static void drain_local_stock(struct work_struct *dummy)
2070{
2071	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2072	drain_stock(stock);
2073	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2074}
2075
2076/*
2077 * Cache charges taken from the res_counter in the local per-cpu area.
2078 * They will be consumed by consume_stock(), later.
2079 */
2080static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2081{
2082	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2083
2084	if (stock->cached != memcg) { /* reset if necessary */
2085		drain_stock(stock);
2086		stock->cached = memcg;
2087	}
2088	stock->nr_pages += nr_pages;
2089	put_cpu_var(memcg_stock);
2090}
2091
2092/*
2093 * Drains all per-CPU charge caches for the given root_memcg and the
2094 * subtree of the hierarchy under it. The sync flag says whether we should
2095 * block until the work is done.
2096 */
2097static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2098{
2099	int cpu, curcpu;
2100
2101	/* Notify other cpus that system-wide "drain" is running */
2102	get_online_cpus();
2103	curcpu = get_cpu();
2104	for_each_online_cpu(cpu) {
2105		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2106		struct mem_cgroup *memcg;
2107
2108		memcg = stock->cached;
2109		if (!memcg || !stock->nr_pages)
2110			continue;
2111		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2112			continue;
2113		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2114			if (cpu == curcpu)
2115				drain_local_stock(&stock->work);
2116			else
2117				schedule_work_on(cpu, &stock->work);
2118		}
2119	}
2120	put_cpu();
2121
2122	if (!sync)
2123		goto out;
2124
2125	for_each_online_cpu(cpu) {
2126		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2127		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2128			flush_work(&stock->work);
2129	}
2130out:
2131	put_online_cpus();
2132}
2133
2134/*
2135 * Tries to drain stocked charges on other cpus. This function is asynchronous
2136 * and just schedules a work item per cpu to drain locally on each cpu. The
2137 * caller can expect that some charges will be returned to the res_counter
2138 * later, but cannot wait for that.
2139 */
2140static void drain_all_stock_async(struct mem_cgroup *root_memcg)
2141{
2142	/*
2143	 * If someone is already draining, avoid adding more kworker runs.
2144	 */
2145	if (!mutex_trylock(&percpu_charge_mutex))
2146		return;
2147	drain_all_stock(root_memcg, false);
2148	mutex_unlock(&percpu_charge_mutex);
2149}
2150
2151/* This is a synchronous drain interface. */
2152static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
2153{
2154	/* called when force_empty is called */
2155	mutex_lock(&percpu_charge_mutex);
2156	drain_all_stock(root_memcg, true);
2157	mutex_unlock(&percpu_charge_mutex);
2158}
2159
2160/*
2161 * This function drains percpu counter value from DEAD cpu and
2162 * moves it to the local cpu. Note that this function can be preempted.
2163 */
2164static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2165{
2166	int i;
2167
2168	spin_lock(&memcg->pcp_counter_lock);
2169	for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
2170		long x = per_cpu(memcg->stat->count[i], cpu);
2171
2172		per_cpu(memcg->stat->count[i], cpu) = 0;
2173		memcg->nocpu_base.count[i] += x;
2174	}
2175	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2176		unsigned long x = per_cpu(memcg->stat->events[i], cpu);
2177
2178		per_cpu(memcg->stat->events[i], cpu) = 0;
2179		memcg->nocpu_base.events[i] += x;
2180	}
2181	/* need to clear the ON_MOVE value; it works as a kind of lock. */
2182	per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
2183	spin_unlock(&memcg->pcp_counter_lock);
2184}
2185
2186static void synchronize_mem_cgroup_on_move(struct mem_cgroup *memcg, int cpu)
2187{
2188	int idx = MEM_CGROUP_ON_MOVE;
2189
2190	spin_lock(&memcg->pcp_counter_lock);
2191	per_cpu(memcg->stat->count[idx], cpu) = memcg->nocpu_base.count[idx];
2192	spin_unlock(&memcg->pcp_counter_lock);
2193}
2194
2195static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
2196					unsigned long action,
2197					void *hcpu)
2198{
2199	int cpu = (unsigned long)hcpu;
2200	struct memcg_stock_pcp *stock;
2201	struct mem_cgroup *iter;
2202
2203	if (action == CPU_ONLINE) {
2204		for_each_mem_cgroup(iter)
2205			synchronize_mem_cgroup_on_move(iter, cpu);
2206		return NOTIFY_OK;
2207	}
2208
2209	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2210		return NOTIFY_OK;
2211
2212	for_each_mem_cgroup(iter)
2213		mem_cgroup_drain_pcp_counter(iter, cpu);
2214
2215	stock = &per_cpu(memcg_stock, cpu);
2216	drain_stock(stock);
2217	return NOTIFY_OK;
2218}
2219
2220
2221/* See __mem_cgroup_try_charge() for details */
2222enum {
2223	CHARGE_OK,		/* success */
2224	CHARGE_RETRY,		/* need to retry but retry is not bad */
2225	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
2226	CHARGE_WOULDBLOCK,	/* __GFP_WAIT wasn't set and not enough res. */
2227	CHARGE_OOM_DIE,		/* the current task is killed because of OOM */
2228};
2229
2230static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2231				unsigned int nr_pages, bool oom_check)
2232{
2233	unsigned long csize = nr_pages * PAGE_SIZE;
2234	struct mem_cgroup *mem_over_limit;
2235	struct res_counter *fail_res;
2236	unsigned long flags = 0;
2237	int ret;
2238
2239	ret = res_counter_charge(&memcg->res, csize, &fail_res);
2240
2241	if (likely(!ret)) {
2242		if (!do_swap_account)
2243			return CHARGE_OK;
2244		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
2245		if (likely(!ret))
2246			return CHARGE_OK;
2247
2248		res_counter_uncharge(&memcg->res, csize);
2249		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2250		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2251	} else
2252		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2253	/*
2254	 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
2255	 * of regular pages (CHARGE_BATCH), or a single regular page (1).
2256	 *
2257	 * Never reclaim on behalf of optional batching, retry with a
2258	 * single page instead.
2259	 */
2260	if (nr_pages == CHARGE_BATCH)
2261		return CHARGE_RETRY;
2262
2263	if (!(gfp_mask & __GFP_WAIT))
2264		return CHARGE_WOULDBLOCK;
2265
2266	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
2267	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2268		return CHARGE_RETRY;
2269	/*
2270	 * Even though the limit is exceeded at this point, reclaim
2271	 * may have been able to free some pages.  Retry the charge
2272	 * before killing the task.
2273	 *
2274	 * Only for regular pages, though: huge pages are rather
2275	 * unlikely to succeed so close to the limit, and we fall back
2276	 * to regular pages anyway in case of failure.
2277	 */
2278	if (nr_pages == 1 && ret)
2279		return CHARGE_RETRY;
2280
2281	/*
2282	 * At task move, charge accounts can be doubly counted. So, it's
2283	 * better to wait until the end of task_move if something is going on.
2284	 */
2285	if (mem_cgroup_wait_acct_move(mem_over_limit))
2286		return CHARGE_RETRY;
2287
2288	/* If we don't need to call the oom-killer at all, return immediately */
2289	if (!oom_check)
2290		return CHARGE_NOMEM;
2291	/* check OOM */
2292	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
2293		return CHARGE_OOM_DIE;
2294
2295	return CHARGE_RETRY;
2296}
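
/*
 * Summary of the decisions above: a batch-sized request that fails is
 * retried as a single page before any reclaim; an allocation without
 * __GFP_WAIT bails out immediately; otherwise reclaim runs against the
 * cgroup that actually hit its limit, and only a repeatedly failing
 * single-page request with oom_check set escalates to
 * mem_cgroup_handle_oom().
 */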
2297
2298/*
2299 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
2300 * the oom-killer can be invoked.
2301 */
2302static int __mem_cgroup_try_charge(struct mm_struct *mm,
2303				   gfp_t gfp_mask,
2304				   unsigned int nr_pages,
2305				   struct mem_cgroup **ptr,
2306				   bool oom)
2307{
2308	unsigned int batch = max(CHARGE_BATCH, nr_pages);
2309	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2310	struct mem_cgroup *memcg = NULL;
2311	int ret;
2312
2313	/*
2314	 * Unlike the global VM's OOM-kill, we're not under a system-level
2315	 * memory shortage here. So, let a dying process go ahead, in
2316	 * addition to a MEMDIE process.
2317	 */
2318	if (unlikely(test_thread_flag(TIF_MEMDIE)
2319		     || fatal_signal_pending(current)))
2320		goto bypass;
2321
2322	/*
2323	 * We always charge the cgroup the mm_struct belongs to.
2324	 * The mm_struct's mem_cgroup changes on task migration if the
2325	 * thread group leader migrates. It's possible that mm is not
2326	 * set, if so charge the init_mm (happens for pagecache usage).
2327	 */
2328	if (!*ptr && !mm)
2329		goto bypass;
2330again:
2331	if (*ptr) { /* css should be a valid one */
2332		memcg = *ptr;
2333		VM_BUG_ON(css_is_removed(&memcg->css));
2334		if (mem_cgroup_is_root(memcg))
2335			goto done;
2336		if (nr_pages == 1 && consume_stock(memcg))
2337			goto done;
2338		css_get(&memcg->css);
2339	} else {
2340		struct task_struct *p;
2341
2342		rcu_read_lock();
2343		p = rcu_dereference(mm->owner);
2344		/*
2345		 * Because we don't have task_lock(), "p" can exit.
2346		 * In that case, "memcg" can point to root or p can be NULL due
2347		 * to a race with swapoff. Then, we have a small risk of
2348		 * mis-accounting. But this kind of mis-accounting by a race can
2349		 * always happen because we don't hold cgroup_mutex(). Avoiding it
2350		 * is overkill and we allow that small race, here.
2351		 * (*) swapoff et al. will charge against the mm_struct, not against
2352		 * the task_struct. So, mm->owner can be NULL.
2353		 */
2354		memcg = mem_cgroup_from_task(p);
2355		if (!memcg || mem_cgroup_is_root(memcg)) {
2356			rcu_read_unlock();
2357			goto done;
2358		}
2359		if (nr_pages == 1 && consume_stock(memcg)) {
2360			/*
2361			 * It seems dangerous to access memcg without css_get().
2362			 * But considering how consume_stock works, it's not
2363			 * necessary. If consume_stock succeeds, some charges
2364			 * from this memcg are cached on this cpu. So, we
2365			 * don't need to call css_get()/css_tryget() before
2366			 * calling consume_stock().
2367			 */
2368			rcu_read_unlock();
2369			goto done;
2370		}
2371		/* after here, we may block; we need to get a refcnt */
2372		if (!css_tryget(&memcg->css)) {
2373			rcu_read_unlock();
2374			goto again;
2375		}
2376		rcu_read_unlock();
2377	}
2378
2379	do {
2380		bool oom_check;
2381
2382		/* If killed, bypass charge */
2383		if (fatal_signal_pending(current)) {
2384			css_put(&memcg->css);
2385			goto bypass;
2386		}
2387
2388		oom_check = false;
2389		if (oom && !nr_oom_retries) {
2390			oom_check = true;
2391			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2392		}
2393
2394		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
2395		switch (ret) {
2396		case CHARGE_OK:
2397			break;
2398		case CHARGE_RETRY: /* not in OOM situation but retry */
2399			batch = nr_pages;
2400			css_put(&memcg->css);
2401			memcg = NULL;
2402			goto again;
2403		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2404			css_put(&memcg->css);
2405			goto nomem;
2406		case CHARGE_NOMEM: /* OOM routine works */
2407			if (!oom) {
2408				css_put(&memcg->css);
2409				goto nomem;
2410			}
2411			/* If oom, we never return -ENOMEM */
2412			nr_oom_retries--;
2413			break;
2414		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
2415			css_put(&memcg->css);
2416			goto bypass;
2417		}
2418	} while (ret != CHARGE_OK);
2419
2420	if (batch > nr_pages)
2421		refill_stock(memcg, batch - nr_pages);
2422	css_put(&memcg->css);
2423done:
2424	*ptr = memcg;
2425	return 0;
2426nomem:
2427	*ptr = NULL;
2428	return -ENOMEM;
2429bypass:
2430	*ptr = NULL;
2431	return 0;
2432}
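
/*
 * A minimal sketch of how callers drive the helper above (illustrative;
 * mem_cgroup_charge_common() below is a real user):
 *
 *	struct mem_cgroup *memcg = NULL;
 *
 *	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 *	if (ret || !memcg)
 *		return ret;	- error, or the charge was bypassed
 *	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
 */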
2433
2434/*
2435 * Sometimes we have to undo a charge we got by try_charge().
2436 * This function is for that and uncharges the res_counters
2437 * charged by try_charge().
2438 */
2439static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2440				       unsigned int nr_pages)
2441{
2442	if (!mem_cgroup_is_root(memcg)) {
2443		unsigned long bytes = nr_pages * PAGE_SIZE;
2444
2445		res_counter_uncharge(&memcg->res, bytes);
2446		if (do_swap_account)
2447			res_counter_uncharge(&memcg->memsw, bytes);
2448	}
2449}
2450
2451/*
2452 * A helper function to get a mem_cgroup from an ID. Must be called under
2453 * rcu_read_lock(). The caller must check css_is_removed() or similar if
2454 * that is a concern. (Dropping a refcnt from swap can be called against a
2455 * removed memcg.)
2456 */
2457static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2458{
2459	struct cgroup_subsys_state *css;
2460
2461	/* ID 0 is unused ID */
2462	if (!id)
2463		return NULL;
2464	css = css_lookup(&mem_cgroup_subsys, id);
2465	if (!css)
2466		return NULL;
2467	return container_of(css, struct mem_cgroup, css);
2468}
2469
2470struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2471{
2472	struct mem_cgroup *memcg = NULL;
2473	struct page_cgroup *pc;
2474	unsigned short id;
2475	swp_entry_t ent;
2476
2477	VM_BUG_ON(!PageLocked(page));
2478
2479	pc = lookup_page_cgroup(page);
2480	lock_page_cgroup(pc);
2481	if (PageCgroupUsed(pc)) {
2482		memcg = pc->mem_cgroup;
2483		if (memcg && !css_tryget(&memcg->css))
2484			memcg = NULL;
2485	} else if (PageSwapCache(page)) {
2486		ent.val = page_private(page);
2487		id = lookup_swap_cgroup(ent);
2488		rcu_read_lock();
2489		memcg = mem_cgroup_lookup(id);
2490		if (memcg && !css_tryget(&memcg->css))
2491			memcg = NULL;
2492		rcu_read_unlock();
2493	}
2494	unlock_page_cgroup(pc);
2495	return memcg;
2496}
2497
2498static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2499				       struct page *page,
2500				       unsigned int nr_pages,
2501				       struct page_cgroup *pc,
2502				       enum charge_type ctype)
2503{
2504	lock_page_cgroup(pc);
2505	if (unlikely(PageCgroupUsed(pc))) {
2506		unlock_page_cgroup(pc);
2507		__mem_cgroup_cancel_charge(memcg, nr_pages);
2508		return;
2509	}
2510	/*
2511	 * we don't need page_cgroup_lock for tail pages, because they are not
2512	 * accessed by any other context at this point.
2513	 */
2514	pc->mem_cgroup = memcg;
2515	/*
2516	 * We access a page_cgroup asynchronously without lock_page_cgroup().
2517	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2518	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
2519	 * before USED bit, we need memory barrier here.
2520	 * See mem_cgroup_add_lru_list(), etc.
2521	 */
2522	smp_wmb();
2523	switch (ctype) {
2524	case MEM_CGROUP_CHARGE_TYPE_CACHE:
2525	case MEM_CGROUP_CHARGE_TYPE_SHMEM:
2526		SetPageCgroupCache(pc);
2527		SetPageCgroupUsed(pc);
2528		break;
2529	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2530		ClearPageCgroupCache(pc);
2531		SetPageCgroupUsed(pc);
2532		break;
2533	default:
2534		break;
2535	}
2536
2537	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
2538	unlock_page_cgroup(pc);
2539	/*
2540	 * "charge_statistics" updated the event counter. Then, check it.
2541	 * Insert the ancestor (and the ancestor's ancestors) into the
2542	 * soft-limit RB-tree if they exceed their soft limit.
2543	 */
2544	memcg_check_events(memcg, page);
2545}
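
/*
 * The smp_wmb() above pairs with the readers of a page_cgroup: they test
 * PageCgroupUsed(pc) first and dereference pc->mem_cgroup only afterwards,
 * so publishing pc->mem_cgroup before setting the USED bit keeps them from
 * seeing a stale pointer.
 */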
2546
2547#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2548
2549#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
2550			(1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
2551/*
2552 * Because tail pages are not marked as "used", set the flag. We're under
2553 * zone->lru_lock, 'splitting on pmd' and compound_lock.
2554 * charge/uncharge will never happen and move_account() is done under
2555 * compound_lock(), so we don't have to take care of races.
2556 */
2557void mem_cgroup_split_huge_fixup(struct page *head)
2558{
2559	struct page_cgroup *head_pc = lookup_page_cgroup(head);
2560	struct page_cgroup *pc;
2561	int i;
2562
2563	if (mem_cgroup_disabled())
2564		return;
2565	for (i = 1; i < HPAGE_PMD_NR; i++) {
2566		pc = head_pc + i;
2567		pc->mem_cgroup = head_pc->mem_cgroup;
2568		smp_wmb();/* see __commit_charge() */
2569		/*
2570		 * LRU flags cannot be copied because we need to add the tail
2571		 * page to the LRU by a generic call, and our hooks will be called.
2572		 */
2573		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
2574	}
2575
2576	if (PageCgroupAcctLRU(head_pc)) {
2577		enum lru_list lru;
2578		struct mem_cgroup_per_zone *mz;
2579		/*
2580		 * We hold the lru_lock, so reduce the counter directly.
2581		 */
2582		lru = page_lru(head);
2583		mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
2584		MEM_CGROUP_ZSTAT(mz, lru) -= HPAGE_PMD_NR - 1;
2585	}
2586}
2587#endif
2588
2589/**
2590 * mem_cgroup_move_account - move account of the page
2591 * @page: the page
2592 * @nr_pages: number of regular pages (>1 for huge pages)
2593 * @pc:	page_cgroup of the page.
2594 * @from: mem_cgroup which the page is moved from.
2595 * @to:	mem_cgroup which the page is moved to. @from != @to.
2596 * @uncharge: whether we should call uncharge and css_put against @from.
2597 *
2598 * The caller must confirm the following.
2599 * - page is not on LRU (isolate_page() is useful.)
2600 * - compound_lock is held when nr_pages > 1
2601 *
2602 * This function does neither "charge" nor css_get to the new cgroup. That
2603 * should be done by the caller (__mem_cgroup_try_charge would be useful).
2604 * If @uncharge is true, this function does "uncharge" from the old cgroup;
2605 * it doesn't if @uncharge is false, in which case the caller should do the "uncharge".
2606 */
2607static int mem_cgroup_move_account(struct page *page,
2608				   unsigned int nr_pages,
2609				   struct page_cgroup *pc,
2610				   struct mem_cgroup *from,
2611				   struct mem_cgroup *to,
2612				   bool uncharge)
2613{
2614	unsigned long flags;
2615	int ret;
2616
2617	VM_BUG_ON(from == to);
2618	VM_BUG_ON(PageLRU(page));
2619	/*
2620	 * The page is isolated from LRU. So, collapse function
2621	 * will not handle this page. But page splitting can happen.
2622	 * Do this check under compound_page_lock(). The caller should
2623	 * hold it.
2624	 */
2625	ret = -EBUSY;
2626	if (nr_pages > 1 && !PageTransHuge(page))
2627		goto out;
2628
2629	lock_page_cgroup(pc);
2630
2631	ret = -EINVAL;
2632	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
2633		goto unlock;
2634
2635	move_lock_page_cgroup(pc, &flags);
2636
2637	if (PageCgroupFileMapped(pc)) {
2638		/* Update mapped_file data for mem_cgroup */
2639		preempt_disable();
2640		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2641		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2642		preempt_enable();
2643	}
2644	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
2645	if (uncharge)
2646		/* This is not "cancel", but cancel_charge does all we need. */
2647		__mem_cgroup_cancel_charge(from, nr_pages);
2648
2649	/* caller should have done css_get */
2650	pc->mem_cgroup = to;
2651	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
2652	/*
2653	 * We charge against "to", which may not have any tasks. Then, "to"
2654	 * can be under rmdir(). But in current implementation, caller of
2655	 * this function is just force_empty() and move charge, so it's
2656	 * guaranteed that "to" is never removed. So, we don't check rmdir
2657	 * status here.
2658	 */
2659	move_unlock_page_cgroup(pc, &flags);
2660	ret = 0;
2661unlock:
2662	unlock_page_cgroup(pc);
2663	/*
2664	 * check events
2665	 */
2666	memcg_check_events(to, page);
2667	memcg_check_events(from, page);
2668out:
2669	return ret;
2670}
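
/*
 * Typical calling sequence (illustrative; mem_cgroup_move_parent() below is
 * a real user): isolate the page from the LRU, pre-charge @to via
 * __mem_cgroup_try_charge(), take the compound lock for huge pages, call
 * mem_cgroup_move_account() and cancel the pre-charge on failure.
 */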
2671
2672/*
2673 * Move charges to the parent cgroup.
2674 */
2675
2676static int mem_cgroup_move_parent(struct page *page,
2677				  struct page_cgroup *pc,
2678				  struct mem_cgroup *child,
2679				  gfp_t gfp_mask)
2680{
2681	struct cgroup *cg = child->css.cgroup;
2682	struct cgroup *pcg = cg->parent;
2683	struct mem_cgroup *parent;
2684	unsigned int nr_pages;
2685	unsigned long uninitialized_var(flags);
2686	int ret;
2687
2688	/* Is ROOT ? */
2689	if (!pcg)
2690		return -EINVAL;
2691
2692	ret = -EBUSY;
2693	if (!get_page_unless_zero(page))
2694		goto out;
2695	if (isolate_lru_page(page))
2696		goto put;
2697
2698	nr_pages = hpage_nr_pages(page);
2699
2700	parent = mem_cgroup_from_cont(pcg);
2701	ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
2702	if (ret || !parent)
2703		goto put_back;
2704
2705	if (nr_pages > 1)
2706		flags = compound_lock_irqsave(page);
2707
2708	ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
2709	if (ret)
2710		__mem_cgroup_cancel_charge(parent, nr_pages);
2711
2712	if (nr_pages > 1)
2713		compound_unlock_irqrestore(page, flags);
2714put_back:
2715	putback_lru_page(page);
2716put:
2717	put_page(page);
2718out:
2719	return ret;
2720}
2721
2722/*
2723 * Charge the memory controller for page usage.
2724 * Return
2725 * 0 if the charge was successful
2726 * < 0 if the cgroup is over its limit
2727 */
2728static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2729				gfp_t gfp_mask, enum charge_type ctype)
2730{
2731	struct mem_cgroup *memcg = NULL;
2732	unsigned int nr_pages = 1;
2733	struct page_cgroup *pc;
2734	bool oom = true;
2735	int ret;
2736
2737	if (PageTransHuge(page)) {
2738		nr_pages <<= compound_order(page);
2739		VM_BUG_ON(!PageTransHuge(page));
2740		/*
2741		 * Never OOM-kill a process for a huge page.  The
2742		 * fault handler will fall back to regular pages.
2743		 */
2744		oom = false;
2745	}
2746
2747	pc = lookup_page_cgroup(page);
2748	BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
2749
2750	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
2751	if (ret || !memcg)
2752		return ret;
2753
2754	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
2755	return 0;
2756}
2757
2758int mem_cgroup_newpage_charge(struct page *page,
2759			      struct mm_struct *mm, gfp_t gfp_mask)
2760{
2761	if (mem_cgroup_disabled())
2762		return 0;
2763	/*
2764	 * If already mapped, we don't have to account.
2765	 * If page cache, page->mapping has an address_space.
2766	 * But page->mapping may hold an out-of-use anon_vma pointer;
2767	 * detect that by a PageAnon() check: a newly-mapped anon page's
2768	 * page->mapping is NULL.
2769	 */
2770	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2771		return 0;
2772	if (unlikely(!mm))
2773		mm = &init_mm;
2774	return mem_cgroup_charge_common(page, mm, gfp_mask,
2775				MEM_CGROUP_CHARGE_TYPE_MAPPED);
2776}
2777
2778static void
2779__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2780					enum charge_type ctype);
2781
2782static void
2783__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
2784					enum charge_type ctype)
2785{
2786	struct page_cgroup *pc = lookup_page_cgroup(page);
2787	/*
2788	 * In some cases, e.g. SwapCache and FUSE (splice_buf->radixtree), the
2789	 * page is already on the LRU. It means the page may be on some other
2790	 * page_cgroup's LRU. Take care of it.
2791	 */
2792	mem_cgroup_lru_del_before_commit(page);
2793	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
2794	mem_cgroup_lru_add_after_commit(page);
2795	return;
2796}
2797
2798int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2799				gfp_t gfp_mask)
2800{
2801	struct mem_cgroup *memcg = NULL;
2802	int ret;
2803
2804	if (mem_cgroup_disabled())
2805		return 0;
2806	if (PageCompound(page))
2807		return 0;
2808
2809	if (unlikely(!mm))
2810		mm = &init_mm;
2811
2812	if (page_is_file_cache(page)) {
2813		ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true);
2814		if (ret || !memcg)
2815			return ret;
2816
2817		/*
2818		 * FUSE reuses pages without going through the final
2819		 * put that would remove them from the LRU list, make
2820		 * sure that they get relinked properly.
2821		 */
2822		__mem_cgroup_commit_charge_lrucare(page, memcg,
2823					MEM_CGROUP_CHARGE_TYPE_CACHE);
2824		return ret;
2825	}
2826	/* shmem */
2827	if (PageSwapCache(page)) {
2828		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
2829		if (!ret)
2830			__mem_cgroup_commit_charge_swapin(page, memcg,
2831					MEM_CGROUP_CHARGE_TYPE_SHMEM);
2832	} else
2833		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2834					MEM_CGROUP_CHARGE_TYPE_SHMEM);
2835
2836	return ret;
2837}
2838
2839/*
2840 * During swap-in (try_charge -> commit or cancel), the page is locked.
2841 * And when try_charge() successfully returns, one refcnt to the memcg,
2842 * without a struct page_cgroup, is acquired. This refcnt will be consumed
2843 * by "commit()" or released by "cancel()".
2844 */
2845int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2846				 struct page *page,
2847				 gfp_t mask, struct mem_cgroup **ptr)
2848{
2849	struct mem_cgroup *memcg;
2850	int ret;
2851
2852	*ptr = NULL;
2853
2854	if (mem_cgroup_disabled())
2855		return 0;
2856
2857	if (!do_swap_account)
2858		goto charge_cur_mm;
2859	/*
2860	 * A racing thread's fault, or swapoff, may have already updated
2861	 * the pte, and even removed page from swap cache: in those cases
2862	 * do_swap_page()'s pte_same() test will fail; but there's also a
2863	 * KSM case which does need to charge the page.
2864	 */
2865	if (!PageSwapCache(page))
2866		goto charge_cur_mm;
2867	memcg = try_get_mem_cgroup_from_page(page);
2868	if (!memcg)
2869		goto charge_cur_mm;
2870	*ptr = memcg;
2871	ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
2872	css_put(&memcg->css);
2873	return ret;
2874charge_cur_mm:
2875	if (unlikely(!mm))
2876		mm = &init_mm;
2877	return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
2878}
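
/*
 * A minimal sketch of the swap-in protocol described above (illustrative;
 * the fault path in memory.c follows this pattern):
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto oom;
 *	... map the page ...
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 *	- or, if mapping failed before the commit:
 *	mem_cgroup_cancel_charge_swapin(memcg);
 */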
2879
2880static void
2881__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2882					enum charge_type ctype)
2883{
2884	if (mem_cgroup_disabled())
2885		return;
2886	if (!ptr)
2887		return;
2888	cgroup_exclude_rmdir(&ptr->css);
2889
2890	__mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
2891	/*
2892	 * Now the swap entry is in memory. This means this page may be
2893	 * counted both as mem and swap: a double count.
2894	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2895	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
2896	 * may call delete_from_swap_cache() before we reach here.
2897	 */
2898	if (do_swap_account && PageSwapCache(page)) {
2899		swp_entry_t ent = {.val = page_private(page)};
2900		unsigned short id;
2901		struct mem_cgroup *memcg;
2902
2903		id = swap_cgroup_record(ent, 0);
2904		rcu_read_lock();
2905		memcg = mem_cgroup_lookup(id);
2906		if (memcg) {
2907			/*
2908			 * This recorded memcg can be an obsolete one. So,
2909			 * avoid calling css_tryget().
2910			 */
2911			if (!mem_cgroup_is_root(memcg))
2912				res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2913			mem_cgroup_swap_statistics(memcg, false);
2914			mem_cgroup_put(memcg);
2915		}
2916		rcu_read_unlock();
2917	}
2918	/*
2919	 * At swapin, we may charge against a cgroup which has no tasks.
2920	 * So, rmdir()->pre_destroy() can be called while we do this charge.
2921	 * In that case, we need to call pre_destroy() again. Check it here.
2922	 */
2923	cgroup_release_and_wakeup_rmdir(&ptr->css);
2924}
2925
2926void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2927{
2928	__mem_cgroup_commit_charge_swapin(page, ptr,
2929					MEM_CGROUP_CHARGE_TYPE_MAPPED);
2930}
2931
2932void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
2933{
2934	if (mem_cgroup_disabled())
2935		return;
2936	if (!memcg)
2937		return;
2938	__mem_cgroup_cancel_charge(memcg, 1);
2939}
2940
2941static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
2942				   unsigned int nr_pages,
2943				   const enum charge_type ctype)
2944{
2945	struct memcg_batch_info *batch = NULL;
2946	bool uncharge_memsw = true;
2947
2948	/* If swapout, usage of swap doesn't decrease */
2949	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2950		uncharge_memsw = false;
2951
2952	batch = &current->memcg_batch;
2953	/*
2954	 * Usually, we do css_get() when we remember a memcg pointer.
2955	 * But in this case, we keep res->usage until the end of a series of
2956	 * uncharges. Then, it's ok to ignore memcg's refcnt.
2957	 */
2958	if (!batch->memcg)
2959		batch->memcg = memcg;
2960	/*
2961	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
2962	 * In those cases, all pages freed continuously can be expected to be in
2963	 * the same cgroup and we have a chance to coalesce uncharges.
2964	 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
2965	 * because we want to do uncharge as soon as possible.
2966	 */
2967
2968	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2969		goto direct_uncharge;
2970
2971	if (nr_pages > 1)
2972		goto direct_uncharge;
2973
2974	/*
2975	 * In the typical case, batch->memcg == memcg. This means we can
2976	 * merge a series of uncharges into one uncharge of the res_counter.
2977	 * If not, we uncharge the res_counter one by one.
2978	 */
2979	if (batch->memcg != memcg)
2980		goto direct_uncharge;
2981	/* remember freed charge and uncharge it later */
2982	batch->nr_pages++;
2983	if (uncharge_memsw)
2984		batch->memsw_nr_pages++;
2985	return;
2986direct_uncharge:
2987	res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
2988	if (uncharge_memsw)
2989		res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
2990	if (unlikely(batch->memcg != memcg))
2991		memcg_oom_recover(memcg);
2992	return;
2993}
2994
2995/*
2996 * uncharge if !page_mapped(page)
2997 */
2998static struct mem_cgroup *
2999__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
3000{
3001	struct mem_cgroup *memcg = NULL;
3002	unsigned int nr_pages = 1;
3003	struct page_cgroup *pc;
3004
3005	if (mem_cgroup_disabled())
3006		return NULL;
3007
3008	if (PageSwapCache(page))
3009		return NULL;
3010
3011	if (PageTransHuge(page)) {
3012		nr_pages <<= compound_order(page);
3013		VM_BUG_ON(!PageTransHuge(page));
3014	}
3015	/*
3016	 * Check if our page_cgroup is valid
3017	 */
3018	pc = lookup_page_cgroup(page);
3019	if (unlikely(!pc || !PageCgroupUsed(pc)))
3020		return NULL;
3021
3022	lock_page_cgroup(pc);
3023
3024	memcg = pc->mem_cgroup;
3025
3026	if (!PageCgroupUsed(pc))
3027		goto unlock_out;
3028
3029	switch (ctype) {
3030	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
3031	case MEM_CGROUP_CHARGE_TYPE_DROP:
3032		/* See mem_cgroup_prepare_migration() */
3033		if (page_mapped(page) || PageCgroupMigration(pc))
3034			goto unlock_out;
3035		break;
3036	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
3037		if (!PageAnon(page)) {	/* Shared memory */
3038			if (page->mapping && !page_is_file_cache(page))
3039				goto unlock_out;
3040		} else if (page_mapped(page)) /* Anon */
3041				goto unlock_out;
3042		break;
3043	default:
3044		break;
3045	}
3046
3047	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -nr_pages);
3048
3049	ClearPageCgroupUsed(pc);
3050	/*
3051	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
3052	 * freed from the LRU. This is safe because an uncharged page is expected not
3053	 * to be reused (freed soon). Exception is SwapCache, it's handled by
3054	 * special functions.
3055	 */
3056
3057	unlock_page_cgroup(pc);
3058	/*
3059	 * even after unlock, we have memcg->res.usage here and this memcg
3060	 * will never be freed.
3061	 */
3062	memcg_check_events(memcg, page);
3063	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
3064		mem_cgroup_swap_statistics(memcg, true);
3065		mem_cgroup_get(memcg);
3066	}
3067	if (!mem_cgroup_is_root(memcg))
3068		mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
3069
3070	return memcg;
3071
3072unlock_out:
3073	unlock_page_cgroup(pc);
3074	return NULL;
3075}
3076
3077void mem_cgroup_uncharge_page(struct page *page)
3078{
3079	/* early check. */
3080	if (page_mapped(page))
3081		return;
3082	if (page->mapping && !PageAnon(page))
3083		return;
3084	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
3085}
3086
3087void mem_cgroup_uncharge_cache_page(struct page *page)
3088{
3089	VM_BUG_ON(page_mapped(page));
3090	VM_BUG_ON(page->mapping);
3091	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
3092}
3093
3094/*
3095 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
3096 * In those cases, pages are freed continuously and we can expect that they
3097 * are in the same memcg. Each of these calls itself limits the number of
3098 * pages freed at once, so uncharge_start/end() is called properly.
3099 * This may be called several times in one context.
3100 */
3101
3102void mem_cgroup_uncharge_start(void)
3103{
3104	current->memcg_batch.do_batch++;
3105	/* Nesting is allowed. */
3106	if (current->memcg_batch.do_batch == 1) {
3107		current->memcg_batch.memcg = NULL;
3108		current->memcg_batch.nr_pages = 0;
3109		current->memcg_batch.memsw_nr_pages = 0;
3110	}
3111}
3112
3113void mem_cgroup_uncharge_end(void)
3114{
3115	struct memcg_batch_info *batch = &current->memcg_batch;
3116
3117	if (!batch->do_batch)
3118		return;
3119
3120	batch->do_batch--;
3121	if (batch->do_batch) /* If stacked, do nothing. */
3122		return;
3123
3124	if (!batch->memcg)
3125		return;
3126	/*
3127	 * This "batch->memcg" is valid without any css_get/put etc.,
3128	 * because we hide charges behind us.
3129	 */
3130	if (batch->nr_pages)
3131		res_counter_uncharge(&batch->memcg->res,
3132				     batch->nr_pages * PAGE_SIZE);
3133	if (batch->memsw_nr_pages)
3134		res_counter_uncharge(&batch->memcg->memsw,
3135				     batch->memsw_nr_pages * PAGE_SIZE);
3136	memcg_oom_recover(batch->memcg);
3137	/* forget this pointer (for sanity check) */
3138	batch->memcg = NULL;
3139}
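
/*
 * Illustrative usage of the batching pair above (the truncate and unmap
 * paths follow this pattern; for_each_page_in_range is a made-up iterator):
 *
 *	mem_cgroup_uncharge_start();
 *	for_each_page_in_range(page)
 *		mem_cgroup_uncharge_page(page);	- coalesced into the batch
 *	mem_cgroup_uncharge_end();		- one res_counter uncharge
 */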
3140
3141#ifdef CONFIG_SWAP
3142/*
3143 * Called after __delete_from_swap_cache(); drops the "page" account.
3144 * The memcg information is recorded in the swap_cgroup of "ent".
3145 */
3146void
3147mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
3148{
3149	struct mem_cgroup *memcg;
3150	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
3151
3152	if (!swapout) /* this was a swap cache but the swap is unused ! */
3153		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
3154
3155	memcg = __mem_cgroup_uncharge_common(page, ctype);
3156
3157	/*
3158	 * Record the memcg information; if swapout && memcg != NULL,
3159	 * mem_cgroup_get() was called in uncharge().
3160	 */
3161	if (do_swap_account && swapout && memcg)
3162		swap_cgroup_record(ent, css_id(&memcg->css));
3163}
3164#endif
3165
3166#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3167/*
3168 * Called from swap_entry_free(). Removes the record in swap_cgroup and
3169 * uncharges the "memsw" account.
3170 */
3171void mem_cgroup_uncharge_swap(swp_entry_t ent)
3172{
3173	struct mem_cgroup *memcg;
3174	unsigned short id;
3175
3176	if (!do_swap_account)
3177		return;
3178
3179	id = swap_cgroup_record(ent, 0);
3180	rcu_read_lock();
3181	memcg = mem_cgroup_lookup(id);
3182	if (memcg) {
3183		/*
3184		 * We uncharge this because the swap entry is freed. This
3185		 * memcg can be an obsolete one. We avoid calling css_tryget().
3186		 */
3187		if (!mem_cgroup_is_root(memcg))
3188			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
3189		mem_cgroup_swap_statistics(memcg, false);
3190		mem_cgroup_put(memcg);
3191	}
3192	rcu_read_unlock();
3193}
3194
3195/**
3196 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3197 * @entry: swap entry to be moved
3198 * @from:  mem_cgroup which the entry is moved from
3199 * @to:  mem_cgroup which the entry is moved to
3200 * @need_fixup: whether we should fixup res_counters and refcounts.
3201 *
3202 * It succeeds only when the swap_cgroup's record for this entry is the same
3203 * as the mem_cgroup's id of @from.
3204 *
3205 * Returns 0 on success, -EINVAL on failure.
3206 *
3207 * The caller must have charged to @to, IOW, called res_counter_charge() for
3208 * both res and memsw, and called css_get().
3209 */
3210static int mem_cgroup_move_swap_account(swp_entry_t entry,
3211		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
3212{
3213	unsigned short old_id, new_id;
3214
3215	old_id = css_id(&from->css);
3216	new_id = css_id(&to->css);
3217
3218	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3219		mem_cgroup_swap_statistics(from, false);
3220		mem_cgroup_swap_statistics(to, true);
3221		/*
3222		 * This function is only called from task migration context now.
3223		 * It postpones res_counter and refcount handling till the end
3224		 * of task migration(mem_cgroup_clear_mc()) for performance
3225		 * improvement. But we cannot postpone mem_cgroup_get(to)
3226		 * because if the process that has been moved to @to does
3227		 * swap-in, the refcount of @to might be decreased to 0.
3228		 */
3229		mem_cgroup_get(to);
3230		if (need_fixup) {
3231			if (!mem_cgroup_is_root(from))
3232				res_counter_uncharge(&from->memsw, PAGE_SIZE);
3233			mem_cgroup_put(from);
3234			/*
3235			 * we charged both to->res and to->memsw, so we should
3236			 * uncharge to->res.
3237			 */
3238			if (!mem_cgroup_is_root(to))
3239				res_counter_uncharge(&to->res, PAGE_SIZE);
3240		}
3241		return 0;
3242	}
3243	return -EINVAL;
3244}
3245#else
3246static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3247		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
3248{
3249	return -EINVAL;
3250}
3251#endif
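
/*
 * Note: the swap_cgroup_cmpxchg() above makes the record hand-over atomic
 * against concurrent swap-ins. If a racing swap-in has already cleared or
 * changed the record, the cmpxchg fails and -EINVAL tells the caller that
 * the charge stays with @from.
 */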
3252
3253/*
3254 * Before starting migration, account PAGE_SIZE to the mem_cgroup that the old
3255 * page belongs to.
3256 */
3257int mem_cgroup_prepare_migration(struct page *page,
3258	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
3259{
3260	struct mem_cgroup *memcg = NULL;
3261	struct page_cgroup *pc;
3262	enum charge_type ctype;
3263	int ret = 0;
3264
3265	*ptr = NULL;
3266
3267	VM_BUG_ON(PageTransHuge(page));
3268	if (mem_cgroup_disabled())
3269		return 0;
3270
3271	pc = lookup_page_cgroup(page);
3272	lock_page_cgroup(pc);
3273	if (PageCgroupUsed(pc)) {
3274		memcg = pc->mem_cgroup;
3275		css_get(&memcg->css);
3276		/*
3277		 * When migrating an anonymous page, its mapcount goes down
3278		 * to 0 and uncharge() will be called. But, even if it's fully
3279		 * unmapped, migration may fail and this page has to be
3280		 * charged again. We set MIGRATION flag here and delay uncharge
3281		 * until end_migration() is called
3282		 *
3283		 * Corner Case Thinking
3284		 * A)
3285		 * When the old page was mapped as Anon and it's unmap-and-freed
3286		 * while migration was ongoing.
3287		 * If unmap finds the old page, uncharge() of it will be delayed
3288		 * until end_migration(). If unmap finds a new page, it's
3289		 * uncharged when it makes the mapcount go from 1 to 0. If unmap
3290		 * code finds a swap_migration_entry, the new page will not be
3291		 * mapped and end_migration() will find it (mapcount == 0).
3292		 *
3293		 * B)
3294		 * When the old page was mapped but migration fails, the kernel
3295		 * remaps it. A charge for it is kept by MIGRATION flag even
3296		 * if mapcount goes down to 0. We can do remap successfully
3297		 * without charging it again.
3298		 *
3299		 * C)
3300		 * The "old" page is under lock_page() until the end of
3301		 * migration, so, the old page itself will not be swapped-out.
3302		 * If the new page is swapped out before end_migration, our
3303		 * hook to usual swap-out path will catch the event.
3304		 */
3305		if (PageAnon(page))
3306			SetPageCgroupMigration(pc);
3307	}
3308	unlock_page_cgroup(pc);
3309	/*
3310	 * If the page is not charged at this point,
3311	 * we return here.
3312	 */
3313	if (!memcg)
3314		return 0;
3315
3316	*ptr = memcg;
3317	ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
3318	css_put(&memcg->css);/* drop extra refcnt */
3319	if (ret || *ptr == NULL) {
3320		if (PageAnon(page)) {
3321			lock_page_cgroup(pc);
3322			ClearPageCgroupMigration(pc);
3323			unlock_page_cgroup(pc);
3324			/*
3325			 * The old page may be fully unmapped while we kept it.
3326			 */
3327			mem_cgroup_uncharge_page(page);
3328		}
3329		return -ENOMEM;
3330	}
3331	/*
3332	 * We charge new page before it's used/mapped. So, even if unlock_page()
3333	 * is called before end_migration, we can catch all events on this new
3334	 * page. In the case the new page is migrated but not remapped, the new page's
3335	 * mapcount will be finally 0 and we call uncharge in end_migration().
3336	 */
3337	pc = lookup_page_cgroup(newpage);
3338	if (PageAnon(page))
3339		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
3340	else if (page_is_file_cache(page))
3341		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
3342	else
3343		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3344	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
3345	return ret;
3346}
3347
3348/* remove the redundant charge if migration failed */
3349void mem_cgroup_end_migration(struct mem_cgroup *memcg,
3350	struct page *oldpage, struct page *newpage, bool migration_ok)
3351{
3352	struct page *used, *unused;
3353	struct page_cgroup *pc;
3354
3355	if (!memcg)
3356		return;
3357	/* blocks rmdir() */
3358	cgroup_exclude_rmdir(&memcg->css);
3359	if (!migration_ok) {
3360		used = oldpage;
3361		unused = newpage;
3362	} else {
3363		used = newpage;
3364		unused = oldpage;
3365	}
3366	/*
3367	 * We disallowed uncharging pages under migration because the mapcount
3368	 * of the page goes down to zero, temporarily.
3369	 * Clear the flag and check whether the page should stay charged.
3370	 */
3371	pc = lookup_page_cgroup(oldpage);
3372	lock_page_cgroup(pc);
3373	ClearPageCgroupMigration(pc);
3374	unlock_page_cgroup(pc);
3375
3376	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
3377
3378	/*
3379	 * If a page is file cache, the radix-tree replacement is atomic
3380	 * and we can skip this check. When it was an Anon page, its mapcount
3381	 * goes down to 0. But because we added the MIGRATION flag, it's not
3382	 * uncharged yet. There are several cases, but the page->mapcount check
3383	 * and the USED bit check in mem_cgroup_uncharge_page() will do enough
3384	 * checking. (see mem_cgroup_prepare_migration() also)
3385	 */
3386	if (PageAnon(used))
3387		mem_cgroup_uncharge_page(used);
3388	/*
3389	 * At migration, we may charge against a cgroup which has no
3390	 * tasks.
3391	 * So, rmdir()->pre_destroy() can be called while we do this charge.
3392	 * In that case, we need to call pre_destroy() again. Check it here.
3393	 */
3394	cgroup_release_and_wakeup_rmdir(&memcg->css);
3395}
3396
3397/*
3398 * At page cache replacement, the newpage is not under any memcg but it's
3399 * on the LRU. So, this function doesn't touch the res_counter but handles
3400 * the LRU in the correct way. Both pages are locked so we cannot race with uncharge.
3401 */
3402void mem_cgroup_replace_page_cache(struct page *oldpage,
3403				  struct page *newpage)
3404{
3405	struct mem_cgroup *memcg;
3406	struct page_cgroup *pc;
3407	struct zone *zone;
3408	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
3409	unsigned long flags;
3410
3411	if (mem_cgroup_disabled())
3412		return;
3413
3414	pc = lookup_page_cgroup(oldpage);
3415	/* fix accounting on old pages */
3416	lock_page_cgroup(pc);
3417	memcg = pc->mem_cgroup;
3418	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
3419	ClearPageCgroupUsed(pc);
3420	unlock_page_cgroup(pc);
3421
3422	if (PageSwapBacked(oldpage))
3423		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3424
3425	zone = page_zone(newpage);
3426	pc = lookup_page_cgroup(newpage);
3427	/*
3428	 * Even if newpage->mapping was NULL before starting replacement,
3429	 * the newpage may be on the LRU (or a pagevec for the LRU) already. We lock the
3430	 * LRU while we overwrite pc->mem_cgroup.
3431	 */
3432	spin_lock_irqsave(&zone->lru_lock, flags);
3433	if (PageLRU(newpage))
3434		del_page_from_lru_list(zone, newpage, page_lru(newpage));
3435	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
3436	if (PageLRU(newpage))
3437		add_page_to_lru_list(zone, newpage, page_lru(newpage));
3438	spin_unlock_irqrestore(&zone->lru_lock, flags);
3439}
3440
3441#ifdef CONFIG_DEBUG_VM
3442static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3443{
3444	struct page_cgroup *pc;
3445
3446	pc = lookup_page_cgroup(page);
3447	if (likely(pc) && PageCgroupUsed(pc))
3448		return pc;
3449	return NULL;
3450}
3451
3452bool mem_cgroup_bad_page_check(struct page *page)
3453{
3454	if (mem_cgroup_disabled())
3455		return false;
3456
3457	return lookup_page_cgroup_used(page) != NULL;
3458}
3459
3460void mem_cgroup_print_bad_page(struct page *page)
3461{
3462	struct page_cgroup *pc;
3463
3464	pc = lookup_page_cgroup_used(page);
3465	if (pc) {
3466		int ret = -1;
3467		char *path;
3468
3469		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
3470		       pc, pc->flags, pc->mem_cgroup);
3471
3472		path = kmalloc(PATH_MAX, GFP_KERNEL);
3473		if (path) {
3474			rcu_read_lock();
3475			ret = cgroup_path(pc->mem_cgroup->css.cgroup,
3476							path, PATH_MAX);
3477			rcu_read_unlock();
3478		}
3479
3480		printk(KERN_CONT "(%s)\n",
3481				(ret < 0) ? "cannot get the path" : path);
3482		kfree(path);
3483	}
3484}
3485#endif
3486
3487static DEFINE_MUTEX(set_limit_mutex);
3488
3489static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3490				unsigned long long val)
3491{
3492	int retry_count;
3493	u64 memswlimit, memlimit;
3494	int ret = 0;
3495	int children = mem_cgroup_count_children(memcg);
3496	u64 curusage, oldusage;
3497	int enlarge;
3498
3499	/*
3500	 * For keeping hierarchical_reclaim simple, how long we should retry
3501	 * depends on the caller. We set our retry-count to be a function
3502	 * of the number of children which we should visit in this loop.
3503	 */
3504	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
3505
3506	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3507
3508	enlarge = 0;
3509	while (retry_count) {
3510		if (signal_pending(current)) {
3511			ret = -EINTR;
3512			break;
3513		}
3514		/*
3515		 * Rather than hiding it all in some function, I do this in an
3516		 * open-coded manner, so you can see what this really does.
3517		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
3518		 */
3519		mutex_lock(&set_limit_mutex);
3520		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3521		if (memswlimit < val) {
3522			ret = -EINVAL;
3523			mutex_unlock(&set_limit_mutex);
3524			break;
3525		}
3526
3527		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3528		if (memlimit < val)
3529			enlarge = 1;
3530
3531		ret = res_counter_set_limit(&memcg->res, val);
3532		if (!ret) {
3533			if (memswlimit == val)
3534				memcg->memsw_is_minimum = true;
3535			else
3536				memcg->memsw_is_minimum = false;
3537		}
3538		mutex_unlock(&set_limit_mutex);
3539
3540		if (!ret)
3541			break;
3542
3543		mem_cgroup_reclaim(memcg, GFP_KERNEL,
3544				   MEM_CGROUP_RECLAIM_SHRINK);
3545		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3546		/* Usage is reduced ? */
3547		if (curusage >= oldusage)
3548			retry_count--;
3549		else
3550			oldusage = curusage;
3551	}
3552	if (!ret && enlarge)
3553		memcg_oom_recover(memcg);
3554
3555	return ret;
3556}
3557
3558static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3559					unsigned long long val)
3560{
3561	int retry_count;
3562	u64 memlimit, memswlimit, oldusage, curusage;
3563	int children = mem_cgroup_count_children(memcg);
3564	int ret = -EBUSY;
3565	int enlarge = 0;
3566
3567	/* see mem_cgroup_resize_res_limit */
3568	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3569	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3570	while (retry_count) {
3571		if (signal_pending(current)) {
3572			ret = -EINTR;
3573			break;
3574		}
3575		/*
3576		 * Rather than hiding it all in some function, I do this in an
3577		 * open-coded manner, so you can see what this really does.
3578		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
3579		 */
3580		mutex_lock(&set_limit_mutex);
3581		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3582		if (memlimit > val) {
3583			ret = -EINVAL;
3584			mutex_unlock(&set_limit_mutex);
3585			break;
3586		}
3587		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3588		if (memswlimit < val)
3589			enlarge = 1;
3590		ret = res_counter_set_limit(&memcg->memsw, val);
3591		if (!ret) {
3592			if (memlimit == val)
3593				memcg->memsw_is_minimum = true;
3594			else
3595				memcg->memsw_is_minimum = false;
3596		}
3597		mutex_unlock(&set_limit_mutex);
3598
3599		if (!ret)
3600			break;
3601
3602		mem_cgroup_reclaim(memcg, GFP_KERNEL,
3603				   MEM_CGROUP_RECLAIM_NOSWAP |
3604				   MEM_CGROUP_RECLAIM_SHRINK);
3605		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3606		/* Usage is reduced ? */
3607		if (curusage >= oldusage)
3608			retry_count--;
3609		else
3610			oldusage = curusage;
3611	}
3612	if (!ret && enlarge)
3613		memcg_oom_recover(memcg);
3614	return ret;
3615}
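
/*
 * Note the symmetry of the two resizers above: mem_cgroup_resize_limit()
 * rejects a memory limit above the current memsw limit, while
 * mem_cgroup_resize_memsw_limit() rejects a memsw limit below the current
 * memory limit; together they maintain res.limit <= memsw.limit.
 */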
3616
3617unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3618					    gfp_t gfp_mask,
3619					    unsigned long *total_scanned)
3620{
3621	unsigned long nr_reclaimed = 0;
3622	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3623	unsigned long reclaimed;
3624	int loop = 0;
3625	struct mem_cgroup_tree_per_zone *mctz;
3626	unsigned long long excess;
3627	unsigned long nr_scanned;
3628
3629	if (order > 0)
3630		return 0;
3631
3632	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
3633	/*
3634	 * This loop can run for a while, especially if mem_cgroups continuously
3635	 * keep exceeding their soft limit and putting the system under
3636	 * pressure
3637	 */
3638	do {
3639		if (next_mz)
3640			mz = next_mz;
3641		else
3642			mz = mem_cgroup_largest_soft_limit_node(mctz);
3643		if (!mz)
3644			break;
3645
3646		nr_scanned = 0;
3647		reclaimed = mem_cgroup_soft_reclaim(mz->mem, zone,
3648						    gfp_mask, &nr_scanned);
3649		nr_reclaimed += reclaimed;
3650		*total_scanned += nr_scanned;
3651		spin_lock(&mctz->lock);
3652
3653		/*
3654		 * If we failed to reclaim anything from this memory cgroup
3655		 * it is time to move on to the next cgroup
3656		 */
3657		next_mz = NULL;
3658		if (!reclaimed) {
3659			do {
3660				/*
3661				 * Loop until we find yet another one.
3662				 *
3663				 * By the time we get the soft_limit lock
3664				 * again, someone might have added the
3665				 * group back on the RB tree. Iterate to
3666				 * make sure we get a different mem.
3667				 * mem_cgroup_largest_soft_limit_node returns
3668				 * NULL if no other cgroup is present on
3669				 * the tree
3670				 */
3671				next_mz =
3672				__mem_cgroup_largest_soft_limit_node(mctz);
3673				if (next_mz == mz)
3674					css_put(&next_mz->mem->css);
3675				else /* next_mz == NULL or other memcg */
3676					break;
3677			} while (1);
3678		}
3679		__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
3680		excess = res_counter_soft_limit_excess(&mz->mem->res);
3681		/*
3682		 * One school of thought says that we should not add
3683		 * back the node to the tree if reclaim returns 0.
3684		 * But our reclaim could return 0 simply because, due to
3685		 * the reclaim priority, we are exposing a smaller subset
3686		 * of memory to reclaim from. Consider this a longer-term
3687		 * TODO.
3688		 */
3689		/* If excess == 0, no tree ops */
3690		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
3691		spin_unlock(&mctz->lock);
3692		css_put(&mz->mem->css);
3693		loop++;
3694		/*
3695		 * We could not reclaim anything and there are no more
3696		 * mem cgroups to try, or we seem to be looping without
3697		 * reclaiming anything.
3698		 */
3699		if (!nr_reclaimed &&
3700			(next_mz == NULL ||
3701			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3702			break;
3703	} while (!nr_reclaimed);
3704	if (next_mz)
3705		css_put(&next_mz->mem->css);
3706	return nr_reclaimed;
3707}
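/*
 * Soft limits are configured per group from userspace; a hedged sketch,
 * with the mount point and group name as illustrative assumptions (needs
 * <stdio.h>):
 *
 *	FILE *f = fopen("/sys/fs/cgroup/memory/grp/memory.soft_limit_in_bytes", "w");
 *	if (f) {
 *		fputs("256M", f);	// parsed by res_counter_memparse_write_strategy()
 *		fclose(f);
 *	}
 *
 * Under global memory pressure the reclaim path then calls
 * mem_cgroup_soft_limit_reclaim() above, which walks the per-zone RB tree
 * and reclaims first from the group exceeding its soft limit the most.
 */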
3708
3709/*
3710 * This routine traverses the page_cgroups on the given list and drops them
3711 * all. It does not reclaim the pages themselves, just the page_cgroup accounting.
3712 */
3713static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
3714				int node, int zid, enum lru_list lru)
3715{
3716	struct mem_cgroup_per_zone *mz;
3717	unsigned long flags, loop;
3718	struct list_head *list;
3719	struct page *busy;
3720	struct zone *zone;
3721	int ret = 0;
3722
3723	zone = &NODE_DATA(node)->node_zones[zid];
3724	mz = mem_cgroup_zoneinfo(memcg, node, zid);
3725	list = &mz->lruvec.lists[lru];
3726
3727	loop = MEM_CGROUP_ZSTAT(mz, lru);
3728	/* give some margin against -EBUSY etc... */
3729	loop += 256;
3730	busy = NULL;
3731	while (loop--) {
3732		struct page_cgroup *pc;
3733		struct page *page;
3734
3735		ret = 0;
3736		spin_lock_irqsave(&zone->lru_lock, flags);
3737		if (list_empty(list)) {
3738			spin_unlock_irqrestore(&zone->lru_lock, flags);
3739			break;
3740		}
3741		page = list_entry(list->prev, struct page, lru);
3742		if (busy == page) {
3743			list_move(&page->lru, list);
3744			busy = NULL;
3745			spin_unlock_irqrestore(&zone->lru_lock, flags);
3746			continue;
3747		}
3748		spin_unlock_irqrestore(&zone->lru_lock, flags);
3749
3750		pc = lookup_page_cgroup(page);
3751
3752		ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL);
3753		if (ret == -ENOMEM)
3754			break;
3755
3756		if (ret == -EBUSY || ret == -EINVAL) {
3757			/* found lock contention or "pc" is obsolete. */
3758			busy = page;
3759			cond_resched();
3760		} else
3761			busy = NULL;
3762	}
3763
3764	if (!ret && !list_empty(list))
3765		return -EBUSY;
3766	return ret;
3767}
3768
3769/*
3770 * Drive the mem_cgroup's charge down to 0 when it has no tasks.
3771 * This makes it possible to delete the mem_cgroup.
3772 */
3773static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all)
3774{
3775	int ret;
3776	int node, zid, shrink;
3777	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3778	struct cgroup *cgrp = memcg->css.cgroup;
3779
3780	css_get(&memcg->css);
3781
3782	shrink = 0;
3783	/* should free all ? */
3784	if (free_all)
3785		goto try_to_free;
3786move_account:
3787	do {
3788		ret = -EBUSY;
3789		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
3790			goto out;
3791		ret = -EINTR;
3792		if (signal_pending(current))
3793			goto out;
3794		/* This ensures that all *used* pages are on an LRU list. */
3795		lru_add_drain_all();
3796		drain_all_stock_sync(memcg);
3797		ret = 0;
3798		mem_cgroup_start_move(memcg);
3799		for_each_node_state(node, N_HIGH_MEMORY) {
3800			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3801				enum lru_list l;
3802				for_each_lru(l) {
3803					ret = mem_cgroup_force_empty_list(memcg,
3804							node, zid, l);
3805					if (ret)
3806						break;
3807				}
3808			}
3809			if (ret)
3810				break;
3811		}
3812		mem_cgroup_end_move(memcg);
3813		memcg_oom_recover(memcg);
3814		/* the parent cgroup doesn't seem to have enough memory */
3815		if (ret == -ENOMEM)
3816			goto try_to_free;
3817		cond_resched();
3818	/* "ret" should also be checked to ensure all lists are empty. */
3819	} while (memcg->res.usage > 0 || ret);
3820out:
3821	css_put(&memcg->css);
3822	return ret;
3823
3824try_to_free:
3825	/* returns -EBUSY if there is a task or if we come here twice. */
3826	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3827		ret = -EBUSY;
3828		goto out;
3829	}
3830	/* we call try-to-free pages to make this cgroup empty */
3831	lru_add_drain_all();
3832	/* try to free all pages in this cgroup */
3833	shrink = 1;
3834	while (nr_retries && memcg->res.usage > 0) {
3835		int progress;
3836
3837		if (signal_pending(current)) {
3838			ret = -EINTR;
3839			goto out;
3840		}
3841		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
3842						false);
3843		if (!progress) {
3844			nr_retries--;
3845			/* maybe some writeback is necessary */
3846			congestion_wait(BLK_RW_ASYNC, HZ/10);
3847		}
3848
3849	}
3850	lru_add_drain();
3851	/* try move_account...there may be some *locked* pages. */
3852	goto move_account;
3853}
3854
3855int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3856{
3857	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3858}
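/*
 * The trigger above is reached by writing to memory.force_empty; any write
 * fires it and the written value is ignored. A hedged example (the path is
 * an illustrative assumption, and <stdio.h> is needed):
 *
 *	FILE *f = fopen("/sys/fs/cgroup/memory/grp/memory.force_empty", "w");
 *	if (f) {
 *		fputs("0", f);	// ends up in mem_cgroup_force_empty(memcg, true)
 *		fclose(f);
 *	}
 */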
3859
3860
3861static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3862{
3863	return mem_cgroup_from_cont(cont)->use_hierarchy;
3864}
3865
3866static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3867					u64 val)
3868{
3869	int retval = 0;
3870	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3871	struct cgroup *parent = cont->parent;
3872	struct mem_cgroup *parent_memcg = NULL;
3873
3874	if (parent)
3875		parent_memcg = mem_cgroup_from_cont(parent);
3876
3877	cgroup_lock();
3878	/*
3879	 * If parent's use_hierarchy is set, we can't make any modifications
3880	 * in the child subtrees. If it is unset, then the change can
3881	 * occur, provided the current cgroup has no children.
3882	 *
3883	 * For the root cgroup, parent_memcg is NULL; we allow the value
3884	 * to be set if there are no children.
3885	 */
3886	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3887				(val == 1 || val == 0)) {
3888		if (list_empty(&cont->children))
3889			memcg->use_hierarchy = val;
3890		else
3891			retval = -EBUSY;
3892	} else
3893		retval = -EINVAL;
3894	cgroup_unlock();
3895
3896	return retval;
3897}
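/*
 * A hedged sketch of toggling hierarchical accounting (the path is an
 * illustrative assumption). Per the handler above, the write fails with
 * -EBUSY once the group has children and with -EINVAL when the parent
 * already uses hierarchy:
 *
 *	FILE *f = fopen("/sys/fs/cgroup/memory/grp/memory.use_hierarchy", "w");
 *	if (f) {
 *		fputs("1", f);
 *		fclose(f);
 *	}
 */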
3898
3899
3900static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
3901					       enum mem_cgroup_stat_index idx)
3902{
3903	struct mem_cgroup *iter;
3904	long val = 0;
3905
3906	/* Per-cpu values can be negative, use a signed accumulator */
3907	for_each_mem_cgroup_tree(iter, memcg)
3908		val += mem_cgroup_read_stat(iter, idx);
3909
3910	if (val < 0) /* race ? */
3911		val = 0;
3912	return val;
3913}
3914
3915static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3916{
3917	u64 val;
3918
3919	if (!mem_cgroup_is_root(memcg)) {
3920		if (!swap)
3921			return res_counter_read_u64(&memcg->res, RES_USAGE);
3922		else
3923			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
3924	}
3925
3926	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
3927	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
3928
3929	if (swap)
3930		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
3931
3932	return val << PAGE_SHIFT;
3933}
3934
3935static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
3936{
3937	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3938	u64 val;
3939	int type, name;
3940
3941	type = MEMFILE_TYPE(cft->private);
3942	name = MEMFILE_ATTR(cft->private);
3943	switch (type) {
3944	case _MEM:
3945		if (name == RES_USAGE)
3946			val = mem_cgroup_usage(memcg, false);
3947		else
3948			val = res_counter_read_u64(&memcg->res, name);
3949		break;
3950	case _MEMSWAP:
3951		if (name == RES_USAGE)
3952			val = mem_cgroup_usage(memcg, true);
3953		else
3954			val = res_counter_read_u64(&memcg->memsw, name);
3955		break;
3956	default:
3957		BUG();
3958		break;
3959	}
3960	return val;
3961}
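/*
 * mem_cgroup_read() above backs files such as memory.usage_in_bytes and
 * memory.memsw.usage_in_bytes; a minimal hedged reader (the path is an
 * illustrative assumption):
 *
 *	#include <stdio.h>
 *
 *	char buf[32];
 *	FILE *f = fopen("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes", "r");
 *	if (f) {
 *		if (fgets(buf, sizeof(buf), f))
 *			printf("usage: %s", buf);	// value in bytes
 *		fclose(f);
 *	}
 */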
3962/*
3963 * The users of this function are the write handlers for
3964 * RES_LIMIT and RES_SOFT_LIMIT.
3965 */
3966static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3967			    const char *buffer)
3968{
3969	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3970	int type, name;
3971	unsigned long long val;
3972	int ret;
3973
3974	type = MEMFILE_TYPE(cft->private);
3975	name = MEMFILE_ATTR(cft->private);
3976	switch (name) {
3977	case RES_LIMIT:
3978		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3979			ret = -EINVAL;
3980			break;
3981		}
3982		/* this helper does all the necessary parsing; reuse it */
3983		ret = res_counter_memparse_write_strategy(buffer, &val);
3984		if (ret)
3985			break;
3986		if (type == _MEM)
3987			ret = mem_cgroup_resize_limit(memcg, val);
3988		else
3989			ret = mem_cgroup_resize_memsw_limit(memcg, val);
3990		break;
3991	case RES_SOFT_LIMIT:
3992		ret = res_counter_memparse_write_strategy(buffer, &val);
3993		if (ret)
3994			break;
3995		/*
3996		 * For memsw, soft limits are hard to implement in terms
3997		 * of semantics; for now, we support soft limits only for
3998		 * memory control without swap.
3999		 */
4000		if (type == _MEM)
4001			ret = res_counter_set_soft_limit(&memcg->res, val);
4002		else
4003			ret = -EINVAL;
4004		break;
4005	default:
4006		ret = -EINVAL; /* should be BUG() ? */
4007		break;
4008	}
4009	return ret;
4010}
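/*
 * A hedged sketch for the write handler above (the path is an illustrative
 * assumption, <stdio.h> needed). res_counter_memparse_write_strategy()
 * accepts memparse() suffixes such as K, M and G, and "-1" is interpreted
 * as "unlimited" (RESOURCE_MAX):
 *
 *	FILE *f = fopen("/sys/fs/cgroup/memory/grp/memory.limit_in_bytes", "w");
 *	if (f) {
 *		fputs("1G", f);		// or "-1" to remove the limit
 *		fclose(f);
 *	}
 */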
4011
4012static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
4013		unsigned long long *mem_limit, unsigned long long *memsw_limit)
4014{
4015	struct cgroup *cgroup;
4016	unsigned long long min_limit, min_memsw_limit, tmp;
4017
4018	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4019	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4020	cgroup = memcg->css.cgroup;
4021	if (!memcg->use_hierarchy)
4022		goto out;
4023
4024	while (cgroup->parent) {
4025		cgroup = cgroup->parent;
4026		memcg = mem_cgroup_from_cont(cgroup);
4027		if (!memcg->use_hierarchy)
4028			break;
4029		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
4030		min_limit = min(min_limit, tmp);
4031		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4032		min_memsw_limit = min(min_memsw_limit, tmp);
4033	}
4034out:
4035	*mem_limit = min_limit;
4036	*memsw_limit = min_memsw_limit;
4037	return;
4038}
4039
4040static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
4041{
4042	struct mem_cgroup *memcg;
4043	int type, name;
4044
4045	memcg = mem_cgroup_from_cont(cont);
4046	type = MEMFILE_TYPE(event);
4047	name = MEMFILE_ATTR(event);
4048	switch (name) {
4049	case RES_MAX_USAGE:
4050		if (type == _MEM)
4051			res_counter_reset_max(&memcg->res);
4052		else
4053			res_counter_reset_max(&memcg->memsw);
4054		break;
4055	case RES_FAILCNT:
4056		if (type == _MEM)
4057			res_counter_reset_failcnt(&memcg->res);
4058		else
4059			res_counter_reset_failcnt(&memcg->memsw);
4060		break;
4061	}
4062
4063	return 0;
4064}
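/*
 * A hedged usage note for the reset trigger above (path assumed): any
 * write resets the counter selected by the file, e.g. memory.failcnt or
 * memory.max_usage_in_bytes:
 *
 *	FILE *f = fopen("/sys/fs/cgroup/memory/grp/memory.failcnt", "w");
 *	if (f) {
 *		fputs("0", f);	// res_counter_reset_failcnt(&memcg->res)
 *		fclose(f);
 *	}
 */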
4065
4066static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
4067					struct cftype *cft)
4068{
4069	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
4070}
4071
4072#ifdef CONFIG_MMU
4073static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4074					struct cftype *cft, u64 val)
4075{
4076	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4077
4078	if (val >= (1 << NR_MOVE_TYPE))
4079		return -EINVAL;
4080	/*
4081	 * We check this value several times, both in can_attach() and
4082	 * attach(), so we need cgroup lock to prevent this value from being
4083	 * inconsistent.
4084	 */
4085	cgroup_lock();
4086	memcg->move_charge_at_immigrate = val;
4087	cgroup_unlock();
4088
4089	return 0;
4090}
4091#else
4092static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4093					struct cftype *cft, u64 val)
4094{
4095	return -ENOSYS;
4096}
4097#endif
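/*
 * move_charge_at_immigrate is a bitmask of move types: bit 0 enables
 * moving anonymous pages and bit 1 file pages (tested via move_anon() and
 * move_file()). A hedged sketch, path assumed:
 *
 *	FILE *f = fopen("/sys/fs/cgroup/memory/grp/memory.move_charge_at_immigrate", "w");
 *	if (f) {
 *		fputs("3", f);	// move both anon and file pages at task migration
 *		fclose(f);
 *	}
 */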
4098
4099
4100/* For read statistics */
4101/* For reading statistics */
4102	MCS_CACHE,
4103	MCS_RSS,
4104	MCS_FILE_MAPPED,
4105	MCS_PGPGIN,
4106	MCS_PGPGOUT,
4107	MCS_SWAP,
4108	MCS_PGFAULT,
4109	MCS_PGMAJFAULT,
4110	MCS_INACTIVE_ANON,
4111	MCS_ACTIVE_ANON,
4112	MCS_INACTIVE_FILE,
4113	MCS_ACTIVE_FILE,
4114	MCS_UNEVICTABLE,
4115	NR_MCS_STAT,
4116};
4117
4118struct mcs_total_stat {
4119	s64 stat[NR_MCS_STAT];
4120};
4121
4122struct {
4123	char *local_name;
4124	char *total_name;
4125} memcg_stat_strings[NR_MCS_STAT] = {
4126	{"cache", "total_cache"},
4127	{"rss", "total_rss"},
4128	{"mapped_file", "total_mapped_file"},
4129	{"pgpgin", "total_pgpgin"},
4130	{"pgpgout", "total_pgpgout"},
4131	{"swap", "total_swap"},
4132	{"pgfault", "total_pgfault"},
4133	{"pgmajfault", "total_pgmajfault"},
4134	{"inactive_anon", "total_inactive_anon"},
4135	{"active_anon", "total_active_anon"},
4136	{"inactive_file", "total_inactive_file"},
4137	{"active_file", "total_active_file"},
4138	{"unevictable", "total_unevictable"}
4139};
4140
4141
4142static void
4143mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
4144{
4145	s64 val;
4146
4147	/* per cpu stat */
4148	val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
4149	s->stat[MCS_CACHE] += val * PAGE_SIZE;
4150	val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
4151	s->stat[MCS_RSS] += val * PAGE_SIZE;
4152	val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
4153	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
4154	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN);
4155	s->stat[MCS_PGPGIN] += val;
4156	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT);
4157	s->stat[MCS_PGPGOUT] += val;
4158	if (do_swap_account) {
4159		val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
4160		s->stat[MCS_SWAP] += val * PAGE_SIZE;
4161	}
4162	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT);
4163	s->stat[MCS_PGFAULT] += val;
4164	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT);
4165	s->stat[MCS_PGMAJFAULT] += val;
4166
4167	/* per zone stat */
4168	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
4169	s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
4170	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
4171	s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
4172	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
4173	s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
4174	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
4175	s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
4176	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
4177	s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
4178}
4179
4180static void
4181mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
4182{
4183	struct mem_cgroup *iter;
4184
4185	for_each_mem_cgroup_tree(iter, memcg)
4186		mem_cgroup_get_local_stat(iter, s);
4187}
4188
4189#ifdef CONFIG_NUMA
4190static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
4191{
4192	int nid;
4193	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
4194	unsigned long node_nr;
4195	struct cgroup *cont = m->private;
4196	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4197
4198	total_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL);
4199	seq_printf(m, "total=%lu", total_nr);
4200	for_each_node_state(nid, N_HIGH_MEMORY) {
4201		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL);
4202		seq_printf(m, " N%d=%lu", nid, node_nr);
4203	}
4204	seq_putc(m, '\n');
4205
4206	file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE);
4207	seq_printf(m, "file=%lu", file_nr);
4208	for_each_node_state(nid, N_HIGH_MEMORY) {
4209		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4210				LRU_ALL_FILE);
4211		seq_printf(m, " N%d=%lu", nid, node_nr);
4212	}
4213	seq_putc(m, '\n');
4214
4215	anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON);
4216	seq_printf(m, "anon=%lu", anon_nr);
4217	for_each_node_state(nid, N_HIGH_MEMORY) {
4218		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4219				LRU_ALL_ANON);
4220		seq_printf(m, " N%d=%lu", nid, node_nr);
4221	}
4222	seq_putc(m, '\n');
4223
4224	unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE));
4225	seq_printf(m, "unevictable=%lu", unevictable_nr);
4226	for_each_node_state(nid, N_HIGH_MEMORY) {
4227		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4228				BIT(LRU_UNEVICTABLE));
4229		seq_printf(m, " N%d=%lu", nid, node_nr);
4230	}
4231	seq_putc(m, '\n');
4232	return 0;
4233}
4234#endif /* CONFIG_NUMA */
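/*
 * The numa_stat file generated above emits one line per statistic with a
 * per-node breakdown, roughly of the following form (the values below are
 * made up):
 *
 *	total=1234 N0=800 N1=434
 *	file=1000 N0=700 N1=300
 *	anon=234 N0=100 N1=134
 *	unevictable=0 N0=0 N1=0
 *
 * All counts are in pages, collected via mem_cgroup_nr_lru_pages().
 */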
4235
4236static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
4237				 struct cgroup_map_cb *cb)
4238{
4239	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4240	struct mcs_total_stat mystat;
4241	int i;
4242
4243	memset(&mystat, 0, sizeof(mystat));
4244	mem_cgroup_get_local_stat(mem_cont, &mystat);
4245
4246
4247	for (i = 0; i < NR_MCS_STAT; i++) {
4248		if (i == MCS_SWAP && !do_swap_account)
4249			continue;
4250		cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
4251	}
4252
4253	/* Hierarchical information */
4254	{
4255		unsigned long long limit, memsw_limit;
4256		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
4257		cb->fill(cb, "hierarchical_memory_limit", limit);
4258		if (do_swap_account)
4259			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
4260	}
4261
4262	memset(&mystat, 0, sizeof(mystat));
4263	mem_cgroup_get_total_stat(mem_cont, &mystat);
4264	for (i = 0; i < NR_MCS_STAT; i++) {
4265		if (i == MCS_SWAP && !do_swap_account)
4266			continue;
4267		cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
4268	}
4269
4270#ifdef CONFIG_DEBUG_VM
4271	{
4272		int nid, zid;
4273		struct mem_cgroup_per_zone *mz;
4274		unsigned long recent_rotated[2] = {0, 0};
4275		unsigned long recent_scanned[2] = {0, 0};
4276
4277		for_each_online_node(nid)
4278			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4279				mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
4280
4281				recent_rotated[0] +=
4282					mz->reclaim_stat.recent_rotated[0];
4283				recent_rotated[1] +=
4284					mz->reclaim_stat.recent_rotated[1];
4285				recent_scanned[0] +=
4286					mz->reclaim_stat.recent_scanned[0];
4287				recent_scanned[1] +=
4288					mz->reclaim_stat.recent_scanned[1];
4289			}
4290		cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
4291		cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
4292		cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
4293		cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
4294	}
4295#endif
4296
4297	return 0;
4298}
4299
4300static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
4301{
4302	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4303
4304	return mem_cgroup_swappiness(memcg);
4305}
4306
4307static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
4308				       u64 val)
4309{
4310	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4311	struct mem_cgroup *parent;
4312
4313	if (val > 100)
4314		return -EINVAL;
4315
4316	if (cgrp->parent == NULL)
4317		return -EINVAL;
4318
4319	parent = mem_cgroup_from_cont(cgrp->parent);
4320
4321	cgroup_lock();
4322
4323	/* If under hierarchy, only empty-root can set this value */
4324	if ((parent->use_hierarchy) ||
4325	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4326		cgroup_unlock();
4327		return -EINVAL;
4328	}
4329
4330	memcg->swappiness = val;
4331
4332	cgroup_unlock();
4333
4334	return 0;
4335}
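/*
 * A hedged sketch for the swappiness knob above (path assumed). Values
 * range from 0 to 100; per the handler, the root cgroup and groups inside
 * an enabled hierarchy with children reject the write:
 *
 *	FILE *f = fopen("/sys/fs/cgroup/memory/grp/memory.swappiness", "w");
 *	if (f) {
 *		fputs("10", f);	// prefer reclaiming file pages over swapping
 *		fclose(f);
 *	}
 */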
4336
4337static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4338{
4339	struct mem_cgroup_threshold_ary *t;
4340	u64 usage;
4341	int i;
4342
4343	rcu_read_lock();
4344	if (!swap)
4345		t = rcu_dereference(memcg->thresholds.primary);
4346	else
4347		t = rcu_dereference(memcg->memsw_thresholds.primary);
4348
4349	if (!t)
4350		goto unlock;
4351
4352	usage = mem_cgroup_usage(memcg, swap);
4353
4354	/*
4355	 * current_threshold points to the threshold just below usage.
4356	 * If that is not the case, a threshold was crossed after the
4357	 * last call of __mem_cgroup_threshold().
4358	 */
4359	i = t->current_threshold;
4360
4361	/*
4362	 * Iterate backward over array of thresholds starting from
4363	 * current_threshold and check if a threshold is crossed.
4364	 * If none of the thresholds below usage is crossed, we read
4365	 * only one element of the array here.
4366	 */
4367	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4368		eventfd_signal(t->entries[i].eventfd, 1);
4369
4370	/* i = current_threshold + 1 */
4371	i++;
4372
4373	/*
4374	 * Iterate forward over array of thresholds starting from
4375	 * current_threshold+1 and check if a threshold is crossed.
4376	 * If none of the thresholds above usage is crossed, we read
4377	 * only one element of the array here.
4378	 */
4379	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4380		eventfd_signal(t->entries[i].eventfd, 1);
4381
4382	/* Update current_threshold */
4383	t->current_threshold = i - 1;
4384unlock:
4385	rcu_read_unlock();
4386}
4387
4388static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4389{
4390	while (memcg) {
4391		__mem_cgroup_threshold(memcg, false);
4392		if (do_swap_account)
4393			__mem_cgroup_threshold(memcg, true);
4394
4395		memcg = parent_mem_cgroup(memcg);
4396	}
4397}
4398
4399static int compare_thresholds(const void *a, const void *b)
4400{
4401	const struct mem_cgroup_threshold *_a = a;
4402	const struct mem_cgroup_threshold *_b = b;
4403
4404	return _a->threshold - _b->threshold;
4405}
4406
4407static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4408{
4409	struct mem_cgroup_eventfd_list *ev;
4410
4411	list_for_each_entry(ev, &memcg->oom_notify, list)
4412		eventfd_signal(ev->eventfd, 1);
4413	return 0;
4414}
4415
4416static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4417{
4418	struct mem_cgroup *iter;
4419
4420	for_each_mem_cgroup_tree(iter, memcg)
4421		mem_cgroup_oom_notify_cb(iter);
4422}
4423
4424static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
4425	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4426{
4427	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4428	struct mem_cgroup_thresholds *thresholds;
4429	struct mem_cgroup_threshold_ary *new;
4430	int type = MEMFILE_TYPE(cft->private);
4431	u64 threshold, usage;
4432	int i, size, ret;
4433
4434	ret = res_counter_memparse_write_strategy(args, &threshold);
4435	if (ret)
4436		return ret;
4437
4438	mutex_lock(&memcg->thresholds_lock);
4439
4440	if (type == _MEM)
4441		thresholds = &memcg->thresholds;
4442	else if (type == _MEMSWAP)
4443		thresholds = &memcg->memsw_thresholds;
4444	else
4445		BUG();
4446
4447	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4448
4449	/* Check if a threshold crossed before adding a new one */
4450	/* Check if a threshold was crossed before adding a new one */
4451		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4452
4453	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4454
4455	/* Allocate memory for new array of thresholds */
4456	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
4457			GFP_KERNEL);
4458	if (!new) {
4459		ret = -ENOMEM;
4460		goto unlock;
4461	}
4462	new->size = size;
4463
4464	/* Copy thresholds (if any) to new array */
4465	if (thresholds->primary) {
4466		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4467				sizeof(struct mem_cgroup_threshold));
4468	}
4469
4470	/* Add new threshold */
4471	new->entries[size - 1].eventfd = eventfd;
4472	new->entries[size - 1].threshold = threshold;
4473
4474	/* Sort thresholds. Registering of new threshold isn't time-critical */
4475	/* Sort thresholds. Registering a new threshold isn't time-critical */
4476			compare_thresholds, NULL);
4477
4478	/* Find current threshold */
4479	new->current_threshold = -1;
4480	for (i = 0; i < size; i++) {
4481		if (new->entries[i].threshold < usage) {
4482			/*
4483			 * new->current_threshold will not be used until
4484			 * rcu_assign_pointer(), so it's safe to increment
4485			 * it here.
4486			 */
4487			++new->current_threshold;
4488		}
4489	}
4490
4491	/* Free old spare buffer and save old primary buffer as spare */
4492	kfree(thresholds->spare);
4493	thresholds->spare = thresholds->primary;
4494
4495	rcu_assign_pointer(thresholds->primary, new);
4496
4497	/* To be sure that nobody uses thresholds */
4498	synchronize_rcu();
4499
4500unlock:
4501	mutex_unlock(&memcg->thresholds_lock);
4502
4503	return ret;
4504}
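/*
 * Registration is driven from userspace through cgroup.event_control with
 * an eventfd; a minimal hedged sketch (paths are illustrative assumptions,
 * error handling omitted):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/eventfd.h>
 *
 *	int efd = eventfd(0, 0);
 *	int ufd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes", O_RDONLY);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control", O_WRONLY);
 *	char buf[64];
 *	uint64_t hits;
 *
 *	snprintf(buf, sizeof(buf), "%d %d 256M", efd, ufd);
 *	write(cfd, buf, strlen(buf));	// reaches mem_cgroup_usage_register_event()
 *	read(efd, &hits, sizeof(hits));	// blocks until the threshold is crossed
 */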
4505
4506static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
4507	struct cftype *cft, struct eventfd_ctx *eventfd)
4508{
4509	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4510	struct mem_cgroup_thresholds *thresholds;
4511	struct mem_cgroup_threshold_ary *new;
4512	int type = MEMFILE_TYPE(cft->private);
4513	u64 usage;
4514	int i, j, size;
4515
4516	mutex_lock(&memcg->thresholds_lock);
4517	if (type == _MEM)
4518		thresholds = &memcg->thresholds;
4519	else if (type == _MEMSWAP)
4520		thresholds = &memcg->memsw_thresholds;
4521	else
4522		BUG();
4523
4524	/*
4525	 * Something went wrong if we are trying to unregister a threshold
4526	 * when we don't have any thresholds.
4527	 */
4528	BUG_ON(!thresholds);
4529
4530	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4531
4532	/* Check if a threshold crossed before removing */
4533	/* Check if a threshold was crossed before removing */
4534
4535	/* Calculate new number of threshold */
4536	/* Calculate the new number of thresholds */
4537	for (i = 0; i < thresholds->primary->size; i++) {
4538		if (thresholds->primary->entries[i].eventfd != eventfd)
4539			size++;
4540	}
4541
4542	new = thresholds->spare;
4543
4544	/* Set thresholds array to NULL if we don't have thresholds */
4545	if (!size) {
4546		kfree(new);
4547		new = NULL;
4548		goto swap_buffers;
4549	}
4550
4551	new->size = size;
4552
4553	/* Copy thresholds and find current threshold */
4554	new->current_threshold = -1;
4555	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4556		if (thresholds->primary->entries[i].eventfd == eventfd)
4557			continue;
4558
4559		new->entries[j] = thresholds->primary->entries[i];
4560		if (new->entries[j].threshold < usage) {
4561			/*
4562			 * new->current_threshold will not be used
4563			 * until rcu_assign_pointer(), so it's safe to increment
4564			 * it here.
4565			 */
4566			++new->current_threshold;
4567		}
4568		j++;
4569	}
4570
4571swap_buffers:
4572	/* Swap primary and spare array */
4573	thresholds->spare = thresholds->primary;
4574	rcu_assign_pointer(thresholds->primary, new);
4575
4576	/* To be sure that nobody uses thresholds */
4577	synchronize_rcu();
4578
4579	mutex_unlock(&memcg->thresholds_lock);
4580}
4581
4582static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4583	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4584{
4585	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4586	struct mem_cgroup_eventfd_list *event;
4587	int type = MEMFILE_TYPE(cft->private);
4588
4589	BUG_ON(type != _OOM_TYPE);
4590	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4591	if (!event)
4592		return -ENOMEM;
4593
4594	spin_lock(&memcg_oom_lock);
4595
4596	event->eventfd = eventfd;
4597	list_add(&event->list, &memcg->oom_notify);
4598
4599	/* already in OOM ? */
4600	if (atomic_read(&memcg->under_oom))
4601		eventfd_signal(eventfd, 1);
4602	spin_unlock(&memcg_oom_lock);
4603
4604	return 0;
4605}
4606
4607static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
4608	struct cftype *cft, struct eventfd_ctx *eventfd)
4609{
4610	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4611	struct mem_cgroup_eventfd_list *ev, *tmp;
4612	int type = MEMFILE_TYPE(cft->private);
4613
4614	BUG_ON(type != _OOM_TYPE);
4615
4616	spin_lock(&memcg_oom_lock);
4617
4618	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4619		if (ev->eventfd == eventfd) {
4620			list_del(&ev->list);
4621			kfree(ev);
4622		}
4623	}
4624
4625	spin_unlock(&memcg_oom_lock);
4626}
4627
4628static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
4629	struct cftype *cft,  struct cgroup_map_cb *cb)
4630{
4631	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4632
4633	cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
4634
4635	if (atomic_read(&memcg->under_oom))
4636		cb->fill(cb, "under_oom", 1);
4637	else
4638		cb->fill(cb, "under_oom", 0);
4639	return 0;
4640}
4641
4642static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4643	struct cftype *cft, u64 val)
4644{
4645	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4646	struct mem_cgroup *parent;
4647
4648	/* cannot be set on the root cgroup; only 0 and 1 are allowed */
4649	if (!cgrp->parent || !((val == 0) || (val == 1)))
4650		return -EINVAL;
4651
4652	parent = mem_cgroup_from_cont(cgrp->parent);
4653
4654	cgroup_lock();
4655	/* oom-kill-disable is a flag for subhierarchy. */
4656	/* oom-kill-disable is a flag for the whole sub-hierarchy. */
4657	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4658		cgroup_unlock();
4659		return -EINVAL;
4660	}
4661	memcg->oom_kill_disable = val;
4662	if (!val)
4663		memcg_oom_recover(memcg);
4664	cgroup_unlock();
4665	return 0;
4666}
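/*
 * A hedged sketch for the oom_control file above (path assumed). Writing 1
 * sets oom_kill_disable, so tasks hitting the limit sleep on the OOM
 * waitqueue instead of being killed; reading reports oom_kill_disable and
 * under_oom:
 *
 *	FILE *f = fopen("/sys/fs/cgroup/memory/grp/memory.oom_control", "w");
 *	if (f) {
 *		fputs("1", f);
 *		fclose(f);
 *	}
 */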
4667
4668#ifdef CONFIG_NUMA
4669static const struct file_operations mem_control_numa_stat_file_operations = {
4670	.read = seq_read,
4671	.llseek = seq_lseek,
4672	.release = single_release,
4673};
4674
4675static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
4676{
4677	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
4678
4679	file->f_op = &mem_control_numa_stat_file_operations;
4680	return single_open(file, mem_control_numa_stat_show, cont);
4681}
4682#endif /* CONFIG_NUMA */
4683
4684#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
4685static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
4686{
4687	/*
4688	 * Part of this would be better living in a separate allocation
4689	 * function, leaving us with just the cgroup tree population work.
4690	 * We, however, depend on state such as the network's proto_list,
4691	 * which is only initialized after cgroup creation. The least
4692	 * cumbersome way to deal with that is to defer it all to populate time.
4693	 */
4694	return mem_cgroup_sockets_init(cont, ss);
4695};
4696
4697static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
4698				struct cgroup *cont)
4699{
4700	mem_cgroup_sockets_destroy(cont, ss);
4701}
4702#else
4703static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
4704{
4705	return 0;
4706}
4707
4708static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
4709				struct cgroup *cont)
4710{
4711}
4712#endif
4713
4714static struct cftype mem_cgroup_files[] = {
4715	{
4716		.name = "usage_in_bytes",
4717		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4718		.read_u64 = mem_cgroup_read,
4719		.register_event = mem_cgroup_usage_register_event,
4720		.unregister_event = mem_cgroup_usage_unregister_event,
4721	},
4722	{
4723		.name = "max_usage_in_bytes",
4724		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4725		.trigger = mem_cgroup_reset,
4726		.read_u64 = mem_cgroup_read,
4727	},
4728	{
4729		.name = "limit_in_bytes",
4730		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4731		.write_string = mem_cgroup_write,
4732		.read_u64 = mem_cgroup_read,
4733	},
4734	{
4735		.name = "soft_limit_in_bytes",
4736		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4737		.write_string = mem_cgroup_write,
4738		.read_u64 = mem_cgroup_read,
4739	},
4740	{
4741		.name = "failcnt",
4742		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4743		.trigger = mem_cgroup_reset,
4744		.read_u64 = mem_cgroup_read,
4745	},
4746	{
4747		.name = "stat",
4748		.read_map = mem_control_stat_show,
4749	},
4750	{
4751		.name = "force_empty",
4752		.trigger = mem_cgroup_force_empty_write,
4753	},
4754	{
4755		.name = "use_hierarchy",
4756		.write_u64 = mem_cgroup_hierarchy_write,
4757		.read_u64 = mem_cgroup_hierarchy_read,
4758	},
4759	{
4760		.name = "swappiness",
4761		.read_u64 = mem_cgroup_swappiness_read,
4762		.write_u64 = mem_cgroup_swappiness_write,
4763	},
4764	{
4765		.name = "move_charge_at_immigrate",
4766		.read_u64 = mem_cgroup_move_charge_read,
4767		.write_u64 = mem_cgroup_move_charge_write,
4768	},
4769	{
4770		.name = "oom_control",
4771		.read_map = mem_cgroup_oom_control_read,
4772		.write_u64 = mem_cgroup_oom_control_write,
4773		.register_event = mem_cgroup_oom_register_event,
4774		.unregister_event = mem_cgroup_oom_unregister_event,
4775		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4776	},
4777#ifdef CONFIG_NUMA
4778	{
4779		.name = "numa_stat",
4780		.open = mem_control_numa_stat_open,
4781		.mode = S_IRUGO,
4782	},
4783#endif
4784};
4785
4786#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4787static struct cftype memsw_cgroup_files[] = {
4788	{
4789		.name = "memsw.usage_in_bytes",
4790		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4791		.read_u64 = mem_cgroup_read,
4792		.register_event = mem_cgroup_usage_register_event,
4793		.unregister_event = mem_cgroup_usage_unregister_event,
4794	},
4795	{
4796		.name = "memsw.max_usage_in_bytes",
4797		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4798		.trigger = mem_cgroup_reset,
4799		.read_u64 = mem_cgroup_read,
4800	},
4801	{
4802		.name = "memsw.limit_in_bytes",
4803		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4804		.write_string = mem_cgroup_write,
4805		.read_u64 = mem_cgroup_read,
4806	},
4807	{
4808		.name = "memsw.failcnt",
4809		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4810		.trigger = mem_cgroup_reset,
4811		.read_u64 = mem_cgroup_read,
4812	},
4813};
4814
4815static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4816{
4817	if (!do_swap_account)
4818		return 0;
4819	return cgroup_add_files(cont, ss, memsw_cgroup_files,
4820				ARRAY_SIZE(memsw_cgroup_files));
4821};
4822#else
4823static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4824{
4825	return 0;
4826}
4827#endif
4828
4829static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4830{
4831	struct mem_cgroup_per_node *pn;
4832	struct mem_cgroup_per_zone *mz;
4833	enum lru_list l;
4834	int zone, tmp = node;
4835	/*
4836	 * This routine is called for each possible node, but it is a
4837	 * BUG to call kmalloc() against an offline node.
4838	 *
4839	 * TODO: this routine can waste a lot of memory for nodes which
4840	 *       will never be onlined. It would be better to use a memory
4841	 *       hotplug callback instead.
4842	 */
4843	if (!node_state(node, N_NORMAL_MEMORY))
4844		tmp = -1;
4845	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4846	if (!pn)
4847		return 1;
4848
4849	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4850		mz = &pn->zoneinfo[zone];
4851		for_each_lru(l)
4852			INIT_LIST_HEAD(&mz->lruvec.lists[l]);
4853		mz->usage_in_excess = 0;
4854		mz->on_tree = false;
4855		mz->mem = memcg;
4856	}
4857	memcg->info.nodeinfo[node] = pn;
4858	return 0;
4859}
4860
4861static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4862{
4863	kfree(memcg->info.nodeinfo[node]);
4864}
4865
4866static struct mem_cgroup *mem_cgroup_alloc(void)
4867{
4868	struct mem_cgroup *mem;
4869	int size = sizeof(struct mem_cgroup);
4870
4871	/* Can be very big if MAX_NUMNODES is very big */
4872	if (size < PAGE_SIZE)
4873		mem = kzalloc(size, GFP_KERNEL);
4874	else
4875		mem = vzalloc(size);
4876
4877	if (!mem)
4878		return NULL;
4879
4880	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4881	if (!mem->stat)
4882		goto out_free;
4883	spin_lock_init(&mem->pcp_counter_lock);
4884	return mem;
4885
4886out_free:
4887	if (size < PAGE_SIZE)
4888		kfree(mem);
4889	else
4890		vfree(mem);
4891	return NULL;
4892}
4893
4894/*
4895 * When a mem_cgroup is destroyed, references from swap_cgroup can remain
4896 * (scanning them all at force_empty time would be too costly).
4897 *
4898 * Instead of clearing all references at force_empty, we remember
4899 * the number of references from swap_cgroup and free the mem_cgroup when
4900 * it goes down to 0.
4901 *
4902 * Removal of the cgroup itself succeeds regardless of refs from swap.
4903 */
4904
4905static void __mem_cgroup_free(struct mem_cgroup *memcg)
4906{
4907	int node;
4908
4909	mem_cgroup_remove_from_trees(memcg);
4910	free_css_id(&mem_cgroup_subsys, &memcg->css);
4911
4912	for_each_node_state(node, N_POSSIBLE)
4913		free_mem_cgroup_per_zone_info(memcg, node);
4914
4915	free_percpu(memcg->stat);
4916	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
4917		kfree(memcg);
4918	else
4919		vfree(memcg);
4920}
4921
4922static void mem_cgroup_get(struct mem_cgroup *memcg)
4923{
4924	atomic_inc(&memcg->refcnt);
4925}
4926
4927static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
4928{
4929	if (atomic_sub_and_test(count, &memcg->refcnt)) {
4930		struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4931		__mem_cgroup_free(memcg);
4932		if (parent)
4933			mem_cgroup_put(parent);
4934	}
4935}
4936
4937static void mem_cgroup_put(struct mem_cgroup *memcg)
4938{
4939	__mem_cgroup_put(memcg, 1);
4940}
4941
4942/*
4943 * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
4944 */
4945struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
4946{
4947	if (!memcg->res.parent)
4948		return NULL;
4949	return mem_cgroup_from_res_counter(memcg->res.parent, res);
4950}
4951EXPORT_SYMBOL(parent_mem_cgroup);
4952
4953#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4954static void __init enable_swap_cgroup(void)
4955{
4956	if (!mem_cgroup_disabled() && really_do_swap_account)
4957		do_swap_account = 1;
4958}
4959#else
4960static void __init enable_swap_cgroup(void)
4961{
4962}
4963#endif
4964
4965static int mem_cgroup_soft_limit_tree_init(void)
4966{
4967	struct mem_cgroup_tree_per_node *rtpn;
4968	struct mem_cgroup_tree_per_zone *rtpz;
4969	int tmp, node, zone;
4970
4971	for_each_node_state(node, N_POSSIBLE) {
4972		tmp = node;
4973		if (!node_state(node, N_NORMAL_MEMORY))
4974			tmp = -1;
4975		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4976		if (!rtpn)
4977			return 1;
4978
4979		soft_limit_tree.rb_tree_per_node[node] = rtpn;
4980
4981		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4982			rtpz = &rtpn->rb_tree_per_zone[zone];
4983			rtpz->rb_root = RB_ROOT;
4984			spin_lock_init(&rtpz->lock);
4985		}
4986	}
4987	return 0;
4988}
4989
4990static struct cgroup_subsys_state * __ref
4991mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4992{
4993	struct mem_cgroup *memcg, *parent;
4994	long error = -ENOMEM;
4995	int node;
4996
4997	memcg = mem_cgroup_alloc();
4998	if (!memcg)
4999		return ERR_PTR(error);
5000
5001	for_each_node_state(node, N_POSSIBLE)
5002		if (alloc_mem_cgroup_per_zone_info(memcg, node))
5003			goto free_out;
5004
5005	/* root ? */
5006	if (cont->parent == NULL) {
5007		int cpu;
5008		enable_swap_cgroup();
5009		parent = NULL;
5010		if (mem_cgroup_soft_limit_tree_init())
5011			goto free_out;
5012		root_mem_cgroup = memcg;
5013		for_each_possible_cpu(cpu) {
5014			struct memcg_stock_pcp *stock =
5015						&per_cpu(memcg_stock, cpu);
5016			INIT_WORK(&stock->work, drain_local_stock);
5017		}
5018		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
5019	} else {
5020		parent = mem_cgroup_from_cont(cont->parent);
5021		memcg->use_hierarchy = parent->use_hierarchy;
5022		memcg->oom_kill_disable = parent->oom_kill_disable;
5023	}
5024
5025	if (parent && parent->use_hierarchy) {
5026		res_counter_init(&memcg->res, &parent->res);
5027		res_counter_init(&memcg->memsw, &parent->memsw);
5028		/*
5029		 * We increment refcnt of the parent to ensure that we can
5030		 * safely access it on res_counter_charge/uncharge.
5031		 * This refcnt will be decremented when freeing this
5032		 * mem_cgroup(see mem_cgroup_put).
5033		 */
5034		mem_cgroup_get(parent);
5035	} else {
5036		res_counter_init(&memcg->res, NULL);
5037		res_counter_init(&memcg->memsw, NULL);
5038	}
5039	memcg->last_scanned_node = MAX_NUMNODES;
5040	INIT_LIST_HEAD(&memcg->oom_notify);
5041
5042	if (parent)
5043		memcg->swappiness = mem_cgroup_swappiness(parent);
5044	atomic_set(&memcg->refcnt, 1);
5045	memcg->move_charge_at_immigrate = 0;
5046	mutex_init(&memcg->thresholds_lock);
5047	return &memcg->css;
5048free_out:
5049	__mem_cgroup_free(memcg);
5050	return ERR_PTR(error);
5051}
5052
5053static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
5054					struct cgroup *cont)
5055{
5056	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
5057
5058	return mem_cgroup_force_empty(memcg, false);
5059}
5060
5061static void mem_cgroup_destroy(struct cgroup_subsys *ss,
5062				struct cgroup *cont)
5063{
5064	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
5065
5066	kmem_cgroup_destroy(ss, cont);
5067
5068	mem_cgroup_put(memcg);
5069}
5070
5071static int mem_cgroup_populate(struct cgroup_subsys *ss,
5072				struct cgroup *cont)
5073{
5074	int ret;
5075
5076	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
5077				ARRAY_SIZE(mem_cgroup_files));
5078
5079	if (!ret)
5080		ret = register_memsw_files(cont, ss);
5081
5082	if (!ret)
5083		ret = register_kmem_files(cont, ss);
5084
5085	return ret;
5086}
5087
5088#ifdef CONFIG_MMU
5089/* Handlers for move charge at task migration. */
5090#define PRECHARGE_COUNT_AT_ONCE	256
5091static int mem_cgroup_do_precharge(unsigned long count)
5092{
5093	int ret = 0;
5094	int batch_count = PRECHARGE_COUNT_AT_ONCE;
5095	struct mem_cgroup *memcg = mc.to;
5096
5097	if (mem_cgroup_is_root(memcg)) {
5098		mc.precharge += count;
5099		/* we don't need css_get for root */
5100		return ret;
5101	}
5102	/* try to charge at once */
5103	if (count > 1) {
5104		struct res_counter *dummy;
5105		/*
5106		 * "memcg" cannot be under rmdir() because we've already checked
5107		 * by cgroup_lock_live_cgroup() that it is not removed and we
5108		 * are still under the same cgroup_mutex. So we can postpone
5109		 * css_get().
5110		 */
5111		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
5112			goto one_by_one;
5113		if (do_swap_account && res_counter_charge(&memcg->memsw,
5114						PAGE_SIZE * count, &dummy)) {
5115			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
5116			goto one_by_one;
5117		}
5118		mc.precharge += count;
5119		return ret;
5120	}
5121one_by_one:
5122	/* fall back to one by one charge */
5123	while (count--) {
5124		if (signal_pending(current)) {
5125			ret = -EINTR;
5126			break;
5127		}
5128		if (!batch_count--) {
5129			batch_count = PRECHARGE_COUNT_AT_ONCE;
5130			cond_resched();
5131		}
5132		ret = __mem_cgroup_try_charge(NULL,
5133					GFP_KERNEL, 1, &memcg, false);
5134		if (ret || !memcg)
5135			/* mem_cgroup_clear_mc() will do uncharge later */
5136			return -ENOMEM;
5137		mc.precharge++;
5138	}
5139	return ret;
5140}
5141
5142/**
5143 * is_target_pte_for_mc - check whether a pte is a valid target for move charge
5144 * @vma: the vma the pte to be checked belongs to
5145 * @addr: the address corresponding to the pte to be checked
5146 * @ptent: the pte to be checked
5147 * @target: pointer where the target page or swap entry is stored (can be NULL)
5148 *
5149 * Returns
5150 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5151 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5152 *     move charge. If @target is not NULL, the page is stored in target->page
5153 *     with an extra refcount taken (callers should handle it).
5154 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5155 *     target for charge migration. If @target is not NULL, the entry is stored
5156 *     in target->ent.
5157 *
5158 * Called with pte lock held.
5159 */
5160union mc_target {
5161	struct page	*page;
5162	swp_entry_t	ent;
5163};
5164
5165enum mc_target_type {
5166	MC_TARGET_NONE,	/* not used */
5167	MC_TARGET_PAGE,
5168	MC_TARGET_SWAP,
5169};
5170
5171static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5172						unsigned long addr, pte_t ptent)
5173{
5174	struct page *page = vm_normal_page(vma, addr, ptent);
5175
5176	if (!page || !page_mapped(page))
5177		return NULL;
5178	if (PageAnon(page)) {
5179		/* we don't move shared anon */
5180		if (!move_anon() || page_mapcount(page) > 2)
5181			return NULL;
5182	} else if (!move_file())
5183		/* we ignore mapcount for file pages */
5184		return NULL;
5185	if (!get_page_unless_zero(page))
5186		return NULL;
5187
5188	return page;
5189}
5190
5191static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5192			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5193{
5194	int usage_count;
5195	struct page *page = NULL;
5196	swp_entry_t ent = pte_to_swp_entry(ptent);
5197
5198	if (!move_anon() || non_swap_entry(ent))
5199		return NULL;
5200	usage_count = mem_cgroup_count_swap_user(ent, &page);
5201	if (usage_count > 1) { /* we don't move shared anon */
5202		if (page)
5203			put_page(page);
5204		return NULL;
5205	}
5206	if (do_swap_account)
5207		entry->val = ent.val;
5208
5209	return page;
5210}
5211
5212static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5213			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5214{
5215	struct page *page = NULL;
5216	struct inode *inode;
5217	struct address_space *mapping;
5218	pgoff_t pgoff;
5219
5220	if (!vma->vm_file) /* anonymous vma */
5221		return NULL;
5222	if (!move_file())
5223		return NULL;
5224
5225	inode = vma->vm_file->f_path.dentry->d_inode;
5226	mapping = vma->vm_file->f_mapping;
5227	if (pte_none(ptent))
5228		pgoff = linear_page_index(vma, addr);
5229	else /* pte_file(ptent) is true */
5230		pgoff = pte_to_pgoff(ptent);
5231
5232	/* the page is moved even if it's not in this task's RSS (page-faulted). */
5233	page = find_get_page(mapping, pgoff);
5234
5235#ifdef CONFIG_SWAP
5236	/* shmem/tmpfs may report page out on swap: account for that too. */
5237	if (radix_tree_exceptional_entry(page)) {
5238		swp_entry_t swap = radix_to_swp_entry(page);
5239		if (do_swap_account)
5240			*entry = swap;
5241		page = find_get_page(&swapper_space, swap.val);
5242	}
5243#endif
5244	return page;
5245}
5246
5247static int is_target_pte_for_mc(struct vm_area_struct *vma,
5248		unsigned long addr, pte_t ptent, union mc_target *target)
5249{
5250	struct page *page = NULL;
5251	struct page_cgroup *pc;
5252	int ret = 0;
5253	swp_entry_t ent = { .val = 0 };
5254
5255	if (pte_present(ptent))
5256		page = mc_handle_present_pte(vma, addr, ptent);
5257	else if (is_swap_pte(ptent))
5258		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
5259	else if (pte_none(ptent) || pte_file(ptent))
5260		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5261
5262	if (!page && !ent.val)
5263		return 0;
5264	if (page) {
5265		pc = lookup_page_cgroup(page);
5266		/*
5267		 * Do only a loose check without the page_cgroup lock;
5268		 * mem_cgroup_move_account() checks whether the pc is valid
5269		 * under the lock.
5270		 */
5271		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5272			ret = MC_TARGET_PAGE;
5273			if (target)
5274				target->page = page;
5275		}
5276		if (!ret || !target)
5277			put_page(page);
5278	}
5279	/* There is a swap entry and the page doesn't exist or isn't charged */
5280	if (ent.val && !ret &&
5281			css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
5282		ret = MC_TARGET_SWAP;
5283		if (target)
5284			target->ent = ent;
5285	}
5286	return ret;
5287}
5288
5289static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5290					unsigned long addr, unsigned long end,
5291					struct mm_walk *walk)
5292{
5293	struct vm_area_struct *vma = walk->private;
5294	pte_t *pte;
5295	spinlock_t *ptl;
5296
5297	split_huge_page_pmd(walk->mm, pmd);
5298
5299	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5300	for (; addr != end; pte++, addr += PAGE_SIZE)
5301		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
5302			mc.precharge++;	/* increment precharge temporarily */
5303	pte_unmap_unlock(pte - 1, ptl);
5304	cond_resched();
5305
5306	return 0;
5307}
5308
5309static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5310{
5311	unsigned long precharge;
5312	struct vm_area_struct *vma;
5313
5314	down_read(&mm->mmap_sem);
5315	for (vma = mm->mmap; vma; vma = vma->vm_next) {
5316		struct mm_walk mem_cgroup_count_precharge_walk = {
5317			.pmd_entry = mem_cgroup_count_precharge_pte_range,
5318			.mm = mm,
5319			.private = vma,
5320		};
5321		if (is_vm_hugetlb_page(vma))
5322			continue;
5323		walk_page_range(vma->vm_start, vma->vm_end,
5324					&mem_cgroup_count_precharge_walk);
5325	}
5326	up_read(&mm->mmap_sem);
5327
5328	precharge = mc.precharge;
5329	mc.precharge = 0;
5330
5331	return precharge;
5332}
5333
5334static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5335{
5336	unsigned long precharge = mem_cgroup_count_precharge(mm);
5337
5338	VM_BUG_ON(mc.moving_task);
5339	mc.moving_task = current;
5340	return mem_cgroup_do_precharge(precharge);
5341}
5342
5343/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5344static void __mem_cgroup_clear_mc(void)
5345{
5346	struct mem_cgroup *from = mc.from;
5347	struct mem_cgroup *to = mc.to;
5348
5349	/* we must uncharge all the leftover precharges from mc.to */
5350	if (mc.precharge) {
5351		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
5352		mc.precharge = 0;
5353	}
5354	/*
5355	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5356	 * we must uncharge here.
5357	 */
5358	if (mc.moved_charge) {
5359		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
5360		mc.moved_charge = 0;
5361	}
5362	/* we must fixup refcnts and charges */
5363	if (mc.moved_swap) {
5364		/* uncharge swap account from the old cgroup */
5365		if (!mem_cgroup_is_root(mc.from))
5366			res_counter_uncharge(&mc.from->memsw,
5367						PAGE_SIZE * mc.moved_swap);
5368		__mem_cgroup_put(mc.from, mc.moved_swap);
5369
5370		if (!mem_cgroup_is_root(mc.to)) {
5371			/*
5372			 * we charged both to->res and to->memsw, so we should
5373			 * uncharge to->res.
5374			 */
5375			res_counter_uncharge(&mc.to->res,
5376						PAGE_SIZE * mc.moved_swap);
5377		}
5378		/* we've already done mem_cgroup_get(mc.to) */
5379		mc.moved_swap = 0;
5380	}
5381	memcg_oom_recover(from);
5382	memcg_oom_recover(to);
5383	wake_up_all(&mc.waitq);
5384}
5385
5386static void mem_cgroup_clear_mc(void)
5387{
5388	struct mem_cgroup *from = mc.from;
5389
5390	/*
5391	 * we must clear moving_task before waking up waiters at the end of
5392	 * task migration.
5393	 */
5394	mc.moving_task = NULL;
5395	__mem_cgroup_clear_mc();
5396	spin_lock(&mc.lock);
5397	mc.from = NULL;
5398	mc.to = NULL;
5399	spin_unlock(&mc.lock);
5400	mem_cgroup_end_move(from);
5401}
5402
5403static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5404				struct cgroup *cgroup,
5405				struct cgroup_taskset *tset)
5406{
5407	struct task_struct *p = cgroup_taskset_first(tset);
5408	int ret = 0;
5409	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
5410
5411	if (memcg->move_charge_at_immigrate) {
5412		struct mm_struct *mm;
5413		struct mem_cgroup *from = mem_cgroup_from_task(p);
5414
5415		VM_BUG_ON(from == memcg);
5416
5417		mm = get_task_mm(p);
5418		if (!mm)
5419			return 0;
5420		/* We move charges only when we move the owner of the mm */
5421		if (mm->owner == p) {
5422			VM_BUG_ON(mc.from);
5423			VM_BUG_ON(mc.to);
5424			VM_BUG_ON(mc.precharge);
5425			VM_BUG_ON(mc.moved_charge);
5426			VM_BUG_ON(mc.moved_swap);
5427			mem_cgroup_start_move(from);
5428			spin_lock(&mc.lock);
5429			mc.from = from;
5430			mc.to = memcg;
5431			spin_unlock(&mc.lock);
5432			/* We set mc.moving_task later */
5433
5434			ret = mem_cgroup_precharge_mc(mm);
5435			if (ret)
5436				mem_cgroup_clear_mc();
5437		}
5438		mmput(mm);
5439	}
5440	return ret;
5441}
5442
5443static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5444				struct cgroup *cgroup,
5445				struct cgroup_taskset *tset)
5446{
5447	mem_cgroup_clear_mc();
5448}
5449
5450static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5451				unsigned long addr, unsigned long end,
5452				struct mm_walk *walk)
5453{
5454	int ret = 0;
5455	struct vm_area_struct *vma = walk->private;
5456	pte_t *pte;
5457	spinlock_t *ptl;
5458
5459	split_huge_page_pmd(walk->mm, pmd);
5460retry:
5461	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5462	for (; addr != end; addr += PAGE_SIZE) {
5463		pte_t ptent = *(pte++);
5464		union mc_target target;
5465		int type;
5466		struct page *page;
5467		struct page_cgroup *pc;
5468		swp_entry_t ent;
5469
5470		if (!mc.precharge)
5471			break;
5472
5473		type = is_target_pte_for_mc(vma, addr, ptent, &target);
5474		switch (type) {
5475		case MC_TARGET_PAGE:
5476			page = target.page;
5477			if (isolate_lru_page(page))
5478				goto put;
5479			pc = lookup_page_cgroup(page);
5480			if (!mem_cgroup_move_account(page, 1, pc,
5481						     mc.from, mc.to, false)) {
5482				mc.precharge--;
5483				/* we uncharge from mc.from later. */
5484				mc.moved_charge++;
5485			}
5486			putback_lru_page(page);
5487put:			/* is_target_pte_for_mc() gets the page */
5488			put_page(page);
5489			break;
5490		case MC_TARGET_SWAP:
5491			ent = target.ent;
5492			if (!mem_cgroup_move_swap_account(ent,
5493						mc.from, mc.to, false)) {
5494				mc.precharge--;
5495				/* we fixup refcnts and charges later. */
5496				mc.moved_swap++;
5497			}
5498			break;
5499		default:
5500			break;
5501		}
5502	}
5503	pte_unmap_unlock(pte - 1, ptl);
5504	cond_resched();
5505
5506	if (addr != end) {
5507		/*
5508		 * We have consumed all precharges we got in can_attach().
5509		 * We try charge one by one, but don't do any additional
5510		 * charges to mc.to if we have failed in charge once in attach()
5511		 * phase.
5512		 */
5513		ret = mem_cgroup_do_precharge(1);
5514		if (!ret)
5515			goto retry;
5516	}
5517
5518	return ret;
5519}
5520
5521static void mem_cgroup_move_charge(struct mm_struct *mm)
5522{
5523	struct vm_area_struct *vma;
5524
5525	lru_add_drain_all();
5526retry:
5527	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5528		/*
5529		 * Someone who is holding the mmap_sem might be waiting on
5530		 * our waitq. So we cancel all extra charges, wake up all waiters,
5531		 * and retry. Because we cancel precharges, we might not be able
5532		 * to move enough charges, but moving charge is a best-effort
5533		 * feature anyway, so it wouldn't be a big problem.
5534		 */
5535		__mem_cgroup_clear_mc();
5536		cond_resched();
5537		goto retry;
5538	}
5539	for (vma = mm->mmap; vma; vma = vma->vm_next) {
5540		int ret;
5541		struct mm_walk mem_cgroup_move_charge_walk = {
5542			.pmd_entry = mem_cgroup_move_charge_pte_range,
5543			.mm = mm,
5544			.private = vma,
5545		};
5546		if (is_vm_hugetlb_page(vma))
5547			continue;
5548		ret = walk_page_range(vma->vm_start, vma->vm_end,
5549						&mem_cgroup_move_charge_walk);
5550		if (ret)
5551			/*
5552			 * This means we have consumed all precharges and failed
5553			 * to do any additional charging. Just abandon here.
5554			 */
5555			break;
5556	}
5557	up_read(&mm->mmap_sem);
5558}
5559
5560static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5561				struct cgroup *cont,
5562				struct cgroup_taskset *tset)
5563{
5564	struct task_struct *p = cgroup_taskset_first(tset);
5565	struct mm_struct *mm = get_task_mm(p);
5566
5567	if (mm) {
5568		if (mc.to)
5569			mem_cgroup_move_charge(mm);
5570		put_swap_token(mm);
5571		mmput(mm);
5572	}
5573	if (mc.to)
5574		mem_cgroup_clear_mc();
5575}
5576#else	/* !CONFIG_MMU */
5577static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5578				struct cgroup *cgroup,
5579				struct cgroup_taskset *tset)
5580{
5581	return 0;
5582}
5583static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5584				struct cgroup *cgroup,
5585				struct cgroup_taskset *tset)
5586{
5587}
5588static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5589				struct cgroup *cont,
5590				struct cgroup_taskset *tset)
5591{
5592}
5593#endif
5594
5595struct cgroup_subsys mem_cgroup_subsys = {
5596	.name = "memory",
5597	.subsys_id = mem_cgroup_subsys_id,
5598	.create = mem_cgroup_create,
5599	.pre_destroy = mem_cgroup_pre_destroy,
5600	.destroy = mem_cgroup_destroy,
5601	.populate = mem_cgroup_populate,
5602	.can_attach = mem_cgroup_can_attach,
5603	.cancel_attach = mem_cgroup_cancel_attach,
5604	.attach = mem_cgroup_move_task,
5605	.early_init = 0,
5606	.use_id = 1,
5607};
5608
5609#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
5610static int __init enable_swap_account(char *s)
5611{
5612	/* consider enabled if no parameter or 1 is given */
5613	if (!strcmp(s, "1"))
5614		really_do_swap_account = 1;
5615	else if (!strcmp(s, "0"))
5616		really_do_swap_account = 0;
5617	return 1;
5618}
5619__setup("swapaccount=", enable_swap_account);
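/*
 * With CONFIG_CGROUP_MEM_RES_CTLR_SWAP built in, swap accounting can thus
 * be toggled at boot from the kernel command line: "swapaccount=0"
 * disables mem+swap accounting and "swapaccount=1" enables it.
 */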
5620
5621#endif
5622