memcontrol.c revision 7ffd4ca7a2cdd7a18f0b499a4e9e0e7cf36ba018
1/* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * Memory thresholds
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21 * GNU General Public License for more details.
22 */
23
24#include <linux/res_counter.h>
25#include <linux/memcontrol.h>
26#include <linux/cgroup.h>
27#include <linux/mm.h>
28#include <linux/hugetlb.h>
29#include <linux/pagemap.h>
30#include <linux/smp.h>
31#include <linux/page-flags.h>
32#include <linux/backing-dev.h>
33#include <linux/bit_spinlock.h>
34#include <linux/rcupdate.h>
35#include <linux/limits.h>
36#include <linux/mutex.h>
37#include <linux/rbtree.h>
38#include <linux/slab.h>
39#include <linux/swap.h>
40#include <linux/swapops.h>
41#include <linux/spinlock.h>
42#include <linux/eventfd.h>
43#include <linux/sort.h>
44#include <linux/fs.h>
45#include <linux/seq_file.h>
46#include <linux/vmalloc.h>
47#include <linux/mm_inline.h>
48#include <linux/page_cgroup.h>
49#include <linux/cpu.h>
50#include <linux/oom.h>
51#include "internal.h"
52
53#include <asm/uaccess.h>
54
55#include <trace/events/vmscan.h>
56
57struct cgroup_subsys mem_cgroup_subsys __read_mostly;
58#define MEM_CGROUP_RECLAIM_RETRIES	5
59struct mem_cgroup *root_mem_cgroup __read_mostly;
60
61#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
62/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
63int do_swap_account __read_mostly;
64
65/* for remembering the boot option */
66#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
67static int really_do_swap_account __initdata = 1;
68#else
69static int really_do_swap_account __initdata = 0;
70#endif
71
72#else
73#define do_swap_account		(0)
74#endif
75
76/*
77 * The per-memcg event counter is incremented at every pagein/pageout. This
78 * counter is used to trigger periodic events. This is straightforward and
79 * better than using jiffies etc. to handle periodic memcg events.
80 *
81 * These values will be used as !((event) & ((1 <<(thresh)) - 1))
82 */
83#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
84#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
85
86/*
87 * Statistics for memory cgroup.
88 */
89enum mem_cgroup_stat_index {
90	/*
91	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
92	 */
93	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
94	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
95	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
96	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
97	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
98	MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
99	MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
100	/* incremented at every pagein/pageout */
101	MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA,
102	MEM_CGROUP_ON_MOVE,	/* someone is moving account between groups */
103
104	MEM_CGROUP_STAT_NSTATS,
105};
106
107struct mem_cgroup_stat_cpu {
108	s64 count[MEM_CGROUP_STAT_NSTATS];
109};
110
111/*
112 * per-zone information in memory controller.
113 */
114struct mem_cgroup_per_zone {
115	/*
116	 * spin_lock to protect the per cgroup LRU
117	 */
118	struct list_head	lists[NR_LRU_LISTS];
119	unsigned long		count[NR_LRU_LISTS];
120
121	struct zone_reclaim_stat reclaim_stat;
122	struct rb_node		tree_node;	/* RB tree node */
123	unsigned long long	usage_in_excess;/* Set to the value by which */
124						/* the soft limit is exceeded*/
125	bool			on_tree;
126	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
127						/* use container_of	   */
128};
129/* Macro for accessing counter */
130#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
131
132struct mem_cgroup_per_node {
133	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
134};
135
136struct mem_cgroup_lru_info {
137	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
138};
139
140/*
141 * Cgroups above their limits are maintained in a RB-Tree, independent of
142 * their hierarchy representation
143 */
144
145struct mem_cgroup_tree_per_zone {
146	struct rb_root rb_root;
147	spinlock_t lock;
148};
149
150struct mem_cgroup_tree_per_node {
151	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
152};
153
154struct mem_cgroup_tree {
155	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
156};
157
158static struct mem_cgroup_tree soft_limit_tree __read_mostly;
159
160struct mem_cgroup_threshold {
161	struct eventfd_ctx *eventfd;
162	u64 threshold;
163};
164
165/* For threshold */
166struct mem_cgroup_threshold_ary {
167	/* An array index points to threshold just below usage. */
168	int current_threshold;
169	/* Size of entries[] */
170	unsigned int size;
171	/* Array of thresholds */
172	struct mem_cgroup_threshold entries[0];
173};
174
175struct mem_cgroup_thresholds {
176	/* Primary thresholds array */
177	struct mem_cgroup_threshold_ary *primary;
178	/*
179	 * Spare threshold array.
180	 * This is needed to make mem_cgroup_unregister_event() "never fail".
181	 * It must be able to store at least primary->size - 1 entries.
182	 */
183	struct mem_cgroup_threshold_ary *spare;
184};
185
186/* for OOM */
187struct mem_cgroup_eventfd_list {
188	struct list_head list;
189	struct eventfd_ctx *eventfd;
190};
191
192static void mem_cgroup_threshold(struct mem_cgroup *mem);
193static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
194
195/*
196 * The memory controller data structure. The memory controller controls both
197 * page cache and RSS per cgroup. We would eventually like to provide
198 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
199 * to help the administrator determine what knobs to tune.
200 *
201 * TODO: Add a water mark for the memory controller. Reclaim will begin when
202 * we hit the water mark. May be even add a low water mark, such that
203 * no reclaim occurs from a cgroup at it's low water mark, this is
204 * a feature that will be implemented much later in the future.
205 */
206struct mem_cgroup {
207	struct cgroup_subsys_state css;
208	/*
209	 * the counter to account for memory usage
210	 */
211	struct res_counter res;
212	/*
213	 * the counter to account for mem+swap usage.
214	 */
215	struct res_counter memsw;
216	/*
217	 * Per cgroup active and inactive list, similar to the
218	 * per zone LRU lists.
219	 */
220	struct mem_cgroup_lru_info info;
221	/*
222	 * While reclaiming in a hierarchy, we cache the last child we
223	 * reclaimed from.
224	 */
225	int last_scanned_child;
226	/*
227	 * Should the accounting and control be hierarchical, per subtree?
228	 */
229	bool use_hierarchy;
230	atomic_t	oom_lock;
231	atomic_t	refcnt;
232
233	unsigned int	swappiness;
234	/* OOM-Killer disable */
235	int		oom_kill_disable;
236
237	/* set when res.limit == memsw.limit */
238	bool		memsw_is_minimum;
239
240	/* protect arrays of thresholds */
241	struct mutex thresholds_lock;
242
243	/* thresholds for memory usage. RCU-protected */
244	struct mem_cgroup_thresholds thresholds;
245
246	/* thresholds for mem+swap usage. RCU-protected */
247	struct mem_cgroup_thresholds memsw_thresholds;
248
249	/* For oom notifier event fd */
250	struct list_head oom_notify;
251
252	/*
253	 * Should we move charges of a task when a task is moved into this
254	 * mem_cgroup ? And what type of charges should we move ?
255	 */
256	unsigned long 	move_charge_at_immigrate;
257	/*
258	 * percpu counter.
259	 */
260	struct mem_cgroup_stat_cpu *stat;
261	/*
262	 * used when a cpu is offlined or other synchronizations
263	 * See mem_cgroup_read_stat().
264	 */
265	struct mem_cgroup_stat_cpu nocpu_base;
266	spinlock_t pcp_counter_lock;
267};
268
269/* Stuffs for move charges at task migration. */
270/*
271 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
272 * left-shifted bitmap of these types.
273 */
274enum move_type {
275	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
276	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
277	NR_MOVE_TYPE,
278};
279
280/* "mc" and its members are protected by cgroup_mutex */
281static struct move_charge_struct {
282	spinlock_t	  lock; /* for from, to */
283	struct mem_cgroup *from;
284	struct mem_cgroup *to;
285	unsigned long precharge;
286	unsigned long moved_charge;
287	unsigned long moved_swap;
288	struct task_struct *moving_task;	/* a task moving charges */
289	wait_queue_head_t waitq;		/* a waitq for other context */
290} mc = {
291	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
292	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
293};
294
295static bool move_anon(void)
296{
297	return test_bit(MOVE_CHARGE_TYPE_ANON,
298					&mc.to->move_charge_at_immigrate);
299}
300
301static bool move_file(void)
302{
303	return test_bit(MOVE_CHARGE_TYPE_FILE,
304					&mc.to->move_charge_at_immigrate);
305}
306
307/*
308 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
309 * limit reclaim to prevent infinite loops, if they ever occur.
310 */
311#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
312#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)
313
314enum charge_type {
315	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
316	MEM_CGROUP_CHARGE_TYPE_MAPPED,
317	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
318	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
319	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
320	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
321	NR_CHARGE_TYPE,
322};
323
324/* for encoding cft->private value on file */
325#define _MEM			(0)
326#define _MEMSWAP		(1)
327#define _OOM_TYPE		(2)
328#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
329#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
330#define MEMFILE_ATTR(val)	((val) & 0xffff)
331/* Used for OOM notifier */
332#define OOM_CONTROL		(0)
333
334/*
335 * Reclaim flags for mem_cgroup_hierarchical_reclaim
336 */
337#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
338#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
339#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
340#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
341#define MEM_CGROUP_RECLAIM_SOFT_BIT	0x2
342#define MEM_CGROUP_RECLAIM_SOFT		(1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
343
344static void mem_cgroup_get(struct mem_cgroup *mem);
345static void mem_cgroup_put(struct mem_cgroup *mem);
346static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
347static void drain_all_stock_async(void);
348
349static struct mem_cgroup_per_zone *
350mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
351{
352	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
353}
354
355struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
356{
357	return &mem->css;
358}
359
360static struct mem_cgroup_per_zone *
361page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
362{
363	int nid = page_to_nid(page);
364	int zid = page_zonenum(page);
365
366	return mem_cgroup_zoneinfo(mem, nid, zid);
367}
368
369static struct mem_cgroup_tree_per_zone *
370soft_limit_tree_node_zone(int nid, int zid)
371{
372	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
373}
374
375static struct mem_cgroup_tree_per_zone *
376soft_limit_tree_from_page(struct page *page)
377{
378	int nid = page_to_nid(page);
379	int zid = page_zonenum(page);
380
381	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
382}
383
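/*
 * Insert @mz into the per-zone soft limit RB-tree @mctz, ordered by how far
 * the memcg exceeds its soft limit. Nothing is done if @mz is already on the
 * tree or if the new excess is zero. Callers hold mctz->lock.
 */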
384static void
385__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
386				struct mem_cgroup_per_zone *mz,
387				struct mem_cgroup_tree_per_zone *mctz,
388				unsigned long long new_usage_in_excess)
389{
390	struct rb_node **p = &mctz->rb_root.rb_node;
391	struct rb_node *parent = NULL;
392	struct mem_cgroup_per_zone *mz_node;
393
394	if (mz->on_tree)
395		return;
396
397	mz->usage_in_excess = new_usage_in_excess;
398	if (!mz->usage_in_excess)
399		return;
400	while (*p) {
401		parent = *p;
402		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
403					tree_node);
404		if (mz->usage_in_excess < mz_node->usage_in_excess)
405			p = &(*p)->rb_left;
406		/*
407		 * We can't avoid mem cgroups that are over their soft
408		 * limit by the same amount
409		 */
410		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
411			p = &(*p)->rb_right;
412	}
413	rb_link_node(&mz->tree_node, parent, p);
414	rb_insert_color(&mz->tree_node, &mctz->rb_root);
415	mz->on_tree = true;
416}
417
418static void
419__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
420				struct mem_cgroup_per_zone *mz,
421				struct mem_cgroup_tree_per_zone *mctz)
422{
423	if (!mz->on_tree)
424		return;
425	rb_erase(&mz->tree_node, &mctz->rb_root);
426	mz->on_tree = false;
427}
428
429static void
430mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
431				struct mem_cgroup_per_zone *mz,
432				struct mem_cgroup_tree_per_zone *mctz)
433{
434	spin_lock(&mctz->lock);
435	__mem_cgroup_remove_exceeded(mem, mz, mctz);
436	spin_unlock(&mctz->lock);
437}
438
439
440static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
441{
442	unsigned long long excess;
443	struct mem_cgroup_per_zone *mz;
444	struct mem_cgroup_tree_per_zone *mctz;
445	int nid = page_to_nid(page);
446	int zid = page_zonenum(page);
447	mctz = soft_limit_tree_from_page(page);
448
449	/*
450	 * Necessary to update all ancestors when hierarchy is used,
451	 * because their event counter is not touched.
452	 */
453	for (; mem; mem = parent_mem_cgroup(mem)) {
454		mz = mem_cgroup_zoneinfo(mem, nid, zid);
455		excess = res_counter_soft_limit_excess(&mem->res);
456		/*
457		 * We have to update the tree if mz is on RB-tree or
458		 * mem is over its softlimit.
459		 */
460		if (excess || mz->on_tree) {
461			spin_lock(&mctz->lock);
462			/* if on-tree, remove it */
463			if (mz->on_tree)
464				__mem_cgroup_remove_exceeded(mem, mz, mctz);
465			/*
466			 * Insert again. mz->usage_in_excess will be updated.
467			 * If excess is 0, no tree ops.
468			 */
469			__mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
470			spin_unlock(&mctz->lock);
471		}
472	}
473}
474
475static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
476{
477	int node, zone;
478	struct mem_cgroup_per_zone *mz;
479	struct mem_cgroup_tree_per_zone *mctz;
480
481	for_each_node_state(node, N_POSSIBLE) {
482		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
483			mz = mem_cgroup_zoneinfo(mem, node, zone);
484			mctz = soft_limit_tree_node_zone(node, zone);
485			mem_cgroup_remove_exceeded(mem, mz, mctz);
486		}
487	}
488}
489
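/*
 * Return the mem_cgroup_per_zone with the largest soft limit excess (the
 * rightmost node in @mctz) with a reference held on its memcg, or NULL if
 * the tree is empty. The node is removed from the tree; the caller is
 * expected to re-insert it after reclaim. Nodes that no longer exceed their
 * soft limit, or whose css can no longer be pinned, are skipped.
 */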
490static struct mem_cgroup_per_zone *
491__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
492{
493	struct rb_node *rightmost = NULL;
494	struct mem_cgroup_per_zone *mz;
495
496retry:
497	mz = NULL;
498	rightmost = rb_last(&mctz->rb_root);
499	if (!rightmost)
500		goto done;		/* Nothing to reclaim from */
501
502	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
503	/*
504	 * Remove the node now but someone else can add it back,
505	 * we will add it back at the end of reclaim to its correct
506	 * position in the tree.
507	 */
508	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
509	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
510		!css_tryget(&mz->mem->css))
511		goto retry;
512done:
513	return mz;
514}
515
516static struct mem_cgroup_per_zone *
517mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
518{
519	struct mem_cgroup_per_zone *mz;
520
521	spin_lock(&mctz->lock);
522	mz = __mem_cgroup_largest_soft_limit_node(mctz);
523	spin_unlock(&mctz->lock);
524	return mz;
525}
526
527/*
528 * Implementation Note: reading percpu statistics for memcg.
529 *
530 * Both vmstat[] and percpu_counter have thresholds and do periodic
531 * synchronization to implement a "quick" read. There is a trade-off between
532 * the cost of reading and the precision of the value. We may eventually
533 * implement a similar periodic synchronization for memcg's counters.
534 *
535 * But this _read() function is currently used for the user interface. The
536 * user accounts memory usage per memory cgroup and _always_ requires an exact
537 * value because the numbers are used for accounting. Even with a
538 * quick-and-fuzzy read, we would still have to visit all online cpus and sum
539 * up the values. So, for now, unnecessary synchronization is not implemented
540 * (it is only implemented for cpu hotplug).
541 *
542 * If kernel-internal users can make do with an inexact value, and reading all
543 * cpu values becomes a performance bottleneck in some common workload, a
544 * threshold and synchronization scheme like vmstat[] should be implemented.
545 */
546static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
547		enum mem_cgroup_stat_index idx)
548{
549	int cpu;
550	s64 val = 0;
551
552	get_online_cpus();
553	for_each_online_cpu(cpu)
554		val += per_cpu(mem->stat->count[idx], cpu);
555#ifdef CONFIG_HOTPLUG_CPU
556	spin_lock(&mem->pcp_counter_lock);
557	val += mem->nocpu_base.count[idx];
558	spin_unlock(&mem->pcp_counter_lock);
559#endif
560	put_online_cpus();
561	return val;
562}
563
564static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
565{
566	s64 ret;
567
568	ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
569	ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
570	return ret;
571}
572
573static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
574					 bool charge)
575{
576	int val = (charge) ? 1 : -1;
577	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
578}
579
580static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
581					 bool file, int nr_pages)
582{
583	preempt_disable();
584
585	if (file)
586		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
587	else
588		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);
589
590	/* pagein of a big page is an event. So, ignore page size */
591	if (nr_pages > 0)
592		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
593	else {
594		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
595		nr_pages = -nr_pages; /* for event */
596	}
597
598	__this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);
599
600	preempt_enable();
601}
602
603static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
604					enum lru_list idx)
605{
606	int nid, zid;
607	struct mem_cgroup_per_zone *mz;
608	u64 total = 0;
609
610	for_each_online_node(nid)
611		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
612			mz = mem_cgroup_zoneinfo(mem, nid, zid);
613			total += MEM_CGROUP_ZSTAT(mz, idx);
614		}
615	return total;
616}
617
618static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
619{
620	s64 val;
621
622	val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);
623
624	return !(val & ((1 << event_mask_shift) - 1));
625}
626
627/*
628 * Check events in order: thresholds are checked at a finer grain than
629 * soft limit tree updates (see *_EVENTS_THRESH above).
630 */
631static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
632{
633	/* threshold event is triggered in finer grain than soft limit */
634	if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
635		mem_cgroup_threshold(mem);
636		if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
637			mem_cgroup_update_tree(mem, page);
638	}
639}
640
641static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
642{
643	return container_of(cgroup_subsys_state(cont,
644				mem_cgroup_subsys_id), struct mem_cgroup,
645				css);
646}
647
648struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
649{
650	/*
651	 * mm_update_next_owner() may clear mm->owner to NULL
652	 * if it races with swapoff, page migration, etc.
653	 * So this can be called with p == NULL.
654	 */
655	if (unlikely(!p))
656		return NULL;
657
658	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
659				struct mem_cgroup, css);
660}
661
662static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
663{
664	struct mem_cgroup *mem = NULL;
665
666	if (!mm)
667		return NULL;
668	/*
669	 * Because we have no locks, mm->owner may be being moved to another
670	 * cgroup. We use css_tryget() here even if this looks
671	 * pessimistic (rather than adding locks here).
672	 */
673	rcu_read_lock();
674	do {
675		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
676		if (unlikely(!mem))
677			break;
678	} while (!css_tryget(&mem->css));
679	rcu_read_unlock();
680	return mem;
681}
682
683/* The caller has to guarantee "mem" exists before calling this */
684static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
685{
686	struct cgroup_subsys_state *css;
687	int found;
688
689	if (!mem) /* ROOT cgroup has the smallest ID */
690		return root_mem_cgroup; /*css_put/get against root is ignored*/
691	if (!mem->use_hierarchy) {
692		if (css_tryget(&mem->css))
693			return mem;
694		return NULL;
695	}
696	rcu_read_lock();
697	/*
698	 * Search for the memory cgroup which has the smallest ID under the
699	 * given ROOT cgroup (ID >= 1).
700	 */
701	css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
702	if (css && css_tryget(css))
703		mem = container_of(css, struct mem_cgroup, css);
704	else
705		mem = NULL;
706	rcu_read_unlock();
707	return mem;
708}
709
710static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
711					struct mem_cgroup *root,
712					bool cond)
713{
714	int nextid = css_id(&iter->css) + 1;
715	int found;
716	int hierarchy_used;
717	struct cgroup_subsys_state *css;
718
719	hierarchy_used = iter->use_hierarchy;
720
721	css_put(&iter->css);
722	/* If no ROOT, walk all, ignore hierarchy */
723	if (!cond || (root && !hierarchy_used))
724		return NULL;
725
726	if (!root)
727		root = root_mem_cgroup;
728
729	do {
730		iter = NULL;
731		rcu_read_lock();
732
733		css = css_get_next(&mem_cgroup_subsys, nextid,
734				&root->css, &found);
735		if (css && css_tryget(css))
736			iter = container_of(css, struct mem_cgroup, css);
737		rcu_read_unlock();
738		/* If css is NULL, no more cgroups will be found */
739		nextid = found + 1;
740	} while (css && !iter);
741
742	return iter;
743}
744/*
745 * for_each_mem_cgroup_tree() visits all cgroups under the tree. Please be
746 * careful: breaking out of the loop is not allowed because we hold a
747 * reference count. Instead, set "cond" to false and "continue" to exit the loop.
748 */
749#define for_each_mem_cgroup_tree_cond(iter, root, cond)	\
750	for (iter = mem_cgroup_start_loop(root);\
751	     iter != NULL;\
752	     iter = mem_cgroup_get_next(iter, root, cond))
753
754#define for_each_mem_cgroup_tree(iter, root) \
755	for_each_mem_cgroup_tree_cond(iter, root, true)
756
757#define for_each_mem_cgroup_all(iter) \
758	for_each_mem_cgroup_tree_cond(iter, NULL, true)
759
760
761static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
762{
763	return (mem == root_mem_cgroup);
764}
765
766/*
767 * The following LRU functions are allowed to be used without PCG_LOCK.
768 * Operations are called by global LRU routines independently from memcg.
769 * What we have to take care of here is the validity of pc->mem_cgroup.
770 *
771 * Changes to pc->mem_cgroup happen when
772 * 1. charge
773 * 2. moving account
774 * In the typical case, "charge" is done before add-to-lru. The exception is
775 * SwapCache, which is added to the LRU before the charge.
776 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
777 * When moving account, the page is not on LRU. It's isolated.
778 */
779
780void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
781{
782	struct page_cgroup *pc;
783	struct mem_cgroup_per_zone *mz;
784
785	if (mem_cgroup_disabled())
786		return;
787	pc = lookup_page_cgroup(page);
788	/* can happen while we handle swapcache. */
789	if (!TestClearPageCgroupAcctLRU(pc))
790		return;
791	VM_BUG_ON(!pc->mem_cgroup);
792	/*
793	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
794	 * removed from global LRU.
795	 */
796	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
797	/* huge page split is done under lru_lock. so, we have no races. */
798	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
799	if (mem_cgroup_is_root(pc->mem_cgroup))
800		return;
801	VM_BUG_ON(list_empty(&pc->lru));
802	list_del_init(&pc->lru);
803}
804
805void mem_cgroup_del_lru(struct page *page)
806{
807	mem_cgroup_del_lru_list(page, page_lru(page));
808}
809
810/*
811 * Writeback is about to end against a page which has been marked for immediate
812 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
813 * inactive list.
814 */
815void mem_cgroup_rotate_reclaimable_page(struct page *page)
816{
817	struct mem_cgroup_per_zone *mz;
818	struct page_cgroup *pc;
819	enum lru_list lru = page_lru(page);
820
821	if (mem_cgroup_disabled())
822		return;
823
824	pc = lookup_page_cgroup(page);
825	/* unused or root page is not rotated. */
826	if (!PageCgroupUsed(pc))
827		return;
828	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
829	smp_rmb();
830	if (mem_cgroup_is_root(pc->mem_cgroup))
831		return;
832	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
833	list_move_tail(&pc->lru, &mz->lists[lru]);
834}
835
836void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
837{
838	struct mem_cgroup_per_zone *mz;
839	struct page_cgroup *pc;
840
841	if (mem_cgroup_disabled())
842		return;
843
844	pc = lookup_page_cgroup(page);
845	/* unused or root page is not rotated. */
846	if (!PageCgroupUsed(pc))
847		return;
848	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
849	smp_rmb();
850	if (mem_cgroup_is_root(pc->mem_cgroup))
851		return;
852	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
853	list_move(&pc->lru, &mz->lists[lru]);
854}
855
856void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
857{
858	struct page_cgroup *pc;
859	struct mem_cgroup_per_zone *mz;
860
861	if (mem_cgroup_disabled())
862		return;
863	pc = lookup_page_cgroup(page);
864	VM_BUG_ON(PageCgroupAcctLRU(pc));
865	if (!PageCgroupUsed(pc))
866		return;
867	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
868	smp_rmb();
869	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
870	/* huge page split is done under lru_lock. so, we have no races. */
871	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
872	SetPageCgroupAcctLRU(pc);
873	if (mem_cgroup_is_root(pc->mem_cgroup))
874		return;
875	list_add(&pc->lru, &mz->lists[lru]);
876}
877
878/*
879 * When handling SwapCache, pc->mem_cgroup may change while the page is linked
880 * to the LRU because the page may be reused after it is fully uncharged
881 * (because of SwapCache behavior). To handle that, unlink the page_cgroup from
882 * the LRU when charging it again. This function is only used to charge SwapCache.
883 * It's done under lock_page and it is expected that zone->lru_lock is never held.
884 */
885static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
886{
887	unsigned long flags;
888	struct zone *zone = page_zone(page);
889	struct page_cgroup *pc = lookup_page_cgroup(page);
890
891	spin_lock_irqsave(&zone->lru_lock, flags);
892	/*
893	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
894	 * is guarded by lock_page() because the page is SwapCache.
895	 */
896	if (!PageCgroupUsed(pc))
897		mem_cgroup_del_lru_list(page, page_lru(page));
898	spin_unlock_irqrestore(&zone->lru_lock, flags);
899}
900
901static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
902{
903	unsigned long flags;
904	struct zone *zone = page_zone(page);
905	struct page_cgroup *pc = lookup_page_cgroup(page);
906
907	spin_lock_irqsave(&zone->lru_lock, flags);
908	/* link when the page is linked to LRU but page_cgroup isn't */
909	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
910		mem_cgroup_add_lru_list(page, page_lru(page));
911	spin_unlock_irqrestore(&zone->lru_lock, flags);
912}
913
914
915void mem_cgroup_move_lists(struct page *page,
916			   enum lru_list from, enum lru_list to)
917{
918	if (mem_cgroup_disabled())
919		return;
920	mem_cgroup_del_lru_list(page, from);
921	mem_cgroup_add_lru_list(page, to);
922}
923
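/*
 * Return non-zero if @task belongs to @mem or, when @mem uses hierarchical
 * accounting, to any cgroup in @mem's subtree. Returns 0 if the task has no
 * mm or its memcg cannot be pinned.
 */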
924int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
925{
926	int ret;
927	struct mem_cgroup *curr = NULL;
928	struct task_struct *p;
929
930	p = find_lock_task_mm(task);
931	if (!p)
932		return 0;
933	curr = try_get_mem_cgroup_from_mm(p->mm);
934	task_unlock(p);
935	if (!curr)
936		return 0;
937	/*
938	 * We should check use_hierarchy of "mem", not "curr". Checking
939	 * use_hierarchy of "curr" here would make this function return true if
940	 * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the
941	 * *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
942	 */
943	if (mem->use_hierarchy)
944		ret = css_is_ancestor(&curr->css, &mem->css);
945	else
946		ret = (curr == mem);
947	css_put(&curr->css);
948	return ret;
949}
950
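/*
 * Compute the target inactive/active anon ratio for this memcg, following the
 * global heuristic: inactive_ratio = sqrt(10 * size-in-GB), with a minimum of
 * 1. If @present_pages is non-NULL, also return the local inactive ([0]) and
 * active ([1]) anon page counts.
 */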
951static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
952{
953	unsigned long active;
954	unsigned long inactive;
955	unsigned long gb;
956	unsigned long inactive_ratio;
957
958	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
959	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
960
961	gb = (inactive + active) >> (30 - PAGE_SHIFT);
962	if (gb)
963		inactive_ratio = int_sqrt(10 * gb);
964	else
965		inactive_ratio = 1;
966
967	if (present_pages) {
968		present_pages[0] = inactive;
969		present_pages[1] = active;
970	}
971
972	return inactive_ratio;
973}
974
975int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
976{
977	unsigned long active;
978	unsigned long inactive;
979	unsigned long present_pages[2];
980	unsigned long inactive_ratio;
981
982	inactive_ratio = calc_inactive_ratio(memcg, present_pages);
983
984	inactive = present_pages[0];
985	active = present_pages[1];
986
987	if (inactive * inactive_ratio < active)
988		return 1;
989
990	return 0;
991}
992
993int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
994{
995	unsigned long active;
996	unsigned long inactive;
997
998	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
999	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
1000
1001	return (active > inactive);
1002}
1003
1004unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
1005				       struct zone *zone,
1006				       enum lru_list lru)
1007{
1008	int nid = zone_to_nid(zone);
1009	int zid = zone_idx(zone);
1010	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1011
1012	return MEM_CGROUP_ZSTAT(mz, lru);
1013}
1014
1015struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
1016						      struct zone *zone)
1017{
1018	int nid = zone_to_nid(zone);
1019	int zid = zone_idx(zone);
1020	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1021
1022	return &mz->reclaim_stat;
1023}
1024
1025struct zone_reclaim_stat *
1026mem_cgroup_get_reclaim_stat_from_page(struct page *page)
1027{
1028	struct page_cgroup *pc;
1029	struct mem_cgroup_per_zone *mz;
1030
1031	if (mem_cgroup_disabled())
1032		return NULL;
1033
1034	pc = lookup_page_cgroup(page);
1035	if (!PageCgroupUsed(pc))
1036		return NULL;
1037	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
1038	smp_rmb();
1039	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
1040	return &mz->reclaim_stat;
1041}
1042
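/*
 * Memcg-aware LRU isolation: scan up to @nr_to_scan entries from the memcg's
 * per-zone LRU list selected by @active and @file, move isolatable pages to
 * @dst via __isolate_lru_page(), and rotate -EBUSY pages within the memcg LRU.
 * *@scanned is set to the number of pages scanned; the return value is the
 * number of base pages taken (a huge page counts as its number of base pages).
 */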
1043unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
1044					struct list_head *dst,
1045					unsigned long *scanned, int order,
1046					int mode, struct zone *z,
1047					struct mem_cgroup *mem_cont,
1048					int active, int file)
1049{
1050	unsigned long nr_taken = 0;
1051	struct page *page;
1052	unsigned long scan;
1053	LIST_HEAD(pc_list);
1054	struct list_head *src;
1055	struct page_cgroup *pc, *tmp;
1056	int nid = zone_to_nid(z);
1057	int zid = zone_idx(z);
1058	struct mem_cgroup_per_zone *mz;
1059	int lru = LRU_FILE * file + active;
1060	int ret;
1061
1062	BUG_ON(!mem_cont);
1063	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
1064	src = &mz->lists[lru];
1065
1066	scan = 0;
1067	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
1068		if (scan >= nr_to_scan)
1069			break;
1070
1071		if (unlikely(!PageCgroupUsed(pc)))
1072			continue;
1073
1074		page = lookup_cgroup_page(pc);
1075
1076		if (unlikely(!PageLRU(page)))
1077			continue;
1078
1079		scan++;
1080		ret = __isolate_lru_page(page, mode, file);
1081		switch (ret) {
1082		case 0:
1083			list_move(&page->lru, dst);
1084			mem_cgroup_del_lru(page);
1085			nr_taken += hpage_nr_pages(page);
1086			break;
1087		case -EBUSY:
1088			/* we don't affect global LRU but rotate in our LRU */
1089			mem_cgroup_rotate_lru_list(page, page_lru(page));
1090			break;
1091		default:
1092			break;
1093		}
1094	}
1095
1096	*scanned = scan;
1097
1098	trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
1099				      0, 0, 0, mode);
1100
1101	return nr_taken;
1102}
1103
1104#define mem_cgroup_from_res_counter(counter, member)	\
1105	container_of(counter, struct mem_cgroup, member)
1106
1107/**
1108 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1109 * @mem: the memory cgroup
1110 *
1111 * Returns the maximum amount of memory @mem can be charged with, in
1112 * bytes.
1113 */
1114static unsigned long long mem_cgroup_margin(struct mem_cgroup *mem)
1115{
1116	unsigned long long margin;
1117
1118	margin = res_counter_margin(&mem->res);
1119	if (do_swap_account)
1120		margin = min(margin, res_counter_margin(&mem->memsw));
1121	return margin;
1122}
1123
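/*
 * Return the swappiness to use for this memcg. For the root cgroup (no
 * parent) the global vm_swappiness is used; otherwise return the memcg's own
 * setting.
 */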
1124static unsigned int get_swappiness(struct mem_cgroup *memcg)
1125{
1126	struct cgroup *cgrp = memcg->css.cgroup;
1127
1128	/* root ? */
1129	if (cgrp->parent == NULL)
1130		return vm_swappiness;
1131
1132	return memcg->swappiness;
1133}
1134
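/*
 * mem_cgroup_start_move()/mem_cgroup_end_move() bracket an account move. They
 * raise/lower the MEM_CGROUP_ON_MOVE percpu counter on every cpu (and the
 * nocpu_base fallback) so that statistics updaters can detect a move in
 * progress (see mem_cgroup_stealed() below) and take the page_cgroup move
 * lock. The synchronize_rcu() in start_move waits for updaters already
 * running under rcu_read_lock(), so the move only proceeds once every updater
 * can see the raised counter.
 */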
1135static void mem_cgroup_start_move(struct mem_cgroup *mem)
1136{
1137	int cpu;
1138
1139	get_online_cpus();
1140	spin_lock(&mem->pcp_counter_lock);
1141	for_each_online_cpu(cpu)
1142		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
1143	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
1144	spin_unlock(&mem->pcp_counter_lock);
1145	put_online_cpus();
1146
1147	synchronize_rcu();
1148}
1149
1150static void mem_cgroup_end_move(struct mem_cgroup *mem)
1151{
1152	int cpu;
1153
1154	if (!mem)
1155		return;
1156	get_online_cpus();
1157	spin_lock(&mem->pcp_counter_lock);
1158	for_each_online_cpu(cpu)
1159		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
1160	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
1161	spin_unlock(&mem->pcp_counter_lock);
1162	put_online_cpus();
1163}
1164/*
1165 * Two routines for checking whether "mem" is under move_account() or not.
1166 *
1167 * mem_cgroup_stealed() - checks whether a cgroup is mc.from. This is used
1168 *			  to avoid races in accounting. If true,
1169 *			  pc->mem_cgroup may be overwritten.
1170 *
1171 * mem_cgroup_under_move() - checks whether a cgroup is mc.from or mc.to or
1172 *			  is under the hierarchy of the moving cgroups. This is
1173 *			  for waiting at high memory pressure caused by "move".
1174 */
1175
1176static bool mem_cgroup_stealed(struct mem_cgroup *mem)
1177{
1178	VM_BUG_ON(!rcu_read_lock_held());
1179	return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
1180}
1181
1182static bool mem_cgroup_under_move(struct mem_cgroup *mem)
1183{
1184	struct mem_cgroup *from;
1185	struct mem_cgroup *to;
1186	bool ret = false;
1187	/*
1188	 * Unlike the task-move routines, we access mc.to and mc.from without
1189	 * mutual exclusion via cgroup_mutex. Here, we take the spinlock instead.
1190	 */
1191	spin_lock(&mc.lock);
1192	from = mc.from;
1193	to = mc.to;
1194	if (!from)
1195		goto unlock;
1196	if (from == mem || to == mem
1197	    || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
1198	    || (mem->use_hierarchy && css_is_ancestor(&to->css,	&mem->css)))
1199		ret = true;
1200unlock:
1201	spin_unlock(&mc.lock);
1202	return ret;
1203}
1204
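/*
 * If another task is moving charges into or out of @mem's hierarchy, sleep on
 * mc.waitq until the mover wakes us. Returns true if we waited, so callers
 * can retry their charge.
 */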
1205static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
1206{
1207	if (mc.moving_task && current != mc.moving_task) {
1208		if (mem_cgroup_under_move(mem)) {
1209			DEFINE_WAIT(wait);
1210			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1211			/* moving charge context might have finished. */
1212			if (mc.moving_task)
1213				schedule();
1214			finish_wait(&mc.waitq, &wait);
1215			return true;
1216		}
1217	}
1218	return false;
1219}
1220
1221/**
1222 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1223 * @memcg: The memory cgroup that went over limit
1224 * @p: Task that is going to be killed
1225 *
1226 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1227 * enabled
1228 */
1229void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1230{
1231	struct cgroup *task_cgrp;
1232	struct cgroup *mem_cgrp;
1233	/*
1234	 * Need a buffer in BSS, can't rely on allocations. The code relies
1235	 * on the assumption that OOM is serialized for memory controller.
1236	 * If this assumption is broken, revisit this code.
1237	 */
1238	static char memcg_name[PATH_MAX];
1239	int ret;
1240
1241	if (!memcg || !p)
1242		return;
1243
1244
1245	rcu_read_lock();
1246
1247	mem_cgrp = memcg->css.cgroup;
1248	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1249
1250	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1251	if (ret < 0) {
1252		/*
1253		 * Unfortunately, we are unable to convert to a useful name,
1254		 * but we'll still print out the usage information.
1255		 */
1256		rcu_read_unlock();
1257		goto done;
1258	}
1259	rcu_read_unlock();
1260
1261	printk(KERN_INFO "Task in %s killed", memcg_name);
1262
1263	rcu_read_lock();
1264	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1265	if (ret < 0) {
1266		rcu_read_unlock();
1267		goto done;
1268	}
1269	rcu_read_unlock();
1270
1271	/*
1272	 * Continues from above, so we don't need a KERN_ level
1273	 */
1274	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1275done:
1276
1277	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1278		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1279		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1280		res_counter_read_u64(&memcg->res, RES_FAILCNT));
1281	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1282		"failcnt %llu\n",
1283		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1284		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1285		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1286}
1287
1288/*
1289 * This function returns the number of memcgs under the hierarchy tree.
1290 * Returns 1 (self count) if there are no children.
1291 */
1292static int mem_cgroup_count_children(struct mem_cgroup *mem)
1293{
1294	int num = 0;
1295	struct mem_cgroup *iter;
1296
1297	for_each_mem_cgroup_tree(iter, mem)
1298		num++;
1299	return num;
1300}
1301
1302/*
1303 * Return the memory (and swap, if configured) limit for a memcg.
1304 */
1305u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1306{
1307	u64 limit;
1308	u64 memsw;
1309
1310	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1311	limit += total_swap_pages << PAGE_SHIFT;
1312
1313	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1314	/*
1315	 * If memsw is finite and limits the amount of swap space available
1316	 * to this memcg, return that limit.
1317	 */
1318	return min(limit, memsw);
1319}
1320
1321/*
1322 * Visit the first child (need not be the first child as per the ordering
1323 * of the cgroup list, since we track last_scanned_child) of @mem and use
1324 * that to reclaim free pages from.
1325 */
1326static struct mem_cgroup *
1327mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1328{
1329	struct mem_cgroup *ret = NULL;
1330	struct cgroup_subsys_state *css;
1331	int nextid, found;
1332
1333	if (!root_mem->use_hierarchy) {
1334		css_get(&root_mem->css);
1335		ret = root_mem;
1336	}
1337
1338	while (!ret) {
1339		rcu_read_lock();
1340		nextid = root_mem->last_scanned_child + 1;
1341		css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
1342				   &found);
1343		if (css && css_tryget(css))
1344			ret = container_of(css, struct mem_cgroup, css);
1345
1346		rcu_read_unlock();
1347		/* Updates scanning parameter */
1348		if (!css) {
1349			/* this means start scan from ID:1 */
1350			root_mem->last_scanned_child = 0;
1351		} else
1352			root_mem->last_scanned_child = found;
1353	}
1354
1355	return ret;
1356}
1357
1358/*
1359 * Scan the hierarchy if needed to reclaim memory. We remember the last child
1360 * we reclaimed from, so that we don't end up penalizing one child extensively
1361 * based on its position in the children list.
1362 *
1363 * root_mem is the original ancestor that we've been reclaiming from.
1364 *
1365 * We give up and return to the caller when we visit root_mem twice.
1366 * (other groups can be removed while we're walking....)
1367 *
1368 * If shrink==true, to avoid freeing too much, this returns immediately.
1369 */
1370static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1371						struct zone *zone,
1372						gfp_t gfp_mask,
1373						unsigned long reclaim_options)
1374{
1375	struct mem_cgroup *victim;
1376	int ret, total = 0;
1377	int loop = 0;
1378	bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1379	bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1380	bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1381	unsigned long excess;
1382
1383	excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
1384
1385	/* If memsw_is_minimum==1, swap-out is of no use. */
1386	if (root_mem->memsw_is_minimum)
1387		noswap = true;
1388
1389	while (1) {
1390		victim = mem_cgroup_select_victim(root_mem);
1391		if (victim == root_mem) {
1392			loop++;
1393			if (loop >= 1)
1394				drain_all_stock_async();
1395			if (loop >= 2) {
1396				/*
1397				 * If we have not been able to reclaim
1398				 * anything, it might be because there are
1399				 * no reclaimable pages under this hierarchy.
1400				 */
1401				if (!check_soft || !total) {
1402					css_put(&victim->css);
1403					break;
1404				}
1405				/*
1406				 * We want to do more targeted reclaim.
1407				 * excess >> 2 is not so large that we
1408				 * reclaim too much, nor so small that we keep
1409				 * coming back to reclaim from this cgroup.
1410				 */
1411				if (total >= (excess >> 2) ||
1412					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
1413					css_put(&victim->css);
1414					break;
1415				}
1416			}
1417		}
1418		if (!mem_cgroup_local_usage(victim)) {
1419			/* this cgroup's local usage == 0 */
1420			css_put(&victim->css);
1421			continue;
1422		}
1423		/* we use swappiness of local cgroup */
1424		if (check_soft)
1425			ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1426				noswap, get_swappiness(victim), zone);
1427		else
1428			ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1429						noswap, get_swappiness(victim));
1430		css_put(&victim->css);
1431		/*
1432		 * When shrinking usage, we can't check whether we should stop here
1433		 * or reclaim more; it depends on the callers. last_scanned_child
1434		 * works well enough to keep fairness under the tree.
1435		 */
1436		if (shrink)
1437			return ret;
1438		total += ret;
1439		if (check_soft) {
1440			if (!res_counter_soft_limit_excess(&root_mem->res))
1441				return total;
1442		} else if (mem_cgroup_margin(root_mem))
1443			return 1 + total;
1444	}
1445	return total;
1446}
1447
1448/*
1449 * Check whether the OOM killer is already running under our hierarchy.
1450 * If someone else is running it, return false.
1451 */
1452static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1453{
1454	int x, lock_count = 0;
1455	struct mem_cgroup *iter;
1456
1457	for_each_mem_cgroup_tree(iter, mem) {
1458		x = atomic_inc_return(&iter->oom_lock);
1459		lock_count = max(x, lock_count);
1460	}
1461
1462	if (lock_count == 1)
1463		return true;
1464	return false;
1465}
1466
1467static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
1468{
1469	struct mem_cgroup *iter;
1470
1471	/*
1472	 * When a new child is created while the hierarchy is under oom,
1473	 * mem_cgroup_oom_lock() may not be called. We have to use
1474	 * atomic_add_unless() here.
1475	 */
1476	for_each_mem_cgroup_tree(iter, mem)
1477		atomic_add_unless(&iter->oom_lock, -1, 0);
1478	return 0;
1479}
1480
1481
1482static DEFINE_MUTEX(memcg_oom_mutex);
1483static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1484
1485struct oom_wait_info {
1486	struct mem_cgroup *mem;
1487	wait_queue_t	wait;
1488};
1489
1490static int memcg_oom_wake_function(wait_queue_t *wait,
1491	unsigned mode, int sync, void *arg)
1492{
1493	struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
1494	struct oom_wait_info *oom_wait_info;
1495
1496	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1497
1498	if (oom_wait_info->mem == wake_mem)
1499		goto wakeup;
1500	/* if no hierarchy, no match */
1501	if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
1502		return 0;
1503	/*
1504	 * Both oom_wait_info->mem and wake_mem are stable under us,
1505	 * so we can use css_is_ancestor() without worrying about RCU.
1506	 */
1507	if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
1508	    !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
1509		return 0;
1510
1511wakeup:
1512	return autoremove_wake_function(wait, mode, sync, arg);
1513}
1514
1515static void memcg_wakeup_oom(struct mem_cgroup *mem)
1516{
1517	/* for filtering, pass "mem" as argument. */
1518	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
1519}
1520
1521static void memcg_oom_recover(struct mem_cgroup *mem)
1522{
1523	if (mem && atomic_read(&mem->oom_lock))
1524		memcg_wakeup_oom(mem);
1525}
1526
1527/*
1528 * try to call OOM killer. returns false if we should exit memory-reclaim loop.
1529 */
1530bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1531{
1532	struct oom_wait_info owait;
1533	bool locked, need_to_kill;
1534
1535	owait.mem = mem;
1536	owait.wait.flags = 0;
1537	owait.wait.func = memcg_oom_wake_function;
1538	owait.wait.private = current;
1539	INIT_LIST_HEAD(&owait.wait.task_list);
1540	need_to_kill = true;
1541	/* First, try to take the OOM lock on the hierarchy under mem. */
1542	mutex_lock(&memcg_oom_mutex);
1543	locked = mem_cgroup_oom_lock(mem);
1544	/*
1545	 * Even if signal_pending(), we can't quit the charge() loop without
1546	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1547	 * under OOM is always welcome, so use TASK_KILLABLE here.
1548	 */
1549	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1550	if (!locked || mem->oom_kill_disable)
1551		need_to_kill = false;
1552	if (locked)
1553		mem_cgroup_oom_notify(mem);
1554	mutex_unlock(&memcg_oom_mutex);
1555
1556	if (need_to_kill) {
1557		finish_wait(&memcg_oom_waitq, &owait.wait);
1558		mem_cgroup_out_of_memory(mem, mask);
1559	} else {
1560		schedule();
1561		finish_wait(&memcg_oom_waitq, &owait.wait);
1562	}
1563	mutex_lock(&memcg_oom_mutex);
1564	mem_cgroup_oom_unlock(mem);
1565	memcg_wakeup_oom(mem);
1566	mutex_unlock(&memcg_oom_mutex);
1567
1568	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1569		return false;
1570	/* Give a chance to the dying process */
1571	schedule_timeout(1);
1572	return true;
1573}
1574
1575/*
1576 * Currently used to update mapped file statistics, but the routine can be
1577 * generalized to update other statistics as well.
1578 *
1579 * Notes: Race condition
1580 *
1581 * We usually use page_cgroup_lock() for accessing page_cgroup members, but
1582 * it tends to be costly. Under some conditions, we don't need
1583 * to do so _always_.
1584 *
1585 * Considering "charge", lock_page_cgroup() is not required because all
1586 * file-stat operations happen after a page is attached to the radix-tree.
1587 * There is no race with "charge".
1588 *
1589 * Considering "uncharge", we know that memcg intentionally doesn't clear
1590 * pc->mem_cgroup at "uncharge". So, we always see a valid pc->mem_cgroup even
1591 * if there is a race with "uncharge". The statistics themselves are properly
1592 * handled by flags.
1593 *
1594 * Considering "move", this is the only case where we see a race. To make the
1595 * race window small, we check the MEM_CGROUP_ON_MOVE percpu value to detect
1596 * whether a race is possible. If it is, we take a lock.
1597 */
1598
1599void mem_cgroup_update_page_stat(struct page *page,
1600				 enum mem_cgroup_page_stat_item idx, int val)
1601{
1602	struct mem_cgroup *mem;
1603	struct page_cgroup *pc = lookup_page_cgroup(page);
1604	bool need_unlock = false;
1605	unsigned long uninitialized_var(flags);
1606
1607	if (unlikely(!pc))
1608		return;
1609
1610	rcu_read_lock();
1611	mem = pc->mem_cgroup;
1612	if (unlikely(!mem || !PageCgroupUsed(pc)))
1613		goto out;
1614	/* pc->mem_cgroup is unstable ? */
1615	if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
1616		/* take a lock against to access pc->mem_cgroup */
1617		move_lock_page_cgroup(pc, &flags);
1618		need_unlock = true;
1619		mem = pc->mem_cgroup;
1620		if (!mem || !PageCgroupUsed(pc))
1621			goto out;
1622	}
1623
1624	switch (idx) {
1625	case MEMCG_NR_FILE_MAPPED:
1626		if (val > 0)
1627			SetPageCgroupFileMapped(pc);
1628		else if (!page_mapped(page))
1629			ClearPageCgroupFileMapped(pc);
1630		idx = MEM_CGROUP_STAT_FILE_MAPPED;
1631		break;
1632	default:
1633		BUG();
1634	}
1635
1636	this_cpu_add(mem->stat->count[idx], val);
1637
1638out:
1639	if (unlikely(need_unlock))
1640		move_unlock_page_cgroup(pc, &flags);
1641	rcu_read_unlock();
1642	return;
1643}
1644EXPORT_SYMBOL(mem_cgroup_update_page_stat);
1645
1646/*
1647 * size of first charge trial. "32" comes from vmscan.c's magic value.
1648 * TODO: it may be necessary to use bigger numbers on big iron.
1649 */
1650#define CHARGE_SIZE	(32 * PAGE_SIZE)
1651struct memcg_stock_pcp {
1652	struct mem_cgroup *cached; /* this is never the root cgroup */
1653	unsigned int nr_pages;
1654	struct work_struct work;
1655};
1656static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1657static atomic_t memcg_drain_count;
1658
1659/*
1660 * Try to consume stocked charge on this cpu. On success, one page is consumed
1661 * from the local stock and true is returned. If the stock is 0 or holds
1662 * charges from a cgroup which is not the current target, false is returned
1663 * and the stock will be refilled.
1664 */
1665static bool consume_stock(struct mem_cgroup *mem)
1666{
1667	struct memcg_stock_pcp *stock;
1668	bool ret = true;
1669
1670	stock = &get_cpu_var(memcg_stock);
1671	if (mem == stock->cached && stock->nr_pages)
1672		stock->nr_pages--;
1673	else /* need to call res_counter_charge */
1674		ret = false;
1675	put_cpu_var(memcg_stock);
1676	return ret;
1677}
1678
1679/*
1680 * Return charges cached in the percpu stock to the res_counter and reset the cached information.
1681 */
1682static void drain_stock(struct memcg_stock_pcp *stock)
1683{
1684	struct mem_cgroup *old = stock->cached;
1685
1686	if (stock->nr_pages) {
1687		unsigned long bytes = stock->nr_pages * PAGE_SIZE;
1688
1689		res_counter_uncharge(&old->res, bytes);
1690		if (do_swap_account)
1691			res_counter_uncharge(&old->memsw, bytes);
1692		stock->nr_pages = 0;
1693	}
1694	stock->cached = NULL;
1695}
1696
1697/*
1698 * This must be called with preemption disabled or by
1699 * a thread which is pinned to the local cpu.
1700 */
1701static void drain_local_stock(struct work_struct *dummy)
1702{
1703	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1704	drain_stock(stock);
1705}
1706
1707/*
1708 * Cache charges (nr_pages) taken from the res_counter in the local per-cpu area.
1709 * They will be consumed by consume_stock() later.
1710 */
1711static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
1712{
1713	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1714
1715	if (stock->cached != mem) { /* reset if necessary */
1716		drain_stock(stock);
1717		stock->cached = mem;
1718	}
1719	stock->nr_pages += nr_pages;
1720	put_cpu_var(memcg_stock);
1721}
1722
1723/*
1724 * Tries to drain stocked charges on other cpus. This function is asynchronous
1725 * and just puts a work item per cpu for draining locally on each cpu. The
1726 * caller can expect that some charges will be returned to the res_counter
1727 * later, but cannot wait for it.
1728 */
1729static void drain_all_stock_async(void)
1730{
1731	int cpu;
1732	/* This function schedules "drain" asynchronously.
1733	 * The result of "drain" is not directly handled by callers, so
1734	 * if someone is already draining, we don't have to schedule more drains.
1735	 * Anyway, the WORK_STRUCT_PENDING check in queue_work_on() will catch it
1736	 * if there is a race. We just do a loose check here.
1737	 */
1738	if (atomic_read(&memcg_drain_count))
1739		return;
1740	/* Notify other cpus that system-wide "drain" is running */
1741	atomic_inc(&memcg_drain_count);
1742	get_online_cpus();
1743	for_each_online_cpu(cpu) {
1744		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1745		schedule_work_on(cpu, &stock->work);
1746	}
1747	put_online_cpus();
1748	atomic_dec(&memcg_drain_count);
1749	/* We don't wait for flush_work */
1750}
1751
1752/* This is a synchronous drain interface. */
1753static void drain_all_stock_sync(void)
1754{
1755	/* called when force_empty is called */
1756	atomic_inc(&memcg_drain_count);
1757	schedule_on_each_cpu(drain_local_stock);
1758	atomic_dec(&memcg_drain_count);
1759}
1760
1761/*
1762 * This function drains the percpu counter values from a DEAD cpu and
1763 * moves them to the local cpu. Note that this function can be preempted.
1764 */
1765static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
1766{
1767	int i;
1768
1769	spin_lock(&mem->pcp_counter_lock);
1770	for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
1771		s64 x = per_cpu(mem->stat->count[i], cpu);
1772
1773		per_cpu(mem->stat->count[i], cpu) = 0;
1774		mem->nocpu_base.count[i] += x;
1775	}
1776	/* need to clear ON_MOVE value, works as a kind of lock. */
1777	per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
1778	spin_unlock(&mem->pcp_counter_lock);
1779}
1780
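/*
 * When a cpu comes online again, restore its percpu MEM_CGROUP_ON_MOVE
 * counter from the value kept in nocpu_base so that move detection keeps
 * working.
 */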
1781static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
1782{
1783	int idx = MEM_CGROUP_ON_MOVE;
1784
1785	spin_lock(&mem->pcp_counter_lock);
1786	per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
1787	spin_unlock(&mem->pcp_counter_lock);
1788}
1789
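/*
 * CPU hotplug callback: on CPU_ONLINE, resynchronize the ON_MOVE counter for
 * every memcg; when a cpu dies, fold its percpu statistics into nocpu_base
 * and drain any charge stock it was caching.
 */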
1790static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
1791					unsigned long action,
1792					void *hcpu)
1793{
1794	int cpu = (unsigned long)hcpu;
1795	struct memcg_stock_pcp *stock;
1796	struct mem_cgroup *iter;
1797
1798	if (action == CPU_ONLINE) {
1799		for_each_mem_cgroup_all(iter)
1800			synchronize_mem_cgroup_on_move(iter, cpu);
1801		return NOTIFY_OK;
1802	}
1803
1804	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1805		return NOTIFY_OK;
1806
1807	for_each_mem_cgroup_all(iter)
1808		mem_cgroup_drain_pcp_counter(iter, cpu);
1809
1810	stock = &per_cpu(memcg_stock, cpu);
1811	drain_stock(stock);
1812	return NOTIFY_OK;
1813}
1814
1815
1816/* See __mem_cgroup_try_charge() for details */
1817enum {
1818	CHARGE_OK,		/* success */
1819	CHARGE_RETRY,		/* need to retry but retry is not bad */
1820	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
1821	CHARGE_WOULDBLOCK,	/* __GFP_WAIT wasn't set and not enough res. */
1822	CHARGE_OOM_DIE,		/* the current is killed because of OOM */
1823};
1824
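/*
 * Try to charge @csize bytes to @mem's res counter (and memsw when swap
 * accounting is enabled). On failure, run hierarchical reclaim against the
 * cgroup that hit its limit and map the outcome to one of the CHARGE_* codes
 * above: retry, would-block for !__GFP_WAIT charges, hard failure, or OOM
 * handling when @oom_check is set.
 */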
1825static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
1826				int csize, bool oom_check)
1827{
1828	struct mem_cgroup *mem_over_limit;
1829	struct res_counter *fail_res;
1830	unsigned long flags = 0;
1831	int ret;
1832
1833	ret = res_counter_charge(&mem->res, csize, &fail_res);
1834
1835	if (likely(!ret)) {
1836		if (!do_swap_account)
1837			return CHARGE_OK;
1838		ret = res_counter_charge(&mem->memsw, csize, &fail_res);
1839		if (likely(!ret))
1840			return CHARGE_OK;
1841
1842		res_counter_uncharge(&mem->res, csize);
1843		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
1844		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
1845	} else
1846		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
1847	/*
1848	 * csize can be either a huge page (HPAGE_SIZE), a batch of
1849	 * regular pages (CHARGE_SIZE), or a single regular page
1850	 * (PAGE_SIZE).
1851	 *
1852	 * Never reclaim on behalf of optional batching, retry with a
1853	 * single page instead.
1854	 */
1855	if (csize == CHARGE_SIZE)
1856		return CHARGE_RETRY;
1857
1858	if (!(gfp_mask & __GFP_WAIT))
1859		return CHARGE_WOULDBLOCK;
1860
1861	ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
1862					      gfp_mask, flags);
1863	if (mem_cgroup_margin(mem_over_limit) >= csize)
1864		return CHARGE_RETRY;
1865	/*
1866	 * Even though the limit is exceeded at this point, reclaim
1867	 * may have been able to free some pages.  Retry the charge
1868	 * before killing the task.
1869	 *
1870	 * Only for regular pages, though: huge pages are rather
1871	 * unlikely to succeed so close to the limit, and we fall back
1872	 * to regular pages anyway in case of failure.
1873	 */
1874	if (csize == PAGE_SIZE && ret)
1875		return CHARGE_RETRY;
1876
1877	/*
1878	 * At task move, charge accounts can be doubly counted. So, it's
1879	 * better to wait until the end of task_move if something is going on.
1880	 */
1881	if (mem_cgroup_wait_acct_move(mem_over_limit))
1882		return CHARGE_RETRY;
1883
1884	/* If we don't need to call the oom-killer at all, return immediately */
1885	if (!oom_check)
1886		return CHARGE_NOMEM;
1887	/* check OOM */
1888	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
1889		return CHARGE_OOM_DIE;
1890
1891	return CHARGE_RETRY;
1892}
1893
1894/*
1895 * Unlike exported interface, "oom" parameter is added. if oom==true,
1896 * oom-killer can be invoked.
1897 */
1898static int __mem_cgroup_try_charge(struct mm_struct *mm,
1899				   gfp_t gfp_mask,
1900				   struct mem_cgroup **memcg, bool oom,
1901				   int page_size)
1902{
1903	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
1904	struct mem_cgroup *mem = NULL;
1905	int ret;
1906	int csize = max(CHARGE_SIZE, (unsigned long) page_size);
1907
1908	/*
1909	 * Unlike the global VM's OOM kill, we are not under a system-wide
1910	 * memory shortage here. So, let a dying process proceed, in addition
1911	 * to a MEMDIE process.
1912	 */
1913	if (unlikely(test_thread_flag(TIF_MEMDIE)
1914		     || fatal_signal_pending(current)))
1915		goto bypass;
1916
1917	/*
1918	 * We always charge the cgroup the mm_struct belongs to.
1919	 * The mm_struct's mem_cgroup changes on task migration if the
1920	 * thread group leader migrates. It's possible that mm is not
1921	 * set, if so charge the init_mm (happens for pagecache usage).
1922	 */
1923	if (!*memcg && !mm)
1924		goto bypass;
1925again:
1926	if (*memcg) { /* css should be a valid one */
1927		mem = *memcg;
1928		VM_BUG_ON(css_is_removed(&mem->css));
1929		if (mem_cgroup_is_root(mem))
1930			goto done;
1931		if (page_size == PAGE_SIZE && consume_stock(mem))
1932			goto done;
1933		css_get(&mem->css);
1934	} else {
1935		struct task_struct *p;
1936
1937		rcu_read_lock();
1938		p = rcu_dereference(mm->owner);
1939		/*
1940		 * Because we don't have task_lock(), "p" can exit.
1941		 * In that case, "mem" can point to root or p can be NULL when
1942		 * racing with swapoff. Then there is a small risk of mis-accounting.
1943		 * But such races are unavoidable anyway because we don't hold
1944		 * cgroup_mutex(); taking it would be overkill, so we accept the
1945		 * small race here.
1946		 * (*) swapoff et al. charge against the mm_struct, not against a
1947		 * task_struct, so mm->owner can be NULL.
1948		 */
1949		mem = mem_cgroup_from_task(p);
1950		if (!mem || mem_cgroup_is_root(mem)) {
1951			rcu_read_unlock();
1952			goto done;
1953		}
1954		if (page_size == PAGE_SIZE && consume_stock(mem)) {
1955			/*
1956			 * It seems dangerous to access memcg without css_get().
1957			 * But considering how consume_stock() works, it's not
1958			 * necessary. If consume_stock() succeeds, some charges
1959			 * from this memcg are cached on this cpu. So, we
1960			 * don't need to call css_get()/css_tryget() before
1961			 * calling consume_stock().
1962			 */
1963			rcu_read_unlock();
1964			goto done;
1965		}
1966		/* after here, we may be blocked. we need to get refcnt */
1967		if (!css_tryget(&mem->css)) {
1968			rcu_read_unlock();
1969			goto again;
1970		}
1971		rcu_read_unlock();
1972	}
1973
1974	do {
1975		bool oom_check;
1976
1977		/* If killed, bypass charge */
1978		if (fatal_signal_pending(current)) {
1979			css_put(&mem->css);
1980			goto bypass;
1981		}
1982
1983		oom_check = false;
1984		if (oom && !nr_oom_retries) {
1985			oom_check = true;
1986			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
1987		}
1988
1989		ret = __mem_cgroup_do_charge(mem, gfp_mask, csize, oom_check);
1990
1991		switch (ret) {
1992		case CHARGE_OK:
1993			break;
1994		case CHARGE_RETRY: /* not in OOM situation but retry */
1995			csize = page_size;
1996			css_put(&mem->css);
1997			mem = NULL;
1998			goto again;
1999		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2000			css_put(&mem->css);
2001			goto nomem;
2002		case CHARGE_NOMEM: /* OOM routine works */
2003			if (!oom) {
2004				css_put(&mem->css);
2005				goto nomem;
2006			}
2007			/* If oom, we never return -ENOMEM */
2008			nr_oom_retries--;
2009			break;
2010		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
2011			css_put(&mem->css);
2012			goto bypass;
2013		}
2014	} while (ret != CHARGE_OK);
2015
2016	if (csize > page_size)
2017		refill_stock(mem, (csize - page_size) >> PAGE_SHIFT);
2018	css_put(&mem->css);
2019done:
2020	*memcg = mem;
2021	return 0;
2022nomem:
2023	*memcg = NULL;
2024	return -ENOMEM;
2025bypass:
2026	*memcg = NULL;
2027	return 0;
2028}
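
/*
 * A typical charge site pairs try_charge with commit_charge. A rough
 * sketch (cf. mem_cgroup_charge_common() below; error handling trimmed):
 *
 *	struct mem_cgroup *mem = NULL;
 *
 *	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, PAGE_SIZE);
 *	if (ret || !mem)
 *		return ret;
 *	__mem_cgroup_commit_charge(mem, page, pc, ctype, PAGE_SIZE);
 *
 * Note that on the "bypass" paths above, 0 is returned with *memcg set
 * to NULL, so callers must check both the return value and *memcg.
 */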
2029
2030/*
2031 * Sometimes we have to undo a charge we obtained via try_charge().
2032 * This helper does the uncharge against the res_counters for a charge
2033 * obtained by try_charge().
2034 */
2035static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
2036				       unsigned int nr_pages)
2037{
2038	if (!mem_cgroup_is_root(mem)) {
2039		unsigned long bytes = nr_pages * PAGE_SIZE;
2040
2041		res_counter_uncharge(&mem->res, bytes);
2042		if (do_swap_account)
2043			res_counter_uncharge(&mem->memsw, bytes);
2044	}
2045}
2046
2047/*
2048 * A helper function to get a mem_cgroup from its ID. Must be called under
2049 * rcu_read_lock(). The caller must check css_is_removed() or similar if
2050 * that is a concern. (Dropping the refcnt from swap can be called against a
2051 * removed memcg.)
2052 */
2053static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2054{
2055	struct cgroup_subsys_state *css;
2056
2057	/* ID 0 is unused ID */
2058	if (!id)
2059		return NULL;
2060	css = css_lookup(&mem_cgroup_subsys, id);
2061	if (!css)
2062		return NULL;
2063	return container_of(css, struct mem_cgroup, css);
2064}
2065
2066struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2067{
2068	struct mem_cgroup *mem = NULL;
2069	struct page_cgroup *pc;
2070	unsigned short id;
2071	swp_entry_t ent;
2072
2073	VM_BUG_ON(!PageLocked(page));
2074
2075	pc = lookup_page_cgroup(page);
2076	lock_page_cgroup(pc);
2077	if (PageCgroupUsed(pc)) {
2078		mem = pc->mem_cgroup;
2079		if (mem && !css_tryget(&mem->css))
2080			mem = NULL;
2081	} else if (PageSwapCache(page)) {
2082		ent.val = page_private(page);
2083		id = lookup_swap_cgroup(ent);
2084		rcu_read_lock();
2085		mem = mem_cgroup_lookup(id);
2086		if (mem && !css_tryget(&mem->css))
2087			mem = NULL;
2088		rcu_read_unlock();
2089	}
2090	unlock_page_cgroup(pc);
2091	return mem;
2092}
2093
2094static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
2095				       struct page *page,
2096				       struct page_cgroup *pc,
2097				       enum charge_type ctype,
2098				       int page_size)
2099{
2100	int nr_pages = page_size >> PAGE_SHIFT;
2101
2102	lock_page_cgroup(pc);
2103	if (unlikely(PageCgroupUsed(pc))) {
2104		unlock_page_cgroup(pc);
2105		__mem_cgroup_cancel_charge(mem, nr_pages);
2106		return;
2107	}
2108	/*
2109	 * We don't need page_cgroup_lock for tail pages, because they are not
2110	 * accessed by any other context at this point.
2111	 */
2112	pc->mem_cgroup = mem;
2113	/*
2114	 * We access a page_cgroup asynchronously without lock_page_cgroup().
2115	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2116	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
2117	 * before USED bit, we need memory barrier here.
2118	 * See mem_cgroup_add_lru_list(), etc.
2119 	 */
2120	smp_wmb();
2121	switch (ctype) {
2122	case MEM_CGROUP_CHARGE_TYPE_CACHE:
2123	case MEM_CGROUP_CHARGE_TYPE_SHMEM:
2124		SetPageCgroupCache(pc);
2125		SetPageCgroupUsed(pc);
2126		break;
2127	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2128		ClearPageCgroupCache(pc);
2129		SetPageCgroupUsed(pc);
2130		break;
2131	default:
2132		break;
2133	}
2134
2135	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages);
2136	unlock_page_cgroup(pc);
2137	/*
2138	 * "charge_statistics" updated event counter. Then, check it.
2139	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
2140	 * if they exceeds softlimit.
2141	 */
2142	memcg_check_events(mem, page);
2143}
2144
2145#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2146
2147#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
2148			(1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
2149/*
2150 * Because tail pages are not marked as "used", mark them here. We're under
2151 * zone->lru_lock, 'splitting on pmd' and compound_lock.
2152 */
2153void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
2154{
2155	struct page_cgroup *head_pc = lookup_page_cgroup(head);
2156	struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
2157	unsigned long flags;
2158
2159	if (mem_cgroup_disabled())
2160		return;
2161	/*
2162	 * We have no races with charge/uncharge but will have races with
2163	 * page state accounting.
2164	 */
2165	move_lock_page_cgroup(head_pc, &flags);
2166
2167	tail_pc->mem_cgroup = head_pc->mem_cgroup;
2168	smp_wmb(); /* see __commit_charge() */
2169	if (PageCgroupAcctLRU(head_pc)) {
2170		enum lru_list lru;
2171		struct mem_cgroup_per_zone *mz;
2172
2173		/*
2174		 * LRU flags cannot be copied because we need to add the tail
2175		 * page to the LRU by the generic call, and our hook will be called.
2176		 * We hold lru_lock, so reduce the counter directly.
2177		 */
2178		lru = page_lru(head);
2179		mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
2180		MEM_CGROUP_ZSTAT(mz, lru) -= 1;
2181	}
2182	tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
2183	move_unlock_page_cgroup(head_pc, &flags);
2184}
2185#endif
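
/*
 * Note: mem_cgroup_split_huge_fixup() is expected to be called once per
 * tail page from the huge page split path (__split_huge_page_refcount()
 * in mm/huge_memory.c in this kernel), with zone->lru_lock and the
 * compound_lock already held, as the comment above describes.
 */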
2186
2187/**
2188 * mem_cgroup_move_account - move account of the page
2189 * @page: the page
2190 * @pc:	page_cgroup of the page.
2191 * @from: mem_cgroup which the page is moved from.
2192 * @to:	mem_cgroup which the page is moved to. @from != @to.
2193 * @uncharge: whether we should call uncharge and css_put against @from.
2194 * @charge_size: number of bytes to charge (regular or huge page)
2195 *
2196 * The caller must confirm following.
2197 * - page is not on LRU (isolate_page() is useful.)
2198 * - compound_lock is held when charge_size > PAGE_SIZE
2199 *
2200 * This function doesn't do "charge" nor css_get to new cgroup. It should be
2201 * done by a caller(__mem_cgroup_try_charge would be usefull). If @uncharge is
2202 * true, this function does "uncharge" from old cgroup, but it doesn't if
2203 * @uncharge is false, so a caller should do "uncharge".
2204 */
2205static int mem_cgroup_move_account(struct page *page, struct page_cgroup *pc,
2206				   struct mem_cgroup *from, struct mem_cgroup *to,
2207				   bool uncharge, int charge_size)
2208{
2209	int nr_pages = charge_size >> PAGE_SHIFT;
2210	unsigned long flags;
2211	int ret;
2212
2213	VM_BUG_ON(from == to);
2214	VM_BUG_ON(PageLRU(page));
2215	/*
2216	 * The page is isolated from LRU. So, collapse function
2217	 * will not handle this page. But page splitting can happen.
2218	 * Do this check under compound_page_lock(). The caller should
2219	 * hold it.
2220	 */
2221	ret = -EBUSY;
2222	if (charge_size > PAGE_SIZE && !PageTransHuge(page))
2223		goto out;
2224
2225	lock_page_cgroup(pc);
2226
2227	ret = -EINVAL;
2228	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
2229		goto unlock;
2230
2231	move_lock_page_cgroup(pc, &flags);
2232
2233	if (PageCgroupFileMapped(pc)) {
2234		/* Update mapped_file data for mem_cgroup */
2235		preempt_disable();
2236		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2237		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2238		preempt_enable();
2239	}
2240	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
2241	if (uncharge)
2242		/* This is not "cancel", but cancel_charge does all we need. */
2243		__mem_cgroup_cancel_charge(from, nr_pages);
2244
2245	/* caller should have done css_get */
2246	pc->mem_cgroup = to;
2247	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
2248	/*
2249	 * We charges against "to" which may not have any tasks. Then, "to"
2250	 * can be under rmdir(). But in current implementation, caller of
2251	 * this function is just force_empty() and move charge, so it's
2252	 * garanteed that "to" is never removed. So, we don't check rmdir
2253	 * status here.
2254	 */
2255	move_unlock_page_cgroup(pc, &flags);
2256	ret = 0;
2257unlock:
2258	unlock_page_cgroup(pc);
2259	/*
2260	 * check events
2261	 */
2262	memcg_check_events(to, page);
2263	memcg_check_events(from, page);
2264out:
2265	return ret;
2266}
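
/*
 * Sketch of the expected calling pattern (cf. mem_cgroup_move_parent()
 * below); the compound_lock is only needed when moving a huge page:
 *
 *	if (charge_size > PAGE_SIZE)
 *		flags = compound_lock_irqsave(page);
 *	ret = mem_cgroup_move_account(page, pc, from, to, uncharge, charge_size);
 *	if (charge_size > PAGE_SIZE)
 *		compound_unlock_irqrestore(page, flags);
 */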
2267
2268/*
2269 * move charges to its parent.
2270 */
2271
2272static int mem_cgroup_move_parent(struct page *page,
2273				  struct page_cgroup *pc,
2274				  struct mem_cgroup *child,
2275				  gfp_t gfp_mask)
2276{
2277	struct cgroup *cg = child->css.cgroup;
2278	struct cgroup *pcg = cg->parent;
2279	struct mem_cgroup *parent;
2280	int page_size = PAGE_SIZE;
2281	unsigned long flags;
2282	int ret;
2283
2284	/* Is ROOT ? */
2285	if (!pcg)
2286		return -EINVAL;
2287
2288	ret = -EBUSY;
2289	if (!get_page_unless_zero(page))
2290		goto out;
2291	if (isolate_lru_page(page))
2292		goto put;
2293
2294	if (PageTransHuge(page))
2295		page_size = HPAGE_SIZE;
2296
2297	parent = mem_cgroup_from_cont(pcg);
2298	ret = __mem_cgroup_try_charge(NULL, gfp_mask,
2299				&parent, false, page_size);
2300	if (ret || !parent)
2301		goto put_back;
2302
2303	if (page_size > PAGE_SIZE)
2304		flags = compound_lock_irqsave(page);
2305
2306	ret = mem_cgroup_move_account(page, pc, child, parent, true, page_size);
2307	if (ret)
2308		__mem_cgroup_cancel_charge(parent, page_size >> PAGE_SHIFT);
2309
2310	if (page_size > PAGE_SIZE)
2311		compound_unlock_irqrestore(page, flags);
2312put_back:
2313	putback_lru_page(page);
2314put:
2315	put_page(page);
2316out:
2317	return ret;
2318}
2319
2320/*
2321 * Charge the memory controller for page usage.
2322 * Return
2323 * 0 if the charge was successful
2324 * < 0 if the cgroup is over its limit
2325 */
2326static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2327				gfp_t gfp_mask, enum charge_type ctype)
2328{
2329	struct mem_cgroup *mem = NULL;
2330	int page_size = PAGE_SIZE;
2331	struct page_cgroup *pc;
2332	bool oom = true;
2333	int ret;
2334
2335	if (PageTransHuge(page)) {
2336		page_size <<= compound_order(page);
2337		VM_BUG_ON(!PageTransHuge(page));
2338		/*
2339		 * Never OOM-kill a process for a huge page.  The
2340		 * fault handler will fall back to regular pages.
2341		 */
2342		oom = false;
2343	}
2344
2345	pc = lookup_page_cgroup(page);
2346	BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
2347
2348	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, oom, page_size);
2349	if (ret || !mem)
2350		return ret;
2351
2352	__mem_cgroup_commit_charge(mem, page, pc, ctype, page_size);
2353	return 0;
2354}
2355
2356int mem_cgroup_newpage_charge(struct page *page,
2357			      struct mm_struct *mm, gfp_t gfp_mask)
2358{
2359	if (mem_cgroup_disabled())
2360		return 0;
2361	/*
2362	 * If already mapped, we don't have to account.
2363	 * If page cache, page->mapping has an address_space.
2364	 * But page->mapping may hold a stale anon_vma pointer;
2365	 * detect that with the PageAnon() check. A newly-mapped anon page's
2366	 * page->mapping is NULL.
2367	 */
2368	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2369		return 0;
2370	if (unlikely(!mm))
2371		mm = &init_mm;
2372	return mem_cgroup_charge_common(page, mm, gfp_mask,
2373				MEM_CGROUP_CHARGE_TYPE_MAPPED);
2374}
2375
2376static void
2377__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2378					enum charge_type ctype);
2379
2380int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2381				gfp_t gfp_mask)
2382{
2383	int ret;
2384
2385	if (mem_cgroup_disabled())
2386		return 0;
2387	if (PageCompound(page))
2388		return 0;
2389	/*
2390	 * Corner case handling. This is usually called from add_to_page_cache().
2391	 * But some filesystems (shmem) precharge this page before calling it
2392	 * and call add_to_page_cache() with GFP_NOWAIT.
2393	 *
2394	 * In the GFP_NOWAIT case, the page may have been pre-charged before calling
2395	 * add_to_page_cache(). (See shmem.c.) Check that here and avoid charging
2396	 * twice. (It works but at a slightly larger cost.)
2397	 * And when the page is SwapCache, the swap information should be taken
2398	 * into account. This is under lock_page() now.
2399	 */
2400	if (!(gfp_mask & __GFP_WAIT)) {
2401		struct page_cgroup *pc;
2402
2403		pc = lookup_page_cgroup(page);
2404		if (!pc)
2405			return 0;
2406		lock_page_cgroup(pc);
2407		if (PageCgroupUsed(pc)) {
2408			unlock_page_cgroup(pc);
2409			return 0;
2410		}
2411		unlock_page_cgroup(pc);
2412	}
2413
2414	if (unlikely(!mm))
2415		mm = &init_mm;
2416
2417	if (page_is_file_cache(page))
2418		return mem_cgroup_charge_common(page, mm, gfp_mask,
2419				MEM_CGROUP_CHARGE_TYPE_CACHE);
2420
2421	/* shmem */
2422	if (PageSwapCache(page)) {
2423		struct mem_cgroup *mem;
2424
2425		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2426		if (!ret)
2427			__mem_cgroup_commit_charge_swapin(page, mem,
2428					MEM_CGROUP_CHARGE_TYPE_SHMEM);
2429	} else
2430		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2431					MEM_CGROUP_CHARGE_TYPE_SHMEM);
2432
2433	return ret;
2434}
2435
2436/*
2437 * During swap-in (try_charge -> commit or cancel), the page is locked.
2438 * When try_charge() returns successfully, a reference to the memcg is held
2439 * without a struct page_cgroup. This reference is consumed by
2440 * "commit()" or released by "cancel()".
2441 */
2442int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2443				 struct page *page,
2444				 gfp_t mask, struct mem_cgroup **ptr)
2445{
2446	struct mem_cgroup *mem;
2447	int ret;
2448
2449	*ptr = NULL;
2450
2451	if (mem_cgroup_disabled())
2452		return 0;
2453
2454	if (!do_swap_account)
2455		goto charge_cur_mm;
2456	/*
2457	 * A racing thread's fault, or swapoff, may have already updated
2458	 * the pte, and even removed page from swap cache: in those cases
2459	 * do_swap_page()'s pte_same() test will fail; but there's also a
2460	 * KSM case which does need to charge the page.
2461	 */
2462	if (!PageSwapCache(page))
2463		goto charge_cur_mm;
2464	mem = try_get_mem_cgroup_from_page(page);
2465	if (!mem)
2466		goto charge_cur_mm;
2467	*ptr = mem;
2468	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, PAGE_SIZE);
2469	css_put(&mem->css);
2470	return ret;
2471charge_cur_mm:
2472	if (unlikely(!mm))
2473		mm = &init_mm;
2474	return __mem_cgroup_try_charge(mm, mask, ptr, true, PAGE_SIZE);
2475}
2476
2477static void
2478__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2479					enum charge_type ctype)
2480{
2481	struct page_cgroup *pc;
2482
2483	if (mem_cgroup_disabled())
2484		return;
2485	if (!ptr)
2486		return;
2487	cgroup_exclude_rmdir(&ptr->css);
2488	pc = lookup_page_cgroup(page);
2489	mem_cgroup_lru_del_before_commit_swapcache(page);
2490	__mem_cgroup_commit_charge(ptr, page, pc, ctype, PAGE_SIZE);
2491	mem_cgroup_lru_add_after_commit_swapcache(page);
2492	/*
2493	 * Now the swap content is in memory, which means this page may be
2494	 * counted as both mem and swap -- a double count.
2495	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2496	 * under lock_page(). But in do_swap_page() (mm/memory.c), reuse_swap_page()
2497	 * may call delete_from_swap_cache() before we get here.
2498	 */
2499	if (do_swap_account && PageSwapCache(page)) {
2500		swp_entry_t ent = {.val = page_private(page)};
2501		unsigned short id;
2502		struct mem_cgroup *memcg;
2503
2504		id = swap_cgroup_record(ent, 0);
2505		rcu_read_lock();
2506		memcg = mem_cgroup_lookup(id);
2507		if (memcg) {
2508			/*
2509			 * The recorded memcg may already be obsolete, so avoid
2510			 * calling css_tryget().
2511			 */
2512			if (!mem_cgroup_is_root(memcg))
2513				res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2514			mem_cgroup_swap_statistics(memcg, false);
2515			mem_cgroup_put(memcg);
2516		}
2517		rcu_read_unlock();
2518	}
2519	/*
2520	 * At swapin, we may charge against a cgroup which has no tasks,
2521	 * so rmdir()->pre_destroy() can be called while we do this charge.
2522	 * In that case, we need to call pre_destroy() again; check that here.
2523	 */
2524	cgroup_release_and_wakeup_rmdir(&ptr->css);
2525}
2526
2527void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2528{
2529	__mem_cgroup_commit_charge_swapin(page, ptr,
2530					MEM_CGROUP_CHARGE_TYPE_MAPPED);
2531}
2532
2533void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2534{
2535	if (mem_cgroup_disabled())
2536		return;
2537	if (!mem)
2538		return;
2539	__mem_cgroup_cancel_charge(mem, 1);
2540}
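
/*
 * Rough sketch of how a swap-in path (e.g. do_swap_page() in mm/memory.c)
 * is expected to use the trio above; the real caller differs in detail:
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out_oom;
 *	...map the page...
 *	mem_cgroup_commit_charge_swapin(page, ptr);
 *
 * and, if mapping fails after a successful try_charge:
 *
 *	mem_cgroup_cancel_charge_swapin(ptr);
 */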
2541
2542static void
2543__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
2544	      int page_size)
2545{
2546	struct memcg_batch_info *batch = NULL;
2547	bool uncharge_memsw = true;
2548	/* If swapout, usage of swap doesn't decrease */
2549	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2550		uncharge_memsw = false;
2551
2552	batch = &current->memcg_batch;
2553	/*
2554	 * Usually, we do css_get() when we remember a memcg pointer.
2555	 * But in this case, we keep res->usage until the end of a series of
2556	 * uncharges, so it's ok to ignore the memcg's refcnt.
2557	 */
2558	if (!batch->memcg)
2559		batch->memcg = mem;
2560	/*
2561	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
2562	 * In those cases, all pages freed consecutively can be expected to be in
2563	 * the same cgroup, and we have a chance to coalesce uncharges.
2564	 * But we uncharge one by one if the task is being OOM-killed (TIF_MEMDIE)
2565	 * because we want to do the uncharge as soon as possible.
2566	 */
2567
2568	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2569		goto direct_uncharge;
2570
2571	if (page_size != PAGE_SIZE)
2572		goto direct_uncharge;
2573
2574	/*
2575	 * In the typical case, batch->memcg == mem. This means we can
2576	 * merge a series of uncharges into a single res_counter uncharge.
2577	 * If not, we uncharge the res_counter one by one.
2578	 */
2579	if (batch->memcg != mem)
2580		goto direct_uncharge;
2581	/* remember freed charge and uncharge it later */
2582	batch->nr_pages++;
2583	if (uncharge_memsw)
2584		batch->memsw_nr_pages++;
2585	return;
2586direct_uncharge:
2587	res_counter_uncharge(&mem->res, page_size);
2588	if (uncharge_memsw)
2589		res_counter_uncharge(&mem->memsw, page_size);
2590	if (unlikely(batch->memcg != mem))
2591		memcg_oom_recover(mem);
2592	return;
2593}
2594
2595/*
2596 * uncharge if !page_mapped(page)
2597 */
2598static struct mem_cgroup *
2599__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2600{
2601	int count;
2602	struct page_cgroup *pc;
2603	struct mem_cgroup *mem = NULL;
2604	int page_size = PAGE_SIZE;
2605
2606	if (mem_cgroup_disabled())
2607		return NULL;
2608
2609	if (PageSwapCache(page))
2610		return NULL;
2611
2612	if (PageTransHuge(page)) {
2613		page_size <<= compound_order(page);
2614		VM_BUG_ON(!PageTransHuge(page));
2615	}
2616
2617	count = page_size >> PAGE_SHIFT;
2618	/*
2619	 * Check if our page_cgroup is valid
2620	 */
2621	pc = lookup_page_cgroup(page);
2622	if (unlikely(!pc || !PageCgroupUsed(pc)))
2623		return NULL;
2624
2625	lock_page_cgroup(pc);
2626
2627	mem = pc->mem_cgroup;
2628
2629	if (!PageCgroupUsed(pc))
2630		goto unlock_out;
2631
2632	switch (ctype) {
2633	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2634	case MEM_CGROUP_CHARGE_TYPE_DROP:
2635		/* See mem_cgroup_prepare_migration() */
2636		if (page_mapped(page) || PageCgroupMigration(pc))
2637			goto unlock_out;
2638		break;
2639	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2640		if (!PageAnon(page)) {	/* Shared memory */
2641			if (page->mapping && !page_is_file_cache(page))
2642				goto unlock_out;
2643		} else if (page_mapped(page)) /* Anon */
2644				goto unlock_out;
2645		break;
2646	default:
2647		break;
2648	}
2649
2650	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -count);
2651
2652	ClearPageCgroupUsed(pc);
2653	/*
2654	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
2655	 * freed from LRU. This is safe because uncharged page is expected not
2656	 * to be reused (freed soon). Exception is SwapCache, it's handled by
2657	 * special functions.
2658	 */
2659
2660	unlock_page_cgroup(pc);
2661	/*
2662	 * even after unlock, we have mem->res.usage here and this memcg
2663	 * will never be freed.
2664	 */
2665	memcg_check_events(mem, page);
2666	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
2667		mem_cgroup_swap_statistics(mem, true);
2668		mem_cgroup_get(mem);
2669	}
2670	if (!mem_cgroup_is_root(mem))
2671		__do_uncharge(mem, ctype, page_size);
2672
2673	return mem;
2674
2675unlock_out:
2676	unlock_page_cgroup(pc);
2677	return NULL;
2678}
2679
2680void mem_cgroup_uncharge_page(struct page *page)
2681{
2682	/* early check. */
2683	if (page_mapped(page))
2684		return;
2685	if (page->mapping && !PageAnon(page))
2686		return;
2687	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
2688}
2689
2690void mem_cgroup_uncharge_cache_page(struct page *page)
2691{
2692	VM_BUG_ON(page_mapped(page));
2693	VM_BUG_ON(page->mapping);
2694	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
2695}
2696
2697/*
2698 * Batch start/end is called from the unmap_page_range/invalidate/truncate
2699 * paths. In those cases, pages are freed continuously and we can expect them
2700 * to be in the same memcg. Each of these callers itself limits the number of
2701 * pages freed at once, so uncharge_start/end() is called properly.
2702 * This may be called more than once (nested) within one context.
2703 */
2704
2705void mem_cgroup_uncharge_start(void)
2706{
2707	current->memcg_batch.do_batch++;
2708	/* We can do nest. */
2709	if (current->memcg_batch.do_batch == 1) {
2710		current->memcg_batch.memcg = NULL;
2711		current->memcg_batch.nr_pages = 0;
2712		current->memcg_batch.memsw_nr_pages = 0;
2713	}
2714}
2715
2716void mem_cgroup_uncharge_end(void)
2717{
2718	struct memcg_batch_info *batch = &current->memcg_batch;
2719
2720	if (!batch->do_batch)
2721		return;
2722
2723	batch->do_batch--;
2724	if (batch->do_batch) /* If stacked, do nothing. */
2725		return;
2726
2727	if (!batch->memcg)
2728		return;
2729	/*
2730	 * This "batch->memcg" is valid without any css_get/put etc...
2731	 * because we hide charges behind us.
2732	 */
2733	if (batch->nr_pages)
2734		res_counter_uncharge(&batch->memcg->res,
2735				     batch->nr_pages * PAGE_SIZE);
2736	if (batch->memsw_nr_pages)
2737		res_counter_uncharge(&batch->memcg->memsw,
2738				     batch->memsw_nr_pages * PAGE_SIZE);
2739	memcg_oom_recover(batch->memcg);
2740	/* forget this pointer (for sanity check) */
2741	batch->memcg = NULL;
2742}
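
/*
 * Expected usage of the batching interface at the truncate/unmap call
 * sites (a sketch; the real callers live in mm/memory.c and mm/truncate.c):
 *
 *	mem_cgroup_uncharge_start();
 *	for (each page being released)
 *		mem_cgroup_uncharge_page(page);	// or _cache_page()
 *	mem_cgroup_uncharge_end();
 *
 * start/end pairs may nest; only the outermost end() flushes the batched
 * res_counter uncharges.
 */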
2743
2744#ifdef CONFIG_SWAP
2745/*
2746 * Called after __delete_from_swap_cache() to drop the "page" account.
2747 * The memcg information is recorded in the swap_cgroup of "ent".
2748 */
2749void
2750mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
2751{
2752	struct mem_cgroup *memcg;
2753	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
2754
2755	if (!swapout) /* this was a swap cache but the swap is unused ! */
2756		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
2757
2758	memcg = __mem_cgroup_uncharge_common(page, ctype);
2759
2760	/*
2761	 * Record the memcg information; if swapout && memcg != NULL,
2762	 * mem_cgroup_get() was called in uncharge().
2763	 */
2764	if (do_swap_account && swapout && memcg)
2765		swap_cgroup_record(ent, css_id(&memcg->css));
2766}
2767#endif
2768
2769#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2770/*
2771 * called from swap_entry_free(). remove record in swap_cgroup and
2772 * uncharge "memsw" account.
2773 */
2774void mem_cgroup_uncharge_swap(swp_entry_t ent)
2775{
2776	struct mem_cgroup *memcg;
2777	unsigned short id;
2778
2779	if (!do_swap_account)
2780		return;
2781
2782	id = swap_cgroup_record(ent, 0);
2783	rcu_read_lock();
2784	memcg = mem_cgroup_lookup(id);
2785	if (memcg) {
2786		/*
2787		 * We uncharge this because swap is freed.
2788		 * This memcg may be an obsolete one. We avoid calling css_tryget().
2789		 */
2790		if (!mem_cgroup_is_root(memcg))
2791			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2792		mem_cgroup_swap_statistics(memcg, false);
2793		mem_cgroup_put(memcg);
2794	}
2795	rcu_read_unlock();
2796}
2797
2798/**
2799 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2800 * @entry: swap entry to be moved
2801 * @from:  mem_cgroup which the entry is moved from
2802 * @to:  mem_cgroup which the entry is moved to
2803 * @need_fixup: whether we should fixup res_counters and refcounts.
2804 *
2805 * It succeeds only when the swap_cgroup's record for this entry is the same
2806 * as the mem_cgroup's id of @from.
2807 *
2808 * Returns 0 on success, -EINVAL on failure.
2809 *
2810 * The caller must have charged to @to, IOW, called res_counter_charge() about
2811 * both res and memsw, and called css_get().
2812 */
2813static int mem_cgroup_move_swap_account(swp_entry_t entry,
2814		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2815{
2816	unsigned short old_id, new_id;
2817
2818	old_id = css_id(&from->css);
2819	new_id = css_id(&to->css);
2820
2821	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2822		mem_cgroup_swap_statistics(from, false);
2823		mem_cgroup_swap_statistics(to, true);
2824		/*
2825		 * This function is only called from task migration context now.
2826		 * It postpones res_counter and refcount handling till the end
2827		 * of task migration(mem_cgroup_clear_mc()) for performance
2828		 * improvement. But we cannot postpone mem_cgroup_get(to)
2829		 * because if the process that has been moved to @to does
2830		 * swap-in, the refcount of @to might be decreased to 0.
2831		 */
2832		mem_cgroup_get(to);
2833		if (need_fixup) {
2834			if (!mem_cgroup_is_root(from))
2835				res_counter_uncharge(&from->memsw, PAGE_SIZE);
2836			mem_cgroup_put(from);
2837			/*
2838			 * we charged both to->res and to->memsw, so we should
2839			 * uncharge to->res.
2840			 */
2841			if (!mem_cgroup_is_root(to))
2842				res_counter_uncharge(&to->res, PAGE_SIZE);
2843		}
2844		return 0;
2845	}
2846	return -EINVAL;
2847}
2848#else
2849static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2850		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2851{
2852	return -EINVAL;
2853}
2854#endif
2855
2856/*
2857 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
2858 * page belongs to.
2859 */
2860int mem_cgroup_prepare_migration(struct page *page,
2861	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
2862{
2863	struct page_cgroup *pc;
2864	struct mem_cgroup *mem = NULL;
2865	enum charge_type ctype;
2866	int ret = 0;
2867
2868	*ptr = NULL;
2869
2870	VM_BUG_ON(PageTransHuge(page));
2871	if (mem_cgroup_disabled())
2872		return 0;
2873
2874	pc = lookup_page_cgroup(page);
2875	lock_page_cgroup(pc);
2876	if (PageCgroupUsed(pc)) {
2877		mem = pc->mem_cgroup;
2878		css_get(&mem->css);
2879		/*
2880		 * When migrating an anonymous page, its mapcount goes down
2881		 * to 0 and uncharge() will be called. But, even if it's fully
2882		 * unmapped, migration may fail and this page has to be
2883		 * charged again. We set MIGRATION flag here and delay uncharge
2884		 * until end_migration() is called
2885		 *
2886		 * Corner Case Thinking
2887		 * A)
2888		 * When the old page was mapped as Anon and it's unmap-and-freed
2889		 * while migration was ongoing.
2890		 * If unmap finds the old page, uncharge() of it will be delayed
2891		 * until end_migration(). If unmap finds a new page, it's
2892		 * uncharged when its mapcount goes from 1 to 0. If the unmap code
2893		 * finds a swap migration entry, the new page will not be mapped
2894		 * and end_migration() will find it (mapcount == 0).
2895		 *
2896		 * B)
2897		 * When the old page was mapped but migration fails, the kernel
2898		 * remaps it. A charge for it is kept by the MIGRATION flag even
2899		 * if its mapcount goes down to 0. We can remap it successfully
2900		 * without charging it again.
2901		 *
2902		 * C)
2903		 * The "old" page is under lock_page() until the end of
2904		 * migration, so the old page itself will not be swapped out.
2905		 * If the new page is swapped out before end_migration(), our
2906		 * hook into the usual swap-out path will catch the event.
2907		 */
2908		if (PageAnon(page))
2909			SetPageCgroupMigration(pc);
2910	}
2911	unlock_page_cgroup(pc);
2912	/*
2913	 * If the page is not charged at this point,
2914	 * we return here.
2915	 */
2916	if (!mem)
2917		return 0;
2918
2919	*ptr = mem;
2920	ret = __mem_cgroup_try_charge(NULL, gfp_mask, ptr, false, PAGE_SIZE);
2921	css_put(&mem->css);/* drop extra refcnt */
2922	if (ret || *ptr == NULL) {
2923		if (PageAnon(page)) {
2924			lock_page_cgroup(pc);
2925			ClearPageCgroupMigration(pc);
2926			unlock_page_cgroup(pc);
2927			/*
2928			 * The old page may be fully unmapped while we kept it.
2929			 */
2930			mem_cgroup_uncharge_page(page);
2931		}
2932		return -ENOMEM;
2933	}
2934	/*
2935	 * We charge the new page before it's used/mapped. So, even if unlock_page()
2936	 * is called before end_migration, we can catch all events on this new
2937	 * page. In case the new page is migrated but not remapped, its
2938	 * mapcount ends up at 0 and we call uncharge in end_migration().
2939	 */
2940	pc = lookup_page_cgroup(newpage);
2941	if (PageAnon(page))
2942		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2943	else if (page_is_file_cache(page))
2944		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2945	else
2946		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2947	__mem_cgroup_commit_charge(mem, page, pc, ctype, PAGE_SIZE);
2948	return ret;
2949}
2950
2951/* remove redundant charge if migration failed*/
2952void mem_cgroup_end_migration(struct mem_cgroup *mem,
2953	struct page *oldpage, struct page *newpage, bool migration_ok)
2954{
2955	struct page *used, *unused;
2956	struct page_cgroup *pc;
2957
2958	if (!mem)
2959		return;
2960	/* blocks rmdir() */
2961	cgroup_exclude_rmdir(&mem->css);
2962	if (!migration_ok) {
2963		used = oldpage;
2964		unused = newpage;
2965	} else {
2966		used = newpage;
2967		unused = oldpage;
2968	}
2969	/*
2970	 * We disallowed the uncharge of pages under migration because the
2971	 * page's mapcount temporarily drops to zero.
2972	 * Clear the flag and check whether the page should stay charged.
2973	 */
2974	pc = lookup_page_cgroup(oldpage);
2975	lock_page_cgroup(pc);
2976	ClearPageCgroupMigration(pc);
2977	unlock_page_cgroup(pc);
2978
2979	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
2980
2981	/*
2982	 * If a page is file cache, the radix-tree replacement is atomic
2983	 * and we can skip this check. When it was an anon page, its mapcount
2984	 * went down to 0, but because we added the MIGRATION flag, it's not
2985	 * uncharged yet. There are several cases, but the page->mapcount check
2986	 * and the USED bit check in mem_cgroup_uncharge_page() are sufficient.
2987	 * (See mem_cgroup_prepare_migration() also.)
2988	 */
2989	if (PageAnon(used))
2990		mem_cgroup_uncharge_page(used);
2991	/*
2992	 * At migration, we may charge against a cgroup which has no
2993	 * tasks.
2994	 * So, rmdir()->pre_destroy() can be called while we do this charge.
2995	 * In that case, we need to call pre_destroy() again; check that here.
2996	 */
2997	cgroup_release_and_wakeup_rmdir(&mem->css);
2998}
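
/*
 * Rough sketch of how the migration core is expected to pair these hooks
 * (cf. unmap_and_move() in mm/migrate.c; simplified):
 *
 *	ret = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
 *	if (ret)
 *		goto out;
 *	... migrate the page contents and mappings ...
 *	mem_cgroup_end_migration(mem, page, newpage, migration_succeeded);
 */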
2999
3000/*
3001 * A call to try to shrink memory usage on charge failure at shmem's swapin.
3002 * Calling hierarchical_reclaim is not enough because we should update
3003 * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
3004 * Moreover considering hierarchy, we should reclaim from the mem_over_limit,
3005 * not from the memcg which this page would be charged to.
3006 * try_charge_swapin does all of this work properly.
3007 */
3008int mem_cgroup_shmem_charge_fallback(struct page *page,
3009			    struct mm_struct *mm,
3010			    gfp_t gfp_mask)
3011{
3012	struct mem_cgroup *mem;
3013	int ret;
3014
3015	if (mem_cgroup_disabled())
3016		return 0;
3017
3018	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
3019	if (!ret)
3020		mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
3021
3022	return ret;
3023}
3024
3025#ifdef CONFIG_DEBUG_VM
3026static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3027{
3028	struct page_cgroup *pc;
3029
3030	pc = lookup_page_cgroup(page);
3031	if (likely(pc) && PageCgroupUsed(pc))
3032		return pc;
3033	return NULL;
3034}
3035
3036bool mem_cgroup_bad_page_check(struct page *page)
3037{
3038	if (mem_cgroup_disabled())
3039		return false;
3040
3041	return lookup_page_cgroup_used(page) != NULL;
3042}
3043
3044void mem_cgroup_print_bad_page(struct page *page)
3045{
3046	struct page_cgroup *pc;
3047
3048	pc = lookup_page_cgroup_used(page);
3049	if (pc) {
3050		int ret = -1;
3051		char *path;
3052
3053		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
3054		       pc, pc->flags, pc->mem_cgroup);
3055
3056		path = kmalloc(PATH_MAX, GFP_KERNEL);
3057		if (path) {
3058			rcu_read_lock();
3059			ret = cgroup_path(pc->mem_cgroup->css.cgroup,
3060							path, PATH_MAX);
3061			rcu_read_unlock();
3062		}
3063
3064		printk(KERN_CONT "(%s)\n",
3065				(ret < 0) ? "cannot get the path" : path);
3066		kfree(path);
3067	}
3068}
3069#endif
3070
3071static DEFINE_MUTEX(set_limit_mutex);
3072
3073static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3074				unsigned long long val)
3075{
3076	int retry_count;
3077	u64 memswlimit, memlimit;
3078	int ret = 0;
3079	int children = mem_cgroup_count_children(memcg);
3080	u64 curusage, oldusage;
3081	int enlarge;
3082
3083	/*
3084	 * To keep hierarchical_reclaim simple, how long we should retry
3085	 * depends on the caller. We set our retry count to be a function
3086	 * of the number of children we have to visit in this loop.
3087	 */
3088	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
3089
3090	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3091
3092	enlarge = 0;
3093	while (retry_count) {
3094		if (signal_pending(current)) {
3095			ret = -EINTR;
3096			break;
3097		}
3098		/*
3099		 * Rather than hiding it all in some helper, do this open-coded
3100		 * so you can see what really happens.
3101		 * We have to guarantee mem->res.limit < mem->memsw.limit.
3102		 */
3103		mutex_lock(&set_limit_mutex);
3104		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3105		if (memswlimit < val) {
3106			ret = -EINVAL;
3107			mutex_unlock(&set_limit_mutex);
3108			break;
3109		}
3110
3111		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3112		if (memlimit < val)
3113			enlarge = 1;
3114
3115		ret = res_counter_set_limit(&memcg->res, val);
3116		if (!ret) {
3117			if (memswlimit == val)
3118				memcg->memsw_is_minimum = true;
3119			else
3120				memcg->memsw_is_minimum = false;
3121		}
3122		mutex_unlock(&set_limit_mutex);
3123
3124		if (!ret)
3125			break;
3126
3127		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3128						MEM_CGROUP_RECLAIM_SHRINK);
3129		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3130		/* Usage is reduced ? */
3131		if (curusage >= oldusage)
3132			retry_count--;
3133		else
3134			oldusage = curusage;
3135	}
3136	if (!ret && enlarge)
3137		memcg_oom_recover(memcg);
3138
3139	return ret;
3140}
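
/*
 * Worked example of the retry logic above: with MEM_CGROUP_RECLAIM_RETRIES
 * of 5 and a (sub)hierarchy of 3 cgroups, up to 15 reclaim rounds that make
 * no forward progress are tolerated before the last res_counter_set_limit()
 * failure (-EBUSY) is returned to the writer of memory.limit_in_bytes.
 */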
3141
3142static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3143					unsigned long long val)
3144{
3145	int retry_count;
3146	u64 memlimit, memswlimit, oldusage, curusage;
3147	int children = mem_cgroup_count_children(memcg);
3148	int ret = -EBUSY;
3149	int enlarge = 0;
3150
3151	/* see mem_cgroup_resize_limit() */
3152	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3153	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3154	while (retry_count) {
3155		if (signal_pending(current)) {
3156			ret = -EINTR;
3157			break;
3158		}
3159		/*
3160		 * Rather than hiding it all in some helper, do this open-coded
3161		 * so you can see what really happens.
3162		 * We have to guarantee mem->res.limit < mem->memsw.limit.
3163		 */
3164		mutex_lock(&set_limit_mutex);
3165		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3166		if (memlimit > val) {
3167			ret = -EINVAL;
3168			mutex_unlock(&set_limit_mutex);
3169			break;
3170		}
3171		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3172		if (memswlimit < val)
3173			enlarge = 1;
3174		ret = res_counter_set_limit(&memcg->memsw, val);
3175		if (!ret) {
3176			if (memlimit == val)
3177				memcg->memsw_is_minimum = true;
3178			else
3179				memcg->memsw_is_minimum = false;
3180		}
3181		mutex_unlock(&set_limit_mutex);
3182
3183		if (!ret)
3184			break;
3185
3186		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3187						MEM_CGROUP_RECLAIM_NOSWAP |
3188						MEM_CGROUP_RECLAIM_SHRINK);
3189		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3190		/* Usage is reduced ? */
3191		if (curusage >= oldusage)
3192			retry_count--;
3193		else
3194			oldusage = curusage;
3195	}
3196	if (!ret && enlarge)
3197		memcg_oom_recover(memcg);
3198	return ret;
3199}
3200
3201unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3202					    gfp_t gfp_mask)
3203{
3204	unsigned long nr_reclaimed = 0;
3205	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3206	unsigned long reclaimed;
3207	int loop = 0;
3208	struct mem_cgroup_tree_per_zone *mctz;
3209	unsigned long long excess;
3210
3211	if (order > 0)
3212		return 0;
3213
3214	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
3215	/*
3216	 * This loop can run for a while, especially if mem_cgroups continuously
3217	 * keep exceeding their soft limit and putting the system under
3218	 * pressure.
3219	 */
3220	do {
3221		if (next_mz)
3222			mz = next_mz;
3223		else
3224			mz = mem_cgroup_largest_soft_limit_node(mctz);
3225		if (!mz)
3226			break;
3227
3228		reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
3229						gfp_mask,
3230						MEM_CGROUP_RECLAIM_SOFT);
3231		nr_reclaimed += reclaimed;
3232		spin_lock(&mctz->lock);
3233
3234		/*
3235		 * If we failed to reclaim anything from this memory cgroup
3236		 * it is time to move on to the next cgroup
3237		 */
3238		next_mz = NULL;
3239		if (!reclaimed) {
3240			do {
3241				/*
3242				 * Loop until we find yet another one.
3243				 *
3244				 * By the time we get the soft_limit lock
3245				 * again, someone might have added the
3246				 * group back on the RB tree. Iterate to
3247				 * make sure we get a different mem.
3248				 * mem_cgroup_largest_soft_limit_node returns
3249				 * NULL if no other cgroup is present on
3250				 * the tree
3251				 */
3252				next_mz =
3253				__mem_cgroup_largest_soft_limit_node(mctz);
3254				if (next_mz == mz) {
3255					css_put(&next_mz->mem->css);
3256					next_mz = NULL;
3257				} else /* next_mz == NULL or other memcg */
3258					break;
3259			} while (1);
3260		}
3261		__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
3262		excess = res_counter_soft_limit_excess(&mz->mem->res);
3263		/*
3264		 * One school of thought says that we should not add
3265		 * back the node to the tree if reclaim returns 0.
3266		 * But our reclaim could return 0 simply because, due
3267		 * to the priority, we are exposing a smaller subset of
3268		 * memory to reclaim from. Consider this as a longer
3269		 * term TODO.
3270		 */
3271		/* If excess == 0, no tree ops */
3272		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
3273		spin_unlock(&mctz->lock);
3274		css_put(&mz->mem->css);
3275		loop++;
3276		/*
3277		 * Could not reclaim anything and there are no more
3278		 * mem cgroups to try or we seem to be looping without
3279		 * reclaiming anything.
3280		 */
3281		if (!nr_reclaimed &&
3282			(next_mz == NULL ||
3283			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3284			break;
3285	} while (!nr_reclaimed);
3286	if (next_mz)
3287		css_put(&next_mz->mem->css);
3288	return nr_reclaimed;
3289}
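
/*
 * Note: in this kernel the expected caller is the global reclaim path
 * (e.g. kswapd's balance_pgdat() in mm/vmscan.c), for order-0 reclaim
 * only -- hence the early "if (order > 0) return 0" above. The return
 * value is the number of pages reclaimed from over-soft-limit cgroups
 * for this zone.
 */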
3290
3291/*
3292 * This routine traverses the page_cgroups on the given list and drops them all.
3293 * *And* this routine doesn't reclaim the page itself, it just removes the page_cgroup.
3294 */
3295static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
3296				int node, int zid, enum lru_list lru)
3297{
3298	struct zone *zone;
3299	struct mem_cgroup_per_zone *mz;
3300	struct page_cgroup *pc, *busy;
3301	unsigned long flags, loop;
3302	struct list_head *list;
3303	int ret = 0;
3304
3305	zone = &NODE_DATA(node)->node_zones[zid];
3306	mz = mem_cgroup_zoneinfo(mem, node, zid);
3307	list = &mz->lists[lru];
3308
3309	loop = MEM_CGROUP_ZSTAT(mz, lru);
3310	/* give some margin against EBUSY etc...*/
3311	loop += 256;
3312	busy = NULL;
3313	while (loop--) {
3314		struct page *page;
3315
3316		ret = 0;
3317		spin_lock_irqsave(&zone->lru_lock, flags);
3318		if (list_empty(list)) {
3319			spin_unlock_irqrestore(&zone->lru_lock, flags);
3320			break;
3321		}
3322		pc = list_entry(list->prev, struct page_cgroup, lru);
3323		if (busy == pc) {
3324			list_move(&pc->lru, list);
3325			busy = NULL;
3326			spin_unlock_irqrestore(&zone->lru_lock, flags);
3327			continue;
3328		}
3329		spin_unlock_irqrestore(&zone->lru_lock, flags);
3330
3331		page = lookup_cgroup_page(pc);
3332
3333		ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL);
3334		if (ret == -ENOMEM)
3335			break;
3336
3337		if (ret == -EBUSY || ret == -EINVAL) {
3338			/* found lock contention or "pc" is obsolete. */
3339			busy = pc;
3340			cond_resched();
3341		} else
3342			busy = NULL;
3343	}
3344
3345	if (!ret && !list_empty(list))
3346		return -EBUSY;
3347	return ret;
3348}
3349
3350/*
3351 * make mem_cgroup's charge to be 0 if there is no task.
3352 * This enables deleting this mem_cgroup.
3353 */
3354static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
3355{
3356	int ret;
3357	int node, zid, shrink;
3358	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3359	struct cgroup *cgrp = mem->css.cgroup;
3360
3361	css_get(&mem->css);
3362
3363	shrink = 0;
3364	/* should free all ? */
3365	if (free_all)
3366		goto try_to_free;
3367move_account:
3368	do {
3369		ret = -EBUSY;
3370		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
3371			goto out;
3372		ret = -EINTR;
3373		if (signal_pending(current))
3374			goto out;
3375		/* This is for putting all *used* pages on the LRU. */
3376		lru_add_drain_all();
3377		drain_all_stock_sync();
3378		ret = 0;
3379		mem_cgroup_start_move(mem);
3380		for_each_node_state(node, N_HIGH_MEMORY) {
3381			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3382				enum lru_list l;
3383				for_each_lru(l) {
3384					ret = mem_cgroup_force_empty_list(mem,
3385							node, zid, l);
3386					if (ret)
3387						break;
3388				}
3389			}
3390			if (ret)
3391				break;
3392		}
3393		mem_cgroup_end_move(mem);
3394		memcg_oom_recover(mem);
3395		/* it seems parent cgroup doesn't have enough mem */
3396		if (ret == -ENOMEM)
3397			goto try_to_free;
3398		cond_resched();
3399	/* "ret" should also be checked to ensure all lists are empty. */
3400	} while (mem->res.usage > 0 || ret);
3401out:
3402	css_put(&mem->css);
3403	return ret;
3404
3405try_to_free:
3406	/* returns EBUSY if there is a task or if we come here twice. */
3407	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3408		ret = -EBUSY;
3409		goto out;
3410	}
3411	/* we call try-to-free pages to make this cgroup empty */
3412	lru_add_drain_all();
3413	/* try to free all pages in this cgroup */
3414	shrink = 1;
3415	while (nr_retries && mem->res.usage > 0) {
3416		int progress;
3417
3418		if (signal_pending(current)) {
3419			ret = -EINTR;
3420			goto out;
3421		}
3422		progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
3423						false, get_swappiness(mem));
3424		if (!progress) {
3425			nr_retries--;
3426			/* maybe some writeback is necessary */
3427			congestion_wait(BLK_RW_ASYNC, HZ/10);
3428		}
3429
3430	}
3431	lru_add_drain();
3432	/* try move_account...there may be some *locked* pages. */
3433	goto move_account;
3434}
3435
3436int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3437{
3438	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3439}
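
/*
 * This is the handler behind the memory.force_empty control file, so the
 * whole drain can be triggered from userspace, e.g. (run from the cgroup's
 * directory; any written value works):
 *
 *	# echo 0 > memory.force_empty
 */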
3440
3441
3442static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3443{
3444	return mem_cgroup_from_cont(cont)->use_hierarchy;
3445}
3446
3447static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3448					u64 val)
3449{
3450	int retval = 0;
3451	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3452	struct cgroup *parent = cont->parent;
3453	struct mem_cgroup *parent_mem = NULL;
3454
3455	if (parent)
3456		parent_mem = mem_cgroup_from_cont(parent);
3457
3458	cgroup_lock();
3459	/*
3460	 * If parent's use_hierarchy is set, we can't make any modifications
3461	 * in the child subtrees. If it is unset, then the change can
3462	 * occur, provided the current cgroup has no children.
3463	 *
3464	 * For the root cgroup, parent_mem is NULL; we allow the value to be
3465	 * set if there are no children.
3466	 */
3467	if ((!parent_mem || !parent_mem->use_hierarchy) &&
3468				(val == 1 || val == 0)) {
3469		if (list_empty(&cont->children))
3470			mem->use_hierarchy = val;
3471		else
3472			retval = -EBUSY;
3473	} else
3474		retval = -EINVAL;
3475	cgroup_unlock();
3476
3477	return retval;
3478}
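
/*
 * Exposed as memory.use_hierarchy. Per the checks above, it can only be
 * flipped while this cgroup has no children and the parent itself has
 * use_hierarchy unset, e.g. (from the cgroup's directory):
 *
 *	# echo 1 > memory.use_hierarchy
 */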
3479
3480
3481static u64 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
3482				enum mem_cgroup_stat_index idx)
3483{
3484	struct mem_cgroup *iter;
3485	s64 val = 0;
3486
3487	/* each per-cpu value can be negative, so use s64 */
3488	for_each_mem_cgroup_tree(iter, mem)
3489		val += mem_cgroup_read_stat(iter, idx);
3490
3491	if (val < 0) /* race ? */
3492		val = 0;
3493	return val;
3494}
3495
3496static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
3497{
3498	u64 val;
3499
3500	if (!mem_cgroup_is_root(mem)) {
3501		if (!swap)
3502			return res_counter_read_u64(&mem->res, RES_USAGE);
3503		else
3504			return res_counter_read_u64(&mem->memsw, RES_USAGE);
3505	}
3506
3507	val = mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE);
3508	val += mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS);
3509
3510	if (swap)
3511		val += mem_cgroup_get_recursive_idx_stat(mem,
3512				MEM_CGROUP_STAT_SWAPOUT);
3513
3514	return val << PAGE_SHIFT;
3515}
3516
3517static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
3518{
3519	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3520	u64 val;
3521	int type, name;
3522
3523	type = MEMFILE_TYPE(cft->private);
3524	name = MEMFILE_ATTR(cft->private);
3525	switch (type) {
3526	case _MEM:
3527		if (name == RES_USAGE)
3528			val = mem_cgroup_usage(mem, false);
3529		else
3530			val = res_counter_read_u64(&mem->res, name);
3531		break;
3532	case _MEMSWAP:
3533		if (name == RES_USAGE)
3534			val = mem_cgroup_usage(mem, true);
3535		else
3536			val = res_counter_read_u64(&mem->memsw, name);
3537		break;
3538	default:
3539		BUG();
3540		break;
3541	}
3542	return val;
3543}
3544/*
3545 * The users of this function are the writable limit files, i.e.
3546 * RES_LIMIT and RES_SOFT_LIMIT.
3547 */
3548static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3549			    const char *buffer)
3550{
3551	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3552	int type, name;
3553	unsigned long long val;
3554	int ret;
3555
3556	type = MEMFILE_TYPE(cft->private);
3557	name = MEMFILE_ATTR(cft->private);
3558	switch (name) {
3559	case RES_LIMIT:
3560		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3561			ret = -EINVAL;
3562			break;
3563		}
3564		/* This function does all necessary parse...reuse it */
3565		ret = res_counter_memparse_write_strategy(buffer, &val);
3566		if (ret)
3567			break;
3568		if (type == _MEM)
3569			ret = mem_cgroup_resize_limit(memcg, val);
3570		else
3571			ret = mem_cgroup_resize_memsw_limit(memcg, val);
3572		break;
3573	case RES_SOFT_LIMIT:
3574		ret = res_counter_memparse_write_strategy(buffer, &val);
3575		if (ret)
3576			break;
3577		/*
3578		 * For memsw, soft limits are hard to implement in terms
3579		 * of semantics, for now, we support soft limits for
3580		 * control without swap
3581		 */
3582		if (type == _MEM)
3583			ret = res_counter_set_soft_limit(&memcg->res, val);
3584		else
3585			ret = -EINVAL;
3586		break;
3587	default:
3588		ret = -EINVAL; /* should be BUG() ? */
3589		break;
3590	}
3591	return ret;
3592}
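
/*
 * mem_cgroup_write() backs the writable limit files. For example, resizing
 * the memory and memory+swap limits from the cgroup's directory (values are
 * parsed by res_counter_memparse_write_strategy(), so K/M/G suffixes work):
 *
 *	# echo 400M > memory.limit_in_bytes
 *	# echo 500M > memory.memsw.limit_in_bytes
 *
 * The resize helpers above enforce memory.limit <= memory.memsw.limit.
 */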
3593
3594static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3595		unsigned long long *mem_limit, unsigned long long *memsw_limit)
3596{
3597	struct cgroup *cgroup;
3598	unsigned long long min_limit, min_memsw_limit, tmp;
3599
3600	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3601	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3602	cgroup = memcg->css.cgroup;
3603	if (!memcg->use_hierarchy)
3604		goto out;
3605
3606	while (cgroup->parent) {
3607		cgroup = cgroup->parent;
3608		memcg = mem_cgroup_from_cont(cgroup);
3609		if (!memcg->use_hierarchy)
3610			break;
3611		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3612		min_limit = min(min_limit, tmp);
3613		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3614		min_memsw_limit = min(min_memsw_limit, tmp);
3615	}
3616out:
3617	*mem_limit = min_limit;
3618	*memsw_limit = min_memsw_limit;
3619	return;
3620}
3621
3622static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3623{
3624	struct mem_cgroup *mem;
3625	int type, name;
3626
3627	mem = mem_cgroup_from_cont(cont);
3628	type = MEMFILE_TYPE(event);
3629	name = MEMFILE_ATTR(event);
3630	switch (name) {
3631	case RES_MAX_USAGE:
3632		if (type == _MEM)
3633			res_counter_reset_max(&mem->res);
3634		else
3635			res_counter_reset_max(&mem->memsw);
3636		break;
3637	case RES_FAILCNT:
3638		if (type == _MEM)
3639			res_counter_reset_failcnt(&mem->res);
3640		else
3641			res_counter_reset_failcnt(&mem->memsw);
3642		break;
3643	}
3644
3645	return 0;
3646}
3647
3648static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3649					struct cftype *cft)
3650{
3651	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3652}
3653
3654#ifdef CONFIG_MMU
3655static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3656					struct cftype *cft, u64 val)
3657{
3658	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3659
3660	if (val >= (1 << NR_MOVE_TYPE))
3661		return -EINVAL;
3662	/*
3663	 * We check this value several times in both can_attach() and
3664	 * attach(), so we need the cgroup lock to prevent this value from being
3665	 * inconsistent.
3666	 */
3667	cgroup_lock();
3668	mem->move_charge_at_immigrate = val;
3669	cgroup_unlock();
3670
3671	return 0;
3672}
3673#else
3674static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3675					struct cftype *cft, u64 val)
3676{
3677	return -ENOSYS;
3678}
3679#endif
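
/*
 * move_charge_at_immigrate is a bitmask written from userspace; in this
 * kernel bit 0 selects moving of anonymous pages (and their swap) and
 * bit 1 selects moving of file pages when a task migrates into the group,
 * e.g. (from the cgroup's directory):
 *
 *	# echo 3 > memory.move_charge_at_immigrate
 */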
3680
3681
3682/* For read statistics */
3683enum {
3684	MCS_CACHE,
3685	MCS_RSS,
3686	MCS_FILE_MAPPED,
3687	MCS_PGPGIN,
3688	MCS_PGPGOUT,
3689	MCS_SWAP,
3690	MCS_INACTIVE_ANON,
3691	MCS_ACTIVE_ANON,
3692	MCS_INACTIVE_FILE,
3693	MCS_ACTIVE_FILE,
3694	MCS_UNEVICTABLE,
3695	NR_MCS_STAT,
3696};
3697
3698struct mcs_total_stat {
3699	s64 stat[NR_MCS_STAT];
3700};
3701
3702struct {
3703	char *local_name;
3704	char *total_name;
3705} memcg_stat_strings[NR_MCS_STAT] = {
3706	{"cache", "total_cache"},
3707	{"rss", "total_rss"},
3708	{"mapped_file", "total_mapped_file"},
3709	{"pgpgin", "total_pgpgin"},
3710	{"pgpgout", "total_pgpgout"},
3711	{"swap", "total_swap"},
3712	{"inactive_anon", "total_inactive_anon"},
3713	{"active_anon", "total_active_anon"},
3714	{"inactive_file", "total_inactive_file"},
3715	{"active_file", "total_active_file"},
3716	{"unevictable", "total_unevictable"}
3717};
3718
3719
3720static void
3721mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3722{
3723	s64 val;
3724
3725	/* per cpu stat */
3726	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
3727	s->stat[MCS_CACHE] += val * PAGE_SIZE;
3728	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
3729	s->stat[MCS_RSS] += val * PAGE_SIZE;
3730	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
3731	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
3732	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
3733	s->stat[MCS_PGPGIN] += val;
3734	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
3735	s->stat[MCS_PGPGOUT] += val;
3736	if (do_swap_account) {
3737		val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
3738		s->stat[MCS_SWAP] += val * PAGE_SIZE;
3739	}
3740
3741	/* per zone stat */
3742	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
3743	s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
3744	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
3745	s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
3746	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
3747	s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
3748	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
3749	s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
3750	val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
3751	s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
3752}
3753
3754static void
3755mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3756{
3757	struct mem_cgroup *iter;
3758
3759	for_each_mem_cgroup_tree(iter, mem)
3760		mem_cgroup_get_local_stat(iter, s);
3761}
3762
3763static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3764				 struct cgroup_map_cb *cb)
3765{
3766	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
3767	struct mcs_total_stat mystat;
3768	int i;
3769
3770	memset(&mystat, 0, sizeof(mystat));
3771	mem_cgroup_get_local_stat(mem_cont, &mystat);
3772
3773	for (i = 0; i < NR_MCS_STAT; i++) {
3774		if (i == MCS_SWAP && !do_swap_account)
3775			continue;
3776		cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
3777	}
3778
3779	/* Hierarchical information */
3780	{
3781		unsigned long long limit, memsw_limit;
3782		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
3783		cb->fill(cb, "hierarchical_memory_limit", limit);
3784		if (do_swap_account)
3785			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
3786	}
3787
3788	memset(&mystat, 0, sizeof(mystat));
3789	mem_cgroup_get_total_stat(mem_cont, &mystat);
3790	for (i = 0; i < NR_MCS_STAT; i++) {
3791		if (i == MCS_SWAP && !do_swap_account)
3792			continue;
3793		cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
3794	}
3795
3796#ifdef CONFIG_DEBUG_VM
3797	cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
3798
3799	{
3800		int nid, zid;
3801		struct mem_cgroup_per_zone *mz;
3802		unsigned long recent_rotated[2] = {0, 0};
3803		unsigned long recent_scanned[2] = {0, 0};
3804
3805		for_each_online_node(nid)
3806			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3807				mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
3808
3809				recent_rotated[0] +=
3810					mz->reclaim_stat.recent_rotated[0];
3811				recent_rotated[1] +=
3812					mz->reclaim_stat.recent_rotated[1];
3813				recent_scanned[0] +=
3814					mz->reclaim_stat.recent_scanned[0];
3815				recent_scanned[1] +=
3816					mz->reclaim_stat.recent_scanned[1];
3817			}
3818		cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
3819		cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
3820		cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
3821		cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
3822	}
3823#endif
3824
3825	return 0;
3826}
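
/*
 * memory.stat (read via mem_control_stat_show above) thus prints, in order:
 * the local counters named in memcg_stat_strings[], the hierarchical limits,
 * the same counters prefixed with "total_" summed over the whole subtree,
 * and, on CONFIG_DEBUG_VM kernels, inactive_ratio plus the recent_rotated/
 * recent_scanned reclaim statistics.  "swap"/"total_swap" only appear when
 * swap accounting is enabled.
 */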
3827
3828static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
3829{
3830	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3831
3832	return get_swappiness(memcg);
3833}
3834
3835static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
3836				       u64 val)
3837{
3838	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3839	struct mem_cgroup *parent;
3840
3841	if (val > 100)
3842		return -EINVAL;
3843
3844	if (cgrp->parent == NULL)
3845		return -EINVAL;
3846
3847	parent = mem_cgroup_from_cont(cgrp->parent);
3848
3849	cgroup_lock();
3850
3851	/* If under hierarchy, only empty-root can set this value */
3852	if ((parent->use_hierarchy) ||
3853	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
3854		cgroup_unlock();
3855		return -EINVAL;
3856	}
3857
3858	memcg->swappiness = val;
3859
3860	cgroup_unlock();
3861
3862	return 0;
3863}
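
/*
 * Illustrative usage:
 *
 *   echo 10 > memory.swappiness    # accepted range: 0..100
 *
 * Writes are rejected with -EINVAL for the root cgroup, for values above
 * 100, and when the group is already part of a hierarchy (its parent has
 * use_hierarchy set, or it has children while use_hierarchy is set).
 */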
3864
3865static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3866{
3867	struct mem_cgroup_threshold_ary *t;
3868	u64 usage;
3869	int i;
3870
3871	rcu_read_lock();
3872	if (!swap)
3873		t = rcu_dereference(memcg->thresholds.primary);
3874	else
3875		t = rcu_dereference(memcg->memsw_thresholds.primary);
3876
3877	if (!t)
3878		goto unlock;
3879
3880	usage = mem_cgroup_usage(memcg, swap);
3881
3882	/*
3883	 * current_threshold points to the threshold just below usage.
3884	 * If that is no longer true, a threshold was crossed after the
3885	 * last call of __mem_cgroup_threshold().
3886	 */
3887	i = t->current_threshold;
3888
3889	/*
3890	 * Iterate backward over array of thresholds starting from
3891	 * current_threshold and check if a threshold is crossed.
3892	 * If none of thresholds below usage is crossed, we read
3893	 * only one element of the array here.
3894	 */
3895	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3896		eventfd_signal(t->entries[i].eventfd, 1);
3897
3898	/* i = current_threshold + 1 */
3899	i++;
3900
3901	/*
3902	 * Iterate forward over array of thresholds starting from
3903	 * current_threshold+1 and check if a threshold is crossed.
3904	 * If none of thresholds above usage is crossed, we read
3905	 * only one element of the array here.
3906	 */
3907	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3908		eventfd_signal(t->entries[i].eventfd, 1);
3909
3910	/* Update current_threshold */
3911	t->current_threshold = i - 1;
3912unlock:
3913	rcu_read_unlock();
3914}
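
/*
 * Worked example (hypothetical numbers): with thresholds sorted as
 * {4M, 8M, 16M} and current_threshold pointing at 4M, a usage reading of
 * 10M makes the backward loop do nothing (4M <= 10M), the forward loop
 * signal the 8M eventfd, and current_threshold end up at 8M.  If usage
 * later drops to 3M, the backward loop signals 8M and then 4M, the forward
 * loop does nothing, and current_threshold becomes -1.
 */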
3915
3916static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3917{
3918	while (memcg) {
3919		__mem_cgroup_threshold(memcg, false);
3920		if (do_swap_account)
3921			__mem_cgroup_threshold(memcg, true);
3922
3923		memcg = parent_mem_cgroup(memcg);
3924	}
3925}
3926
3927static int compare_thresholds(const void *a, const void *b)
3928{
3929	const struct mem_cgroup_threshold *_a = a;
3930	const struct mem_cgroup_threshold *_b = b;
3931
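	/* thresholds are u64 - don't return a truncated/overflowed int difference */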
3932	return (_a->threshold > _b->threshold) - (_a->threshold < _b->threshold);
3933}
3934
3935static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
3936{
3937	struct mem_cgroup_eventfd_list *ev;
3938
3939	list_for_each_entry(ev, &mem->oom_notify, list)
3940		eventfd_signal(ev->eventfd, 1);
3941	return 0;
3942}
3943
3944static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
3945{
3946	struct mem_cgroup *iter;
3947
3948	for_each_mem_cgroup_tree(iter, mem)
3949		mem_cgroup_oom_notify_cb(iter);
3950}
3951
3952static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
3953	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3954{
3955	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3956	struct mem_cgroup_thresholds *thresholds;
3957	struct mem_cgroup_threshold_ary *new;
3958	int type = MEMFILE_TYPE(cft->private);
3959	u64 threshold, usage;
3960	int i, size, ret;
3961
3962	ret = res_counter_memparse_write_strategy(args, &threshold);
3963	if (ret)
3964		return ret;
3965
3966	mutex_lock(&memcg->thresholds_lock);
3967
3968	if (type == _MEM)
3969		thresholds = &memcg->thresholds;
3970	else if (type == _MEMSWAP)
3971		thresholds = &memcg->memsw_thresholds;
3972	else
3973		BUG();
3974
3975	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3976
3977	/* Check if a threshold crossed before adding a new one */
3978	if (thresholds->primary)
3979		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3980
3981	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3982
3983	/* Allocate memory for new array of thresholds */
3984	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3985			GFP_KERNEL);
3986	if (!new) {
3987		ret = -ENOMEM;
3988		goto unlock;
3989	}
3990	new->size = size;
3991
3992	/* Copy thresholds (if any) to new array */
3993	if (thresholds->primary) {
3994		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3995				sizeof(struct mem_cgroup_threshold));
3996	}
3997
3998	/* Add new threshold */
3999	new->entries[size - 1].eventfd = eventfd;
4000	new->entries[size - 1].threshold = threshold;
4001
4002	/* Sort thresholds. Registering a new threshold isn't time-critical */
4003	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4004			compare_thresholds, NULL);
4005
4006	/* Find current threshold */
4007	new->current_threshold = -1;
4008	for (i = 0; i < size; i++) {
4009		if (new->entries[i].threshold < usage) {
4010			/*
4011			 * new->current_threshold will not be used until
4012			 * rcu_assign_pointer(), so it's safe to increment
4013			 * it here.
4014			 */
4015			++new->current_threshold;
4016		}
4017	}
4018
4019	/* Free old spare buffer and save old primary buffer as spare */
4020	kfree(thresholds->spare);
4021	thresholds->spare = thresholds->primary;
4022
4023	rcu_assign_pointer(thresholds->primary, new);
4024
4025	/* To be sure that nobody uses thresholds */
4026	synchronize_rcu();
4027
4028unlock:
4029	mutex_unlock(&memcg->thresholds_lock);
4030
4031	return ret;
4032}
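
/*
 * Userspace registers a threshold through cgroup.event_control, roughly:
 *
 *   efd = eventfd(0, 0);
 *   ufd = open("memory.usage_in_bytes", O_RDONLY);  (or memsw.usage_in_bytes)
 *   write "<efd> <ufd> <threshold in bytes>" to cgroup.event_control
 *
 * which ends up calling mem_cgroup_usage_register_event() above; the eventfd
 * is then signalled every time usage crosses the given threshold.
 */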
4033
4034static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
4035	struct cftype *cft, struct eventfd_ctx *eventfd)
4036{
4037	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4038	struct mem_cgroup_thresholds *thresholds;
4039	struct mem_cgroup_threshold_ary *new;
4040	int type = MEMFILE_TYPE(cft->private);
4041	u64 usage;
4042	int i, j, size;
4043
4044	mutex_lock(&memcg->thresholds_lock);
4045	if (type == _MEM)
4046		thresholds = &memcg->thresholds;
4047	else if (type == _MEMSWAP)
4048		thresholds = &memcg->memsw_thresholds;
4049	else
4050		BUG();
4051
4052	/*
4053	 * Something went wrong if we are trying to unregister a threshold
4054	 * while we don't have any thresholds.
4055	 */
4056	BUG_ON(!thresholds);
4057
4058	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4059
4060	/* Check if a threshold crossed before removing */
4061	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4062
4063	/* Calculate the new number of thresholds */
4064	size = 0;
4065	for (i = 0; i < thresholds->primary->size; i++) {
4066		if (thresholds->primary->entries[i].eventfd != eventfd)
4067			size++;
4068	}
4069
4070	new = thresholds->spare;
4071
4072	/* Set thresholds array to NULL if we don't have thresholds */
4073	if (!size) {
4074		kfree(new);
4075		new = NULL;
4076		goto swap_buffers;
4077	}
4078
4079	new->size = size;
4080
4081	/* Copy thresholds and find current threshold */
4082	new->current_threshold = -1;
4083	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4084		if (thresholds->primary->entries[i].eventfd == eventfd)
4085			continue;
4086
4087		new->entries[j] = thresholds->primary->entries[i];
4088		if (new->entries[j].threshold < usage) {
4089			/*
4090			 * new->current_threshold will not be used
4091			 * until rcu_assign_pointer(), so it's safe to increment
4092			 * it here.
4093			 */
4094			++new->current_threshold;
4095		}
4096		j++;
4097	}
4098
4099swap_buffers:
4100	/* Swap primary and spare array */
4101	thresholds->spare = thresholds->primary;
4102	rcu_assign_pointer(thresholds->primary, new);
4103
4104	/* To be sure that nobody uses thresholds */
4105	synchronize_rcu();
4106
4107	mutex_unlock(&memcg->thresholds_lock);
4108}
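
/*
 * The register/unregister paths above use a primary/spare double buffer:
 * readers in __mem_cgroup_threshold() dereference ->primary under
 * rcu_read_lock(), while writers (serialized by thresholds_lock) build the
 * new array, publish it with rcu_assign_pointer(), keep the old primary as
 * the spare for the next update, and call synchronize_rcu() so no reader
 * can still be using the old array by the time it may be reused or freed.
 */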
4109
4110static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4111	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4112{
4113	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4114	struct mem_cgroup_eventfd_list *event;
4115	int type = MEMFILE_TYPE(cft->private);
4116
4117	BUG_ON(type != _OOM_TYPE);
4118	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4119	if (!event)
4120		return -ENOMEM;
4121
4122	mutex_lock(&memcg_oom_mutex);
4123
4124	event->eventfd = eventfd;
4125	list_add(&event->list, &memcg->oom_notify);
4126
4127	/* already in OOM ? */
4128	if (atomic_read(&memcg->oom_lock))
4129		eventfd_signal(eventfd, 1);
4130	mutex_unlock(&memcg_oom_mutex);
4131
4132	return 0;
4133}
4134
4135static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
4136	struct cftype *cft, struct eventfd_ctx *eventfd)
4137{
4138	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4139	struct mem_cgroup_eventfd_list *ev, *tmp;
4140	int type = MEMFILE_TYPE(cft->private);
4141
4142	BUG_ON(type != _OOM_TYPE);
4143
4144	mutex_lock(&memcg_oom_mutex);
4145
4146	list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
4147		if (ev->eventfd == eventfd) {
4148			list_del(&ev->list);
4149			kfree(ev);
4150		}
4151	}
4152
4153	mutex_unlock(&memcg_oom_mutex);
4154}
4155
4156static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
4157	struct cftype *cft,  struct cgroup_map_cb *cb)
4158{
4159	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4160
4161	cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
4162
4163	if (atomic_read(&mem->oom_lock))
4164		cb->fill(cb, "under_oom", 1);
4165	else
4166		cb->fill(cb, "under_oom", 0);
4167	return 0;
4168}
4169
4170static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4171	struct cftype *cft, u64 val)
4172{
4173	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4174	struct mem_cgroup *parent;
4175
4176	/* cannot set to root cgroup and only 0 and 1 are allowed */
4177	if (!cgrp->parent || !((val == 0) || (val == 1)))
4178		return -EINVAL;
4179
4180	parent = mem_cgroup_from_cont(cgrp->parent);
4181
4182	cgroup_lock();
4183	/* oom-kill-disable is a flag for subhierarchy. */
4184	if ((parent->use_hierarchy) ||
4185	    (mem->use_hierarchy && !list_empty(&cgrp->children))) {
4186		cgroup_unlock();
4187		return -EINVAL;
4188	}
4189	mem->oom_kill_disable = val;
4190	if (!val)
4191		memcg_oom_recover(mem);
4192	cgroup_unlock();
4193	return 0;
4194}
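
/*
 * Illustrative usage of memory.oom_control:
 *
 *   echo 1 > memory.oom_control      # disable the OOM killer for this memcg
 *
 * Tasks that hit the limit then sleep in the memcg's OOM waitqueue instead
 * of being killed, and are woken up when memory is freed or the limit is
 * raised (see memcg_oom_recover()).  An eventfd registered against this
 * file via cgroup.event_control is signalled whenever the group enters OOM;
 * reading the file reports oom_kill_disable and under_oom.
 */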
4195
4196static struct cftype mem_cgroup_files[] = {
4197	{
4198		.name = "usage_in_bytes",
4199		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4200		.read_u64 = mem_cgroup_read,
4201		.register_event = mem_cgroup_usage_register_event,
4202		.unregister_event = mem_cgroup_usage_unregister_event,
4203	},
4204	{
4205		.name = "max_usage_in_bytes",
4206		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4207		.trigger = mem_cgroup_reset,
4208		.read_u64 = mem_cgroup_read,
4209	},
4210	{
4211		.name = "limit_in_bytes",
4212		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4213		.write_string = mem_cgroup_write,
4214		.read_u64 = mem_cgroup_read,
4215	},
4216	{
4217		.name = "soft_limit_in_bytes",
4218		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4219		.write_string = mem_cgroup_write,
4220		.read_u64 = mem_cgroup_read,
4221	},
4222	{
4223		.name = "failcnt",
4224		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4225		.trigger = mem_cgroup_reset,
4226		.read_u64 = mem_cgroup_read,
4227	},
4228	{
4229		.name = "stat",
4230		.read_map = mem_control_stat_show,
4231	},
4232	{
4233		.name = "force_empty",
4234		.trigger = mem_cgroup_force_empty_write,
4235	},
4236	{
4237		.name = "use_hierarchy",
4238		.write_u64 = mem_cgroup_hierarchy_write,
4239		.read_u64 = mem_cgroup_hierarchy_read,
4240	},
4241	{
4242		.name = "swappiness",
4243		.read_u64 = mem_cgroup_swappiness_read,
4244		.write_u64 = mem_cgroup_swappiness_write,
4245	},
4246	{
4247		.name = "move_charge_at_immigrate",
4248		.read_u64 = mem_cgroup_move_charge_read,
4249		.write_u64 = mem_cgroup_move_charge_write,
4250	},
4251	{
4252		.name = "oom_control",
4253		.read_map = mem_cgroup_oom_control_read,
4254		.write_u64 = mem_cgroup_oom_control_write,
4255		.register_event = mem_cgroup_oom_register_event,
4256		.unregister_event = mem_cgroup_oom_unregister_event,
4257		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4258	},
4259};
4260
4261#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4262static struct cftype memsw_cgroup_files[] = {
4263	{
4264		.name = "memsw.usage_in_bytes",
4265		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4266		.read_u64 = mem_cgroup_read,
4267		.register_event = mem_cgroup_usage_register_event,
4268		.unregister_event = mem_cgroup_usage_unregister_event,
4269	},
4270	{
4271		.name = "memsw.max_usage_in_bytes",
4272		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4273		.trigger = mem_cgroup_reset,
4274		.read_u64 = mem_cgroup_read,
4275	},
4276	{
4277		.name = "memsw.limit_in_bytes",
4278		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4279		.write_string = mem_cgroup_write,
4280		.read_u64 = mem_cgroup_read,
4281	},
4282	{
4283		.name = "memsw.failcnt",
4284		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4285		.trigger = mem_cgroup_reset,
4286		.read_u64 = mem_cgroup_read,
4287	},
4288};
4289
4290static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4291{
4292	if (!do_swap_account)
4293		return 0;
4294	return cgroup_add_files(cont, ss, memsw_cgroup_files,
4295				ARRAY_SIZE(memsw_cgroup_files));
4296}
4297#else
4298static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4299{
4300	return 0;
4301}
4302#endif
4303
4304static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4305{
4306	struct mem_cgroup_per_node *pn;
4307	struct mem_cgroup_per_zone *mz;
4308	enum lru_list l;
4309	int zone, tmp = node;
4310	/*
4311	 * This routine is called against possible nodes.
4312	 * But it's a BUG to call kmalloc() against an offline node.
4313	 *
4314	 * TODO: this routine can waste much memory for nodes which will
4315	 *       never be onlined. It's better to use memory hotplug callback
4316	 *       function.
4317	 */
4318	if (!node_state(node, N_NORMAL_MEMORY))
4319		tmp = -1;
4320	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4321	if (!pn)
4322		return 1;
4323
4324	mem->info.nodeinfo[node] = pn;
4325	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4326		mz = &pn->zoneinfo[zone];
4327		for_each_lru(l)
4328			INIT_LIST_HEAD(&mz->lists[l]);
4329		mz->usage_in_excess = 0;
4330		mz->on_tree = false;
4331		mz->mem = mem;
4332	}
4333	return 0;
4334}
4335
4336static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4337{
4338	kfree(mem->info.nodeinfo[node]);
4339}
4340
4341static struct mem_cgroup *mem_cgroup_alloc(void)
4342{
4343	struct mem_cgroup *mem;
4344	int size = sizeof(struct mem_cgroup);
4345
4346	/* Can be very big if MAX_NUMNODES is very big */
4347	if (size < PAGE_SIZE)
4348		mem = kzalloc(size, GFP_KERNEL);
4349	else
4350		mem = vzalloc(size);
4351
4352	if (!mem)
4353		return NULL;
4354
4355	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4356	if (!mem->stat)
4357		goto out_free;
4358	spin_lock_init(&mem->pcp_counter_lock);
4359	return mem;
4360
4361out_free:
4362	if (size < PAGE_SIZE)
4363		kfree(mem);
4364	else
4365		vfree(mem);
4366	return NULL;
4367}
4368
4369/*
4370 * When a mem_cgroup is destroyed, references from swap_cgroup can remain.
4371 * (scanning all of them at force_empty is too costly...)
4372 *
4373 * Instead of clearing all references at force_empty, we remember
4374 * the number of references from swap_cgroup and free the mem_cgroup when
4375 * it goes down to 0.
4376 *
4377 * Removal of cgroup itself succeeds regardless of refs from swap.
4378 */
4379
4380static void __mem_cgroup_free(struct mem_cgroup *mem)
4381{
4382	int node;
4383
4384	mem_cgroup_remove_from_trees(mem);
4385	free_css_id(&mem_cgroup_subsys, &mem->css);
4386
4387	for_each_node_state(node, N_POSSIBLE)
4388		free_mem_cgroup_per_zone_info(mem, node);
4389
4390	free_percpu(mem->stat);
4391	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
4392		kfree(mem);
4393	else
4394		vfree(mem);
4395}
4396
4397static void mem_cgroup_get(struct mem_cgroup *mem)
4398{
4399	atomic_inc(&mem->refcnt);
4400}
4401
4402static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
4403{
4404	if (atomic_sub_and_test(count, &mem->refcnt)) {
4405		struct mem_cgroup *parent = parent_mem_cgroup(mem);
4406		__mem_cgroup_free(mem);
4407		if (parent)
4408			mem_cgroup_put(parent);
4409	}
4410}
4411
4412static void mem_cgroup_put(struct mem_cgroup *mem)
4413{
4414	__mem_cgroup_put(mem, 1);
4415}
4416
4417/*
4418 * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
4419 */
4420static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
4421{
4422	if (!mem->res.parent)
4423		return NULL;
4424	return mem_cgroup_from_res_counter(mem->res.parent, res);
4425}
4426
4427#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4428static void __init enable_swap_cgroup(void)
4429{
4430	if (!mem_cgroup_disabled() && really_do_swap_account)
4431		do_swap_account = 1;
4432}
4433#else
4434static void __init enable_swap_cgroup(void)
4435{
4436}
4437#endif
4438
4439static int mem_cgroup_soft_limit_tree_init(void)
4440{
4441	struct mem_cgroup_tree_per_node *rtpn;
4442	struct mem_cgroup_tree_per_zone *rtpz;
4443	int tmp, node, zone;
4444
4445	for_each_node_state(node, N_POSSIBLE) {
4446		tmp = node;
4447		if (!node_state(node, N_NORMAL_MEMORY))
4448			tmp = -1;
4449		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4450		if (!rtpn)
4451			return 1;
4452
4453		soft_limit_tree.rb_tree_per_node[node] = rtpn;
4454
4455		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4456			rtpz = &rtpn->rb_tree_per_zone[zone];
4457			rtpz->rb_root = RB_ROOT;
4458			spin_lock_init(&rtpz->lock);
4459		}
4460	}
4461	return 0;
4462}
4463
4464static struct cgroup_subsys_state * __ref
4465mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4466{
4467	struct mem_cgroup *mem, *parent;
4468	long error = -ENOMEM;
4469	int node;
4470
4471	mem = mem_cgroup_alloc();
4472	if (!mem)
4473		return ERR_PTR(error);
4474
4475	for_each_node_state(node, N_POSSIBLE)
4476		if (alloc_mem_cgroup_per_zone_info(mem, node))
4477			goto free_out;
4478
4479	/* root ? */
4480	if (cont->parent == NULL) {
4481		int cpu;
4482		enable_swap_cgroup();
4483		parent = NULL;
4484		root_mem_cgroup = mem;
4485		if (mem_cgroup_soft_limit_tree_init())
4486			goto free_out;
4487		for_each_possible_cpu(cpu) {
4488			struct memcg_stock_pcp *stock =
4489						&per_cpu(memcg_stock, cpu);
4490			INIT_WORK(&stock->work, drain_local_stock);
4491		}
4492		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
4493	} else {
4494		parent = mem_cgroup_from_cont(cont->parent);
4495		mem->use_hierarchy = parent->use_hierarchy;
4496		mem->oom_kill_disable = parent->oom_kill_disable;
4497	}
4498
4499	if (parent && parent->use_hierarchy) {
4500		res_counter_init(&mem->res, &parent->res);
4501		res_counter_init(&mem->memsw, &parent->memsw);
4502		/*
4503		 * We increment refcnt of the parent to ensure that we can
4504		 * safely access it on res_counter_charge/uncharge.
4505		 * This refcnt will be decremented when freeing this
4506		 * mem_cgroup(see mem_cgroup_put).
4507		 */
4508		mem_cgroup_get(parent);
4509	} else {
4510		res_counter_init(&mem->res, NULL);
4511		res_counter_init(&mem->memsw, NULL);
4512	}
4513	mem->last_scanned_child = 0;
4514	INIT_LIST_HEAD(&mem->oom_notify);
4515
4516	if (parent)
4517		mem->swappiness = get_swappiness(parent);
4518	atomic_set(&mem->refcnt, 1);
4519	mem->move_charge_at_immigrate = 0;
4520	mutex_init(&mem->thresholds_lock);
4521	return &mem->css;
4522free_out:
4523	__mem_cgroup_free(mem);
4524	root_mem_cgroup = NULL;
4525	return ERR_PTR(error);
4526}
4527
4528static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
4529					struct cgroup *cont)
4530{
4531	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4532
4533	return mem_cgroup_force_empty(mem, false);
4534}
4535
4536static void mem_cgroup_destroy(struct cgroup_subsys *ss,
4537				struct cgroup *cont)
4538{
4539	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4540
4541	mem_cgroup_put(mem);
4542}
4543
4544static int mem_cgroup_populate(struct cgroup_subsys *ss,
4545				struct cgroup *cont)
4546{
4547	int ret;
4548
4549	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
4550				ARRAY_SIZE(mem_cgroup_files));
4551
4552	if (!ret)
4553		ret = register_memsw_files(cont, ss);
4554	return ret;
4555}
4556
4557#ifdef CONFIG_MMU
4558/* Handlers for move charge at task migration. */
4559#define PRECHARGE_COUNT_AT_ONCE	256
4560static int mem_cgroup_do_precharge(unsigned long count)
4561{
4562	int ret = 0;
4563	int batch_count = PRECHARGE_COUNT_AT_ONCE;
4564	struct mem_cgroup *mem = mc.to;
4565
4566	if (mem_cgroup_is_root(mem)) {
4567		mc.precharge += count;
4568		/* we don't need css_get for root */
4569		return ret;
4570	}
4571	/* try to charge at once */
4572	if (count > 1) {
4573		struct res_counter *dummy;
4574		/*
4575		 * "mem" cannot be under rmdir() because we've already checked
4576		 * by cgroup_lock_live_cgroup() that it is not removed and we
4577		 * are still under the same cgroup_mutex. So we can postpone
4578		 * css_get().
4579		 */
4580		if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
4581			goto one_by_one;
4582		if (do_swap_account && res_counter_charge(&mem->memsw,
4583						PAGE_SIZE * count, &dummy)) {
4584			res_counter_uncharge(&mem->res, PAGE_SIZE * count);
4585			goto one_by_one;
4586		}
4587		mc.precharge += count;
4588		return ret;
4589	}
4590one_by_one:
4591	/* fall back to one by one charge */
4592	while (count--) {
4593		if (signal_pending(current)) {
4594			ret = -EINTR;
4595			break;
4596		}
4597		if (!batch_count--) {
4598			batch_count = PRECHARGE_COUNT_AT_ONCE;
4599			cond_resched();
4600		}
4601		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
4602					      PAGE_SIZE);
4603		if (ret || !mem)
4604			/* mem_cgroup_clear_mc() will do uncharge later */
4605			return -ENOMEM;
4606		mc.precharge++;
4607	}
4608	return ret;
4609}
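
/*
 * Precharging thus tries a single bulk res_counter charge for the whole
 * batch first and only falls back to page-by-page __mem_cgroup_try_charge()
 * (rescheduling every PRECHARGE_COUNT_AT_ONCE pages and honouring pending
 * signals) when the bulk charge fails.
 */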
4610
4611/**
4612 * is_target_pte_for_mc - check whether a pte is a valid target for move charge
4613 * @vma: the vma the pte to be checked belongs to
4614 * @addr: the address corresponding to the pte to be checked
4615 * @ptent: the pte to be checked
4616 * @target: pointer where the target page or swap entry is stored (can be NULL)
4617 *
4618 * Returns
4619 *   0 (MC_TARGET_NONE): the pte is not a target for move charge.
4620 *   1 (MC_TARGET_PAGE): the page corresponding to this pte is a target for
4621 *     move charge. If @target is not NULL, the page is stored in target->page
4622 *     with an extra refcount taken (callers should handle it).
4623 *   2 (MC_TARGET_SWAP): the swap entry corresponding to this pte is a
4624 *     target for charge migration. If @target is not NULL, the entry is
4625 *     stored in target->ent.
4626 *
4627 * Called with pte lock held.
4628 */
4629union mc_target {
4630	struct page	*page;
4631	swp_entry_t	ent;
4632};
4633
4634enum mc_target_type {
4635	MC_TARGET_NONE,	/* not used */
4636	MC_TARGET_PAGE,
4637	MC_TARGET_SWAP,
4638};
4639
4640static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4641						unsigned long addr, pte_t ptent)
4642{
4643	struct page *page = vm_normal_page(vma, addr, ptent);
4644
4645	if (!page || !page_mapped(page))
4646		return NULL;
4647	if (PageAnon(page)) {
4648		/* we don't move shared anon */
4649		if (!move_anon() || page_mapcount(page) > 2)
4650			return NULL;
4651	} else if (!move_file())
4652		/* we ignore mapcount for file pages */
4653		return NULL;
4654	if (!get_page_unless_zero(page))
4655		return NULL;
4656
4657	return page;
4658}
4659
4660static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4661			unsigned long addr, pte_t ptent, swp_entry_t *entry)
4662{
4663	int usage_count;
4664	struct page *page = NULL;
4665	swp_entry_t ent = pte_to_swp_entry(ptent);
4666
4667	if (!move_anon() || non_swap_entry(ent))
4668		return NULL;
4669	usage_count = mem_cgroup_count_swap_user(ent, &page);
4670	if (usage_count > 1) { /* we don't move shared anon */
4671		if (page)
4672			put_page(page);
4673		return NULL;
4674	}
4675	if (do_swap_account)
4676		entry->val = ent.val;
4677
4678	return page;
4679}
4680
4681static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4682			unsigned long addr, pte_t ptent, swp_entry_t *entry)
4683{
4684	struct page *page = NULL;
4685	struct inode *inode;
4686	struct address_space *mapping;
4687	pgoff_t pgoff;
4688
4689	if (!vma->vm_file) /* anonymous vma */
4690		return NULL;
4691	if (!move_file())
4692		return NULL;
4693
4694	inode = vma->vm_file->f_path.dentry->d_inode;
4695	mapping = vma->vm_file->f_mapping;
4696	if (pte_none(ptent))
4697		pgoff = linear_page_index(vma, addr);
4698	else /* pte_file(ptent) is true */
4699		pgoff = pte_to_pgoff(ptent);
4700
4701	/* page is moved even if it's not RSS of this task(page-faulted). */
4702	if (!mapping_cap_swap_backed(mapping)) { /* normal file */
4703		page = find_get_page(mapping, pgoff);
4704	} else { /* shmem/tmpfs file. we should take account of swap too. */
4705		swp_entry_t ent;
4706		mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
4707		if (do_swap_account)
4708			entry->val = ent.val;
4709	}
4710
4711	return page;
4712}
4713
4714static int is_target_pte_for_mc(struct vm_area_struct *vma,
4715		unsigned long addr, pte_t ptent, union mc_target *target)
4716{
4717	struct page *page = NULL;
4718	struct page_cgroup *pc;
4719	int ret = 0;
4720	swp_entry_t ent = { .val = 0 };
4721
4722	if (pte_present(ptent))
4723		page = mc_handle_present_pte(vma, addr, ptent);
4724	else if (is_swap_pte(ptent))
4725		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4726	else if (pte_none(ptent) || pte_file(ptent))
4727		page = mc_handle_file_pte(vma, addr, ptent, &ent);
4728
4729	if (!page && !ent.val)
4730		return 0;
4731	if (page) {
4732		pc = lookup_page_cgroup(page);
4733		/*
4734		 * Do only loose check w/o page_cgroup lock.
4735		 * mem_cgroup_move_account() checks the pc is valid or not under
4736		 * the lock.
4737		 */
4738		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
4739			ret = MC_TARGET_PAGE;
4740			if (target)
4741				target->page = page;
4742		}
4743		if (!ret || !target)
4744			put_page(page);
4745	}
4746	/* There is a swap entry and a page doesn't exist or isn't charged */
4747	if (ent.val && !ret &&
4748			css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
4749		ret = MC_TARGET_SWAP;
4750		if (target)
4751			target->ent = ent;
4752	}
4753	return ret;
4754}
4755
4756static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4757					unsigned long addr, unsigned long end,
4758					struct mm_walk *walk)
4759{
4760	struct vm_area_struct *vma = walk->private;
4761	pte_t *pte;
4762	spinlock_t *ptl;
4763
4764	split_huge_page_pmd(walk->mm, pmd);
4765
4766	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4767	for (; addr != end; pte++, addr += PAGE_SIZE)
4768		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
4769			mc.precharge++;	/* increment precharge temporarily */
4770	pte_unmap_unlock(pte - 1, ptl);
4771	cond_resched();
4772
4773	return 0;
4774}
4775
4776static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4777{
4778	unsigned long precharge;
4779	struct vm_area_struct *vma;
4780
4781	down_read(&mm->mmap_sem);
4782	for (vma = mm->mmap; vma; vma = vma->vm_next) {
4783		struct mm_walk mem_cgroup_count_precharge_walk = {
4784			.pmd_entry = mem_cgroup_count_precharge_pte_range,
4785			.mm = mm,
4786			.private = vma,
4787		};
4788		if (is_vm_hugetlb_page(vma))
4789			continue;
4790		walk_page_range(vma->vm_start, vma->vm_end,
4791					&mem_cgroup_count_precharge_walk);
4792	}
4793	up_read(&mm->mmap_sem);
4794
4795	precharge = mc.precharge;
4796	mc.precharge = 0;
4797
4798	return precharge;
4799}
4800
4801static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4802{
4803	unsigned long precharge = mem_cgroup_count_precharge(mm);
4804
4805	VM_BUG_ON(mc.moving_task);
4806	mc.moving_task = current;
4807	return mem_cgroup_do_precharge(precharge);
4808}
4809
4810/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4811static void __mem_cgroup_clear_mc(void)
4812{
4813	struct mem_cgroup *from = mc.from;
4814	struct mem_cgroup *to = mc.to;
4815
4816	/* we must uncharge all the leftover precharges from mc.to */
4817	if (mc.precharge) {
4818		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
4819		mc.precharge = 0;
4820	}
4821	/*
4822	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4823	 * we must uncharge here.
4824	 */
4825	if (mc.moved_charge) {
4826		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
4827		mc.moved_charge = 0;
4828	}
4829	/* we must fixup refcnts and charges */
4830	if (mc.moved_swap) {
4831		/* uncharge swap account from the old cgroup */
4832		if (!mem_cgroup_is_root(mc.from))
4833			res_counter_uncharge(&mc.from->memsw,
4834						PAGE_SIZE * mc.moved_swap);
4835		__mem_cgroup_put(mc.from, mc.moved_swap);
4836
4837		if (!mem_cgroup_is_root(mc.to)) {
4838			/*
4839			 * we charged both to->res and to->memsw, so we should
4840			 * uncharge to->res.
4841			 */
4842			res_counter_uncharge(&mc.to->res,
4843						PAGE_SIZE * mc.moved_swap);
4844		}
4845		/* we've already done mem_cgroup_get(mc.to) */
4846		mc.moved_swap = 0;
4847	}
4848	memcg_oom_recover(from);
4849	memcg_oom_recover(to);
4850	wake_up_all(&mc.waitq);
4851}
4852
4853static void mem_cgroup_clear_mc(void)
4854{
4855	struct mem_cgroup *from = mc.from;
4856
4857	/*
4858	 * we must clear moving_task before waking up waiters at the end of
4859	 * task migration.
4860	 */
4861	mc.moving_task = NULL;
4862	__mem_cgroup_clear_mc();
4863	spin_lock(&mc.lock);
4864	mc.from = NULL;
4865	mc.to = NULL;
4866	spin_unlock(&mc.lock);
4867	mem_cgroup_end_move(from);
4868}
4869
4870static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4871				struct cgroup *cgroup,
4872				struct task_struct *p,
4873				bool threadgroup)
4874{
4875	int ret = 0;
4876	struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
4877
4878	if (mem->move_charge_at_immigrate) {
4879		struct mm_struct *mm;
4880		struct mem_cgroup *from = mem_cgroup_from_task(p);
4881
4882		VM_BUG_ON(from == mem);
4883
4884		mm = get_task_mm(p);
4885		if (!mm)
4886			return 0;
4887		/* We move charges only when we move an owner of the mm */
4888		if (mm->owner == p) {
4889			VM_BUG_ON(mc.from);
4890			VM_BUG_ON(mc.to);
4891			VM_BUG_ON(mc.precharge);
4892			VM_BUG_ON(mc.moved_charge);
4893			VM_BUG_ON(mc.moved_swap);
4894			mem_cgroup_start_move(from);
4895			spin_lock(&mc.lock);
4896			mc.from = from;
4897			mc.to = mem;
4898			spin_unlock(&mc.lock);
4899			/* We set mc.moving_task later */
4900
4901			ret = mem_cgroup_precharge_mc(mm);
4902			if (ret)
4903				mem_cgroup_clear_mc();
4904		}
4905		mmput(mm);
4906	}
4907	return ret;
4908}
4909
4910static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4911				struct cgroup *cgroup,
4912				struct task_struct *p,
4913				bool threadgroup)
4914{
4915	mem_cgroup_clear_mc();
4916}
4917
4918static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4919				unsigned long addr, unsigned long end,
4920				struct mm_walk *walk)
4921{
4922	int ret = 0;
4923	struct vm_area_struct *vma = walk->private;
4924	pte_t *pte;
4925	spinlock_t *ptl;
4926
4927	split_huge_page_pmd(walk->mm, pmd);
4928retry:
4929	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4930	for (; addr != end; addr += PAGE_SIZE) {
4931		pte_t ptent = *(pte++);
4932		union mc_target target;
4933		int type;
4934		struct page *page;
4935		struct page_cgroup *pc;
4936		swp_entry_t ent;
4937
4938		if (!mc.precharge)
4939			break;
4940
4941		type = is_target_pte_for_mc(vma, addr, ptent, &target);
4942		switch (type) {
4943		case MC_TARGET_PAGE:
4944			page = target.page;
4945			if (isolate_lru_page(page))
4946				goto put;
4947			pc = lookup_page_cgroup(page);
4948			if (!mem_cgroup_move_account(page, pc,
4949					mc.from, mc.to, false, PAGE_SIZE)) {
4950				mc.precharge--;
4951				/* we uncharge from mc.from later. */
4952				mc.moved_charge++;
4953			}
4954			putback_lru_page(page);
4955put:			/* is_target_pte_for_mc() gets the page */
4956			put_page(page);
4957			break;
4958		case MC_TARGET_SWAP:
4959			ent = target.ent;
4960			if (!mem_cgroup_move_swap_account(ent,
4961						mc.from, mc.to, false)) {
4962				mc.precharge--;
4963				/* we fixup refcnts and charges later. */
4964				mc.moved_swap++;
4965			}
4966			break;
4967		default:
4968			break;
4969		}
4970	}
4971	pte_unmap_unlock(pte - 1, ptl);
4972	cond_resched();
4973
4974	if (addr != end) {
4975		/*
4976		 * We have consumed all precharges we got in can_attach().
4977		 * We try to charge one by one, but don't do any further
4978		 * charges to mc.to if we have already failed to charge once
4979		 * in the attach() phase.
4980		 */
4981		ret = mem_cgroup_do_precharge(1);
4982		if (!ret)
4983			goto retry;
4984	}
4985
4986	return ret;
4987}
4988
4989static void mem_cgroup_move_charge(struct mm_struct *mm)
4990{
4991	struct vm_area_struct *vma;
4992
4993	lru_add_drain_all();
4994retry:
4995	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
4996		/*
4997		 * Someone who is holding the mmap_sem might be waiting in the
4998		 * waitq. So we cancel all extra charges, wake up all waiters,
4999		 * and retry. Because we cancel precharges, we might not be able
5000		 * to move enough charges, but moving charge is a best-effort
5001		 * feature anyway, so it wouldn't be a big problem.
5002		 */
5003		__mem_cgroup_clear_mc();
5004		cond_resched();
5005		goto retry;
5006	}
5007	for (vma = mm->mmap; vma; vma = vma->vm_next) {
5008		int ret;
5009		struct mm_walk mem_cgroup_move_charge_walk = {
5010			.pmd_entry = mem_cgroup_move_charge_pte_range,
5011			.mm = mm,
5012			.private = vma,
5013		};
5014		if (is_vm_hugetlb_page(vma))
5015			continue;
5016		ret = walk_page_range(vma->vm_start, vma->vm_end,
5017						&mem_cgroup_move_charge_walk);
5018		if (ret)
5019			/*
5020			 * A non-zero return means we have consumed all precharges
5021			 * and failed to do an additional charge. Just abandon here.
5022			 */
5023			break;
5024	}
5025	up_read(&mm->mmap_sem);
5026}
5027
5028static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5029				struct cgroup *cont,
5030				struct cgroup *old_cont,
5031				struct task_struct *p,
5032				bool threadgroup)
5033{
5034	struct mm_struct *mm;
5035
5036	if (!mc.to)
5037		/* no need to move charge */
5038		return;
5039
5040	mm = get_task_mm(p);
5041	if (mm) {
5042		mem_cgroup_move_charge(mm);
5043		mmput(mm);
5044	}
5045	mem_cgroup_clear_mc();
5046}
5047#else	/* !CONFIG_MMU */
5048static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5049				struct cgroup *cgroup,
5050				struct task_struct *p,
5051				bool threadgroup)
5052{
5053	return 0;
5054}
5055static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5056				struct cgroup *cgroup,
5057				struct task_struct *p,
5058				bool threadgroup)
5059{
5060}
5061static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5062				struct cgroup *cont,
5063				struct cgroup *old_cont,
5064				struct task_struct *p,
5065				bool threadgroup)
5066{
5067}
5068#endif
5069
5070struct cgroup_subsys mem_cgroup_subsys = {
5071	.name = "memory",
5072	.subsys_id = mem_cgroup_subsys_id,
5073	.create = mem_cgroup_create,
5074	.pre_destroy = mem_cgroup_pre_destroy,
5075	.destroy = mem_cgroup_destroy,
5076	.populate = mem_cgroup_populate,
5077	.can_attach = mem_cgroup_can_attach,
5078	.cancel_attach = mem_cgroup_cancel_attach,
5079	.attach = mem_cgroup_move_task,
5080	.early_init = 0,
5081	.use_id = 1,
5082};
5083
5084#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
5085static int __init enable_swap_account(char *s)
5086{
5087	/* consider enabled if no parameter or 1 is given */
5088	if (!(*s) || !strcmp(s, "=1"))
5089		really_do_swap_account = 1;
5090	else if (!strcmp(s, "=0"))
5091		really_do_swap_account = 0;
5092	return 1;
5093}
5094__setup("swapaccount", enable_swap_account);
5095
5096static int __init disable_swap_account(char *s)
5097{
5098	printk_once("noswapaccount is deprecated and will be removed in 2.6.40. Use swapaccount=0 instead\n");
5099	enable_swap_account("=0");
5100	return 1;
5101}
5102__setup("noswapaccount", disable_swap_account);
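
/*
 * Boot-time control (only with CONFIG_CGROUP_MEM_RES_CTLR_SWAP): passing
 * "swapaccount" or "swapaccount=1" on the kernel command line enables swap
 * accounting, "swapaccount=0" disables it; the deprecated "noswapaccount"
 * still works as an alias for swapaccount=0.  The flag only takes effect
 * when the memory controller itself is not disabled (see
 * enable_swap_cgroup()).
 */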
5103#endif
5104