memcontrol.c revision a433658c30974fc87ba3ff52d7e4e6299762aa3d
1/* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * Memory thresholds
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21 * GNU General Public License for more details.
22 */
23
24#include <linux/res_counter.h>
25#include <linux/memcontrol.h>
26#include <linux/cgroup.h>
27#include <linux/mm.h>
28#include <linux/hugetlb.h>
29#include <linux/pagemap.h>
30#include <linux/smp.h>
31#include <linux/page-flags.h>
32#include <linux/backing-dev.h>
33#include <linux/bit_spinlock.h>
34#include <linux/rcupdate.h>
35#include <linux/limits.h>
36#include <linux/mutex.h>
37#include <linux/rbtree.h>
38#include <linux/slab.h>
39#include <linux/swap.h>
40#include <linux/swapops.h>
41#include <linux/spinlock.h>
42#include <linux/eventfd.h>
43#include <linux/sort.h>
44#include <linux/fs.h>
45#include <linux/seq_file.h>
46#include <linux/vmalloc.h>
47#include <linux/mm_inline.h>
48#include <linux/page_cgroup.h>
49#include <linux/cpu.h>
50#include <linux/oom.h>
51#include "internal.h"
52
53#include <asm/uaccess.h>
54
55#include <trace/events/vmscan.h>
56
57struct cgroup_subsys mem_cgroup_subsys __read_mostly;
58#define MEM_CGROUP_RECLAIM_RETRIES	5
59struct mem_cgroup *root_mem_cgroup __read_mostly;
60
61#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
62/* Turned on only when memory cgroup is enabled && really_do_swap_account == 1 */
63int do_swap_account __read_mostly;
64
65/* for remembering the boot option */
66#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
67static int really_do_swap_account __initdata = 1;
68#else
69static int really_do_swap_account __initdata = 0;
70#endif
71
72#else
73#define do_swap_account		(0)
74#endif
75
76
77/*
78 * Statistics for memory cgroup.
79 */
80enum mem_cgroup_stat_index {
81	/*
82	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
83	 */
84	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
85	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
86	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
87	MEM_CGROUP_STAT_SWAPOUT, /* # of pages swapped out */
88	MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
89	MEM_CGROUP_ON_MOVE,	/* someone is moving account between groups */
90	MEM_CGROUP_STAT_NSTATS,
91};
92
93enum mem_cgroup_events_index {
94	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
95	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
96	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
97	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
98	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
99	MEM_CGROUP_EVENTS_NSTATS,
100};
101/*
102 * The per-memcg event counter is incremented at every pagein/pageout. With
103 * THP, it is incremented by the number of pages. This counter is used to
104 * trigger some periodic events. This is straightforward and better
105 * than using jiffies etc. to handle periodic memcg events.
106 */
107enum mem_cgroup_events_target {
108	MEM_CGROUP_TARGET_THRESH,
109	MEM_CGROUP_TARGET_SOFTLIMIT,
110	MEM_CGROUP_NTARGETS,
111};
112#define THRESHOLDS_EVENTS_TARGET (128)
113#define SOFTLIMIT_EVENTS_TARGET (1024)
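/*
 * A rough worked example of the scheme above: a THP pagein adds its page
 * count (512 for a 2MB huge page with 4KB base pages) to the event counter
 * in a single step, so one huge-page charge can cross a target by itself.
 * With the targets above, thresholds are re-checked about every 128
 * charged/uncharged pages, and the soft-limit tree about every 1024.
 */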
114
115struct mem_cgroup_stat_cpu {
116	long count[MEM_CGROUP_STAT_NSTATS];
117	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
118	unsigned long targets[MEM_CGROUP_NTARGETS];
119};
120
121/*
122 * per-zone information in memory controller.
123 */
124struct mem_cgroup_per_zone {
125	/*
126	 * spin_lock to protect the per cgroup LRU
127	 */
128	struct list_head	lists[NR_LRU_LISTS];
129	unsigned long		count[NR_LRU_LISTS];
130
131	struct zone_reclaim_stat reclaim_stat;
132	struct rb_node		tree_node;	/* RB tree node */
133	unsigned long long	usage_in_excess;/* Set to the value by which */
134						/* the soft limit is exceeded*/
135	bool			on_tree;
136	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
137						/* use container_of	   */
138};
139/* Macro for accessing counter */
140#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
141
142struct mem_cgroup_per_node {
143	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
144};
145
146struct mem_cgroup_lru_info {
147	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
148};
149
150/*
151 * Cgroups above their limits are maintained in a RB-Tree, independent of
152 * their hierarchy representation
153 */
154
155struct mem_cgroup_tree_per_zone {
156	struct rb_root rb_root;
157	spinlock_t lock;
158};
159
160struct mem_cgroup_tree_per_node {
161	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
162};
163
164struct mem_cgroup_tree {
165	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
166};
167
168static struct mem_cgroup_tree soft_limit_tree __read_mostly;
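/*
 * Lookup sketch: the tree for a given page is found purely by position,
 * as soft_limit_tree_from_page() below does:
 *
 *	mctz = &soft_limit_tree.rb_tree_per_node[page_to_nid(page)]
 *			->rb_tree_per_zone[page_zonenum(page)];
 *
 * Each per-zone tree orders mem_cgroup_per_zone entries by usage_in_excess,
 * so the rightmost node is always the cgroup farthest over its soft limit
 * in that zone.
 */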
169
170struct mem_cgroup_threshold {
171	struct eventfd_ctx *eventfd;
172	u64 threshold;
173};
174
175/* For threshold */
176struct mem_cgroup_threshold_ary {
177	/* An array index points to threshold just below usage. */
178	int current_threshold;
179	/* Size of entries[] */
180	unsigned int size;
181	/* Array of thresholds */
182	struct mem_cgroup_threshold entries[0];
183};
184
185struct mem_cgroup_thresholds {
186	/* Primary thresholds array */
187	struct mem_cgroup_threshold_ary *primary;
188	/*
189	 * Spare threshold array.
190	 * This is needed to make mem_cgroup_unregister_event() "never fail".
191	 * It must be able to store at least primary->size - 1 entries.
192	 */
193	struct mem_cgroup_threshold_ary *spare;
194};
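/*
 * A sketch of why the spare array makes unregistration infallible: the new,
 * smaller array is built in the preallocated spare rather than being
 * allocated on the spot, roughly
 *
 *	new = thresholds->spare;
 *	... copy all entries except the removed one into new ...
 *	thresholds->spare = thresholds->primary;
 *	rcu_assign_pointer(thresholds->primary, new);
 *
 * so no allocation (and hence no failure) can happen on that path.
 */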
195
196/* for OOM */
197struct mem_cgroup_eventfd_list {
198	struct list_head list;
199	struct eventfd_ctx *eventfd;
200};
201
202static void mem_cgroup_threshold(struct mem_cgroup *mem);
203static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
204
205/*
206 * The memory controller data structure. The memory controller controls both
207 * page cache and RSS per cgroup. We would eventually like to provide
208 * statistics based on the statistics developed by Rik van Riel for clock-pro,
209 * to help the administrator determine what knobs to tune.
210 *
211 * TODO: Add a water mark for the memory controller. Reclaim will begin when
212 * we hit the water mark. Maybe even add a low water mark, such that
213 * no reclaim occurs from a cgroup at its low water mark; this is
214 * a feature that will be implemented much later in the future.
215 */
216struct mem_cgroup {
217	struct cgroup_subsys_state css;
218	/*
219	 * the counter to account for memory usage
220	 */
221	struct res_counter res;
222	/*
223	 * the counter to account for mem+swap usage.
224	 */
225	struct res_counter memsw;
226	/*
227	 * Per cgroup active and inactive list, similar to the
228	 * per zone LRU lists.
229	 */
230	struct mem_cgroup_lru_info info;
231	/*
232	 * While reclaiming in a hierarchy, we cache the last child we
233	 * reclaimed from.
234	 */
235	int last_scanned_child;
236	int last_scanned_node;
237#if MAX_NUMNODES > 1
238	nodemask_t	scan_nodes;
239	unsigned long   next_scan_node_update;
240#endif
241	/*
242	 * Should the accounting and control be hierarchical, per subtree?
243	 */
244	bool use_hierarchy;
245	atomic_t	oom_lock;
246	atomic_t	refcnt;
247
248	unsigned int	swappiness;
249	/* OOM-Killer disable */
250	int		oom_kill_disable;
251
252	/* set when res.limit == memsw.limit */
253	bool		memsw_is_minimum;
254
255	/* protect arrays of thresholds */
256	struct mutex thresholds_lock;
257
258	/* thresholds for memory usage. RCU-protected */
259	struct mem_cgroup_thresholds thresholds;
260
261	/* thresholds for mem+swap usage. RCU-protected */
262	struct mem_cgroup_thresholds memsw_thresholds;
263
264	/* For oom notifier event fd */
265	struct list_head oom_notify;
266
267	/*
268	 * Should we move charges of a task when a task is moved into this
269	 * mem_cgroup ? And what type of charges should we move ?
270	 */
271	unsigned long 	move_charge_at_immigrate;
272	/*
273	 * percpu counter.
274	 */
275	struct mem_cgroup_stat_cpu *stat;
276	/*
277	 * used when a cpu is offlined or other synchronizations
278	 * See mem_cgroup_read_stat().
279	 */
280	struct mem_cgroup_stat_cpu nocpu_base;
281	spinlock_t pcp_counter_lock;
282};
283
284/* Stuffs for move charges at task migration. */
285/*
286 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
287 * left-shifted bitmap of these types.
288 */
289enum move_type {
290	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
291	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
292	NR_MOVE_TYPE,
293};
294
295/* "mc" and its members are protected by cgroup_mutex */
296static struct move_charge_struct {
297	spinlock_t	  lock; /* for from, to */
298	struct mem_cgroup *from;
299	struct mem_cgroup *to;
300	unsigned long precharge;
301	unsigned long moved_charge;
302	unsigned long moved_swap;
303	struct task_struct *moving_task;	/* a task moving charges */
304	wait_queue_head_t waitq;		/* a waitq for other context */
305} mc = {
306	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
307	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
308};
309
310static bool move_anon(void)
311{
312	return test_bit(MOVE_CHARGE_TYPE_ANON,
313					&mc.to->move_charge_at_immigrate);
314}
315
316static bool move_file(void)
317{
318	return test_bit(MOVE_CHARGE_TYPE_FILE,
319					&mc.to->move_charge_at_immigrate);
320}
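/*
 * Userspace selects the charge types via a bitmask written to the
 * destination cgroup's memory.move_charge_at_immigrate file, e.g.
 *
 *	echo 1 > memory.move_charge_at_immigrate	(anon only)
 *	echo 3 > memory.move_charge_at_immigrate	(anon and file)
 *
 * move_anon()/move_file() above test the corresponding bits of mc.to.
 */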
321
322/*
323 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
324 * limit reclaim to prevent infinite loops, if they ever occur.
325 */
326#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
327#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)
328
329enum charge_type {
330	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
331	MEM_CGROUP_CHARGE_TYPE_MAPPED,
332	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
333	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
334	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
335	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
336	NR_CHARGE_TYPE,
337};
338
339/* for encoding cft->private value on file */
340#define _MEM			(0)
341#define _MEMSWAP		(1)
342#define _OOM_TYPE		(2)
343#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
344#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
345#define MEMFILE_ATTR(val)	((val) & 0xffff)
346/* Used for OOM notifier */
347#define OOM_CONTROL		(0)
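/*
 * Encoding example: MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) stores
 * (1 << 16) | RES_LIMIT in cft->private for memory.memsw.limit_in_bytes;
 * reading it back, MEMFILE_TYPE() yields _MEMSWAP and MEMFILE_ATTR()
 * yields RES_LIMIT.
 */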
348
349/*
350 * Reclaim flags for mem_cgroup_hierarchical_reclaim
351 */
352#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
353#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
354#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
355#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
356#define MEM_CGROUP_RECLAIM_SOFT_BIT	0x2
357#define MEM_CGROUP_RECLAIM_SOFT		(1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
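/*
 * For example, soft limit reclaim passes MEM_CGROUP_RECLAIM_SOFT, and the
 * charge path adds MEM_CGROUP_RECLAIM_NOSWAP when it is the mem+swap
 * counter that failed, since swapping pages out cannot reduce mem+swap
 * usage.
 */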
358
359static void mem_cgroup_get(struct mem_cgroup *mem);
360static void mem_cgroup_put(struct mem_cgroup *mem);
361static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
362static void drain_all_stock_async(void);
363
364static struct mem_cgroup_per_zone *
365mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
366{
367	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
368}
369
370struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
371{
372	return &mem->css;
373}
374
375static struct mem_cgroup_per_zone *
376page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
377{
378	int nid = page_to_nid(page);
379	int zid = page_zonenum(page);
380
381	return mem_cgroup_zoneinfo(mem, nid, zid);
382}
383
384static struct mem_cgroup_tree_per_zone *
385soft_limit_tree_node_zone(int nid, int zid)
386{
387	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
388}
389
390static struct mem_cgroup_tree_per_zone *
391soft_limit_tree_from_page(struct page *page)
392{
393	int nid = page_to_nid(page);
394	int zid = page_zonenum(page);
395
396	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
397}
398
399static void
400__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
401				struct mem_cgroup_per_zone *mz,
402				struct mem_cgroup_tree_per_zone *mctz,
403				unsigned long long new_usage_in_excess)
404{
405	struct rb_node **p = &mctz->rb_root.rb_node;
406	struct rb_node *parent = NULL;
407	struct mem_cgroup_per_zone *mz_node;
408
409	if (mz->on_tree)
410		return;
411
412	mz->usage_in_excess = new_usage_in_excess;
413	if (!mz->usage_in_excess)
414		return;
415	while (*p) {
416		parent = *p;
417		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
418					tree_node);
419		if (mz->usage_in_excess < mz_node->usage_in_excess)
420			p = &(*p)->rb_left;
421		/*
422		 * We can't avoid mem cgroups that are over their soft
423		 * limit by the same amount
424		 */
425		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
426			p = &(*p)->rb_right;
427	}
428	rb_link_node(&mz->tree_node, parent, p);
429	rb_insert_color(&mz->tree_node, &mctz->rb_root);
430	mz->on_tree = true;
431}
432
433static void
434__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
435				struct mem_cgroup_per_zone *mz,
436				struct mem_cgroup_tree_per_zone *mctz)
437{
438	if (!mz->on_tree)
439		return;
440	rb_erase(&mz->tree_node, &mctz->rb_root);
441	mz->on_tree = false;
442}
443
444static void
445mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
446				struct mem_cgroup_per_zone *mz,
447				struct mem_cgroup_tree_per_zone *mctz)
448{
449	spin_lock(&mctz->lock);
450	__mem_cgroup_remove_exceeded(mem, mz, mctz);
451	spin_unlock(&mctz->lock);
452}
453
454
455static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
456{
457	unsigned long long excess;
458	struct mem_cgroup_per_zone *mz;
459	struct mem_cgroup_tree_per_zone *mctz;
460	int nid = page_to_nid(page);
461	int zid = page_zonenum(page);
462	mctz = soft_limit_tree_from_page(page);
463
464	/*
465	 * Necessary to update all ancestors when hierarchy is used,
466	 * because their event counter is not touched.
467	 */
468	for (; mem; mem = parent_mem_cgroup(mem)) {
469		mz = mem_cgroup_zoneinfo(mem, nid, zid);
470		excess = res_counter_soft_limit_excess(&mem->res);
471		/*
472		 * We have to update the tree if mz is on RB-tree or
473		 * mem is over its softlimit.
474		 */
475		if (excess || mz->on_tree) {
476			spin_lock(&mctz->lock);
477			/* if on-tree, remove it */
478			if (mz->on_tree)
479				__mem_cgroup_remove_exceeded(mem, mz, mctz);
480			/*
481			 * Insert again. mz->usage_in_excess will be updated.
482			 * If excess is 0, no tree ops.
483			 */
484			__mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
485			spin_unlock(&mctz->lock);
486		}
487	}
488}
489
490static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
491{
492	int node, zone;
493	struct mem_cgroup_per_zone *mz;
494	struct mem_cgroup_tree_per_zone *mctz;
495
496	for_each_node_state(node, N_POSSIBLE) {
497		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
498			mz = mem_cgroup_zoneinfo(mem, node, zone);
499			mctz = soft_limit_tree_node_zone(node, zone);
500			mem_cgroup_remove_exceeded(mem, mz, mctz);
501		}
502	}
503}
504
505static struct mem_cgroup_per_zone *
506__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
507{
508	struct rb_node *rightmost = NULL;
509	struct mem_cgroup_per_zone *mz;
510
511retry:
512	mz = NULL;
513	rightmost = rb_last(&mctz->rb_root);
514	if (!rightmost)
515		goto done;		/* Nothing to reclaim from */
516
517	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
518	/*
519	 * Remove the node now but someone else can add it back;
520	 * we will add it back at the end of reclaim to its correct
521	 * position in the tree.
522	 */
523	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
524	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
525		!css_tryget(&mz->mem->css))
526		goto retry;
527done:
528	return mz;
529}
530
531static struct mem_cgroup_per_zone *
532mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
533{
534	struct mem_cgroup_per_zone *mz;
535
536	spin_lock(&mctz->lock);
537	mz = __mem_cgroup_largest_soft_limit_node(mctz);
538	spin_unlock(&mctz->lock);
539	return mz;
540}
541
542/*
543 * Implementation Note: reading percpu statistics for memcg.
544 *
545 * Both vmstat[] and percpu_counter have thresholds and do periodic
546 * synchronization to implement "quick" reads. There is a trade-off between
547 * reading cost and precision of the value, so we may get a chance to
548 * implement periodic synchronization for memcg's counters too.
549 *
550 * But this _read() function is used for the user interface now. Users account
551 * memory usage per memory cgroup and _always_ require an exact value for
552 * that accounting. Even if we provided a quick-and-fuzzy read, we would
553 * still have to visit all online cpus and compute the sum. So, for now,
554 * extra synchronization is not implemented (except for cpu hotplug).
555 *
556 * If there are kernel-internal users which can make use of a not-exact
557 * value, and reading all per-cpu values becomes a performance bottleneck in
558 * some common workload, thresholds and synchronization like vmstat[]'s
559 * should be implemented.
560 */
561static long mem_cgroup_read_stat(struct mem_cgroup *mem,
562				 enum mem_cgroup_stat_index idx)
563{
564	long val = 0;
565	int cpu;
566
567	get_online_cpus();
568	for_each_online_cpu(cpu)
569		val += per_cpu(mem->stat->count[idx], cpu);
570#ifdef CONFIG_HOTPLUG_CPU
571	spin_lock(&mem->pcp_counter_lock);
572	val += mem->nocpu_base.count[idx];
573	spin_unlock(&mem->pcp_counter_lock);
574#endif
575	put_online_cpus();
576	return val;
577}
578
579static long mem_cgroup_local_usage(struct mem_cgroup *mem)
580{
581	long ret;
582
583	ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
584	ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
585	return ret;
586}
587
588static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
589					 bool charge)
590{
591	int val = (charge) ? 1 : -1;
592	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
593}
594
595void mem_cgroup_pgfault(struct mem_cgroup *mem, int val)
596{
597	this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
598}
599
600void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val)
601{
602	this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
603}
604
605static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
606					    enum mem_cgroup_events_index idx)
607{
608	unsigned long val = 0;
609	int cpu;
610
611	for_each_online_cpu(cpu)
612		val += per_cpu(mem->stat->events[idx], cpu);
613#ifdef CONFIG_HOTPLUG_CPU
614	spin_lock(&mem->pcp_counter_lock);
615	val += mem->nocpu_base.events[idx];
616	spin_unlock(&mem->pcp_counter_lock);
617#endif
618	return val;
619}
620
621static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
622					 bool file, int nr_pages)
623{
624	preempt_disable();
625
626	if (file)
627		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
628	else
629		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);
630
631	/* pagein of a big page is an event. So, ignore page size */
632	if (nr_pages > 0)
633		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
634	else {
635		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
636		nr_pages = -nr_pages; /* for event */
637	}
638
639	__this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
640
641	preempt_enable();
642}
643
644static unsigned long
645mem_cgroup_get_zonestat_node(struct mem_cgroup *mem, int nid, enum lru_list idx)
646{
647	struct mem_cgroup_per_zone *mz;
648	u64 total = 0;
649	int zid;
650
651	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
652		mz = mem_cgroup_zoneinfo(mem, nid, zid);
653		total += MEM_CGROUP_ZSTAT(mz, idx);
654	}
655	return total;
656}
657static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
658					enum lru_list idx)
659{
660	int nid;
661	u64 total = 0;
662
663	for_each_online_node(nid)
664		total += mem_cgroup_get_zonestat_node(mem, nid, idx);
665	return total;
666}
667
668static bool __memcg_event_check(struct mem_cgroup *mem, int target)
669{
670	unsigned long val, next;
671
672	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
673	next = this_cpu_read(mem->stat->targets[target]);
674	/* from time_after() in jiffies.h */
675	return ((long)next - (long)val < 0);
676}
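/*
 * The signed subtraction above keeps the check correct across counter
 * wraparound, just like time_after(). E.g. if val is ULONG_MAX - 10 and
 * the target was set to val + 128, next wraps to 117, but
 * (long)next - (long)val is still 128, so the event does not fire early.
 */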
677
678static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
679{
680	unsigned long val, next;
681
682	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
683
684	switch (target) {
685	case MEM_CGROUP_TARGET_THRESH:
686		next = val + THRESHOLDS_EVENTS_TARGET;
687		break;
688	case MEM_CGROUP_TARGET_SOFTLIMIT:
689		next = val + SOFTLIMIT_EVENTS_TARGET;
690		break;
691	default:
692		return;
693	}
694
695	this_cpu_write(mem->stat->targets[target], next);
696}
697
698/*
699 * Check events in order: thresholds fire at a finer grain, and the
700 * soft-limit tree update piggybacks on them at a coarser interval.
701 */
702static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
703{
704	/* threshold event is triggered in finer grain than soft limit */
705	if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) {
706		mem_cgroup_threshold(mem);
707		__mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
708		if (unlikely(__memcg_event_check(mem,
709			MEM_CGROUP_TARGET_SOFTLIMIT))){
710			mem_cgroup_update_tree(mem, page);
711			__mem_cgroup_target_update(mem,
712				MEM_CGROUP_TARGET_SOFTLIMIT);
713		}
714	}
715}
716
717static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
718{
719	return container_of(cgroup_subsys_state(cont,
720				mem_cgroup_subsys_id), struct mem_cgroup,
721				css);
722}
723
724struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
725{
726	/*
727	 * mm_update_next_owner() may clear mm->owner to NULL
728	 * if it races with swapoff, page migration, etc.
729	 * So this can be called with p == NULL.
730	 */
731	if (unlikely(!p))
732		return NULL;
733
734	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
735				struct mem_cgroup, css);
736}
737
738struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
739{
740	struct mem_cgroup *mem = NULL;
741
742	if (!mm)
743		return NULL;
744	/*
745	 * Because we have no locks, mm->owner may be being moved to another
746	 * cgroup. We use css_tryget() here even if this looks
747	 * pessimistic (rather than adding locks here).
748	 */
749	rcu_read_lock();
750	do {
751		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
752		if (unlikely(!mem))
753			break;
754	} while (!css_tryget(&mem->css));
755	rcu_read_unlock();
756	return mem;
757}
758
759/* The caller has to guarantee "mem" exists before calling this */
760static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
761{
762	struct cgroup_subsys_state *css;
763	int found;
764
765	if (!mem) /* ROOT cgroup has the smallest ID */
766		return root_mem_cgroup; /*css_put/get against root is ignored*/
767	if (!mem->use_hierarchy) {
768		if (css_tryget(&mem->css))
769			return mem;
770		return NULL;
771	}
772	rcu_read_lock();
773	/*
774	 * Search for the memory cgroup which has the smallest ID (>= 1)
775	 * under the given ROOT cgroup.
776	 */
777	css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
778	if (css && css_tryget(css))
779		mem = container_of(css, struct mem_cgroup, css);
780	else
781		mem = NULL;
782	rcu_read_unlock();
783	return mem;
784}
785
786static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
787					struct mem_cgroup *root,
788					bool cond)
789{
790	int nextid = css_id(&iter->css) + 1;
791	int found;
792	int hierarchy_used;
793	struct cgroup_subsys_state *css;
794
795	hierarchy_used = iter->use_hierarchy;
796
797	css_put(&iter->css);
798	/* If no ROOT, walk all, ignore hierarchy */
799	if (!cond || (root && !hierarchy_used))
800		return NULL;
801
802	if (!root)
803		root = root_mem_cgroup;
804
805	do {
806		iter = NULL;
807		rcu_read_lock();
808
809		css = css_get_next(&mem_cgroup_subsys, nextid,
810				&root->css, &found);
811		if (css && css_tryget(css))
812			iter = container_of(css, struct mem_cgroup, css);
813		rcu_read_unlock();
814		/* If css is NULL, no more cgroups will be found */
815		nextid = found + 1;
816	} while (css && !iter);
817
818	return iter;
819}
820/*
821 * for_each_mem_cgroup_tree() visits all cgroups under a tree. Be careful:
822 * breaking out of the loop is not allowed because we hold a reference count.
823 * Instead, set "cond" to false and "continue" to exit the loop; see the usage sketch below.
824 */
825#define for_each_mem_cgroup_tree_cond(iter, root, cond)	\
826	for (iter = mem_cgroup_start_loop(root);\
827	     iter != NULL;\
828	     iter = mem_cgroup_get_next(iter, root, cond))
829
830#define for_each_mem_cgroup_tree(iter, root) \
831	for_each_mem_cgroup_tree_cond(iter, root, true)
832
833#define for_each_mem_cgroup_all(iter) \
834	for_each_mem_cgroup_tree_cond(iter, NULL, true)
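/*
 * Usage sketch for an early exit (done_with() is a hypothetical predicate);
 * we must not "break", since each step holds a css reference that
 * mem_cgroup_get_next() is responsible for dropping:
 *
 *	bool cond = true;
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree_cond(iter, root, cond)
 *		if (done_with(iter))
 *			cond = false;	(next iteration drops the ref and ends the walk)
 */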
835
836
837static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
838{
839	return (mem == root_mem_cgroup);
840}
841
842void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
843{
844	struct mem_cgroup *mem;
845
846	if (!mm)
847		return;
848
849	rcu_read_lock();
850	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
851	if (unlikely(!mem))
852		goto out;
853
854	switch (idx) {
855	case PGMAJFAULT:
856		mem_cgroup_pgmajfault(mem, 1);
857		break;
858	case PGFAULT:
859		mem_cgroup_pgfault(mem, 1);
860		break;
861	default:
862		BUG();
863	}
864out:
865	rcu_read_unlock();
866}
867EXPORT_SYMBOL(mem_cgroup_count_vm_event);
868
869/*
870 * The following LRU functions may be used without PCG_LOCK.
871 * Operations are called by global LRU routines, independently of memcg.
872 * What we have to take care of here is the validity of pc->mem_cgroup.
873 *
874 * Changes to pc->mem_cgroup happen at
875 * 1. charge
876 * 2. moving account
877 * In the typical case, "charge" is done before add-to-lru. The exception is
878 * SwapCache, which is added to the LRU before being charged.
879 * If the PCG_USED bit is not set, page_cgroup is not added to this private LRU.
880 * When moving account, the page is not on the LRU; it's isolated.
881 */
882
883void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
884{
885	struct page_cgroup *pc;
886	struct mem_cgroup_per_zone *mz;
887
888	if (mem_cgroup_disabled())
889		return;
890	pc = lookup_page_cgroup(page);
891	/* can happen while we handle swapcache. */
892	if (!TestClearPageCgroupAcctLRU(pc))
893		return;
894	VM_BUG_ON(!pc->mem_cgroup);
895	/*
896	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
897	 * removed from global LRU.
898	 */
899	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
900	/* huge page split is done under lru_lock. so, we have no races. */
901	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
902	if (mem_cgroup_is_root(pc->mem_cgroup))
903		return;
904	VM_BUG_ON(list_empty(&pc->lru));
905	list_del_init(&pc->lru);
906}
907
908void mem_cgroup_del_lru(struct page *page)
909{
910	mem_cgroup_del_lru_list(page, page_lru(page));
911}
912
913/*
914 * Writeback is about to end against a page which has been marked for immediate
915 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
916 * inactive list.
917 */
918void mem_cgroup_rotate_reclaimable_page(struct page *page)
919{
920	struct mem_cgroup_per_zone *mz;
921	struct page_cgroup *pc;
922	enum lru_list lru = page_lru(page);
923
924	if (mem_cgroup_disabled())
925		return;
926
927	pc = lookup_page_cgroup(page);
928	/* unused or root page is not rotated. */
929	if (!PageCgroupUsed(pc))
930		return;
931	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
932	smp_rmb();
933	if (mem_cgroup_is_root(pc->mem_cgroup))
934		return;
935	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
936	list_move_tail(&pc->lru, &mz->lists[lru]);
937}
938
939void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
940{
941	struct mem_cgroup_per_zone *mz;
942	struct page_cgroup *pc;
943
944	if (mem_cgroup_disabled())
945		return;
946
947	pc = lookup_page_cgroup(page);
948	/* unused or root page is not rotated. */
949	if (!PageCgroupUsed(pc))
950		return;
951	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
952	smp_rmb();
953	if (mem_cgroup_is_root(pc->mem_cgroup))
954		return;
955	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
956	list_move(&pc->lru, &mz->lists[lru]);
957}
958
959void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
960{
961	struct page_cgroup *pc;
962	struct mem_cgroup_per_zone *mz;
963
964	if (mem_cgroup_disabled())
965		return;
966	pc = lookup_page_cgroup(page);
967	VM_BUG_ON(PageCgroupAcctLRU(pc));
968	if (!PageCgroupUsed(pc))
969		return;
970	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
971	smp_rmb();
972	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
973	/* huge page split is done under lru_lock. so, we have no races. */
974	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
975	SetPageCgroupAcctLRU(pc);
976	if (mem_cgroup_is_root(pc->mem_cgroup))
977		return;
978	list_add(&pc->lru, &mz->lists[lru]);
979}
980
981/*
982 * When handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed
983 * while it's linked to the lru, because the page may be reused after it's
984 * fully uncharged. To handle that, unlink page_cgroup from the LRU when we
985 * charge it again. It's done under lock_page; zone->lru_lock is never held.
986 */
987static void mem_cgroup_lru_del_before_commit(struct page *page)
988{
989	unsigned long flags;
990	struct zone *zone = page_zone(page);
991	struct page_cgroup *pc = lookup_page_cgroup(page);
992
993	/*
994	 * Doing this check without taking ->lru_lock seems wrong but it
995	 * is safe, because if page_cgroup's USED bit is unset, the page
996	 * will not be added to any memcg's LRU, and if page_cgroup's USED
997	 * bit is set, the commit after this will fail anyway.
998	 * All of this charge/uncharge runs under some mutual exclusion,
999	 * so we don't need to take care of changes in the USED bit.
1000	 */
1001	if (likely(!PageLRU(page)))
1002		return;
1003
1004	spin_lock_irqsave(&zone->lru_lock, flags);
1005	/*
1006	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
1007	 * is guarded by lock_page() because the page is SwapCache.
1008	 */
1009	if (!PageCgroupUsed(pc))
1010		mem_cgroup_del_lru_list(page, page_lru(page));
1011	spin_unlock_irqrestore(&zone->lru_lock, flags);
1012}
1013
1014static void mem_cgroup_lru_add_after_commit(struct page *page)
1015{
1016	unsigned long flags;
1017	struct zone *zone = page_zone(page);
1018	struct page_cgroup *pc = lookup_page_cgroup(page);
1019
1020	/* take care of the case where the page is added to LRU while we commit */
1021	if (likely(!PageLRU(page)))
1022		return;
1023	spin_lock_irqsave(&zone->lru_lock, flags);
1024	/* link when the page is linked to LRU but page_cgroup isn't */
1025	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
1026		mem_cgroup_add_lru_list(page, page_lru(page));
1027	spin_unlock_irqrestore(&zone->lru_lock, flags);
1028}
1029
1030
1031void mem_cgroup_move_lists(struct page *page,
1032			   enum lru_list from, enum lru_list to)
1033{
1034	if (mem_cgroup_disabled())
1035		return;
1036	mem_cgroup_del_lru_list(page, from);
1037	mem_cgroup_add_lru_list(page, to);
1038}
1039
1040int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
1041{
1042	int ret;
1043	struct mem_cgroup *curr = NULL;
1044	struct task_struct *p;
1045
1046	p = find_lock_task_mm(task);
1047	if (!p)
1048		return 0;
1049	curr = try_get_mem_cgroup_from_mm(p->mm);
1050	task_unlock(p);
1051	if (!curr)
1052		return 0;
1053	/*
1054	 * We should check use_hierarchy of "mem", not "curr". Checking
1055	 * use_hierarchy of "curr" here would make this function return true if
1056	 * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the
1057	 * *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
1058	 */
1059	if (mem->use_hierarchy)
1060		ret = css_is_ancestor(&curr->css, &mem->css);
1061	else
1062		ret = (curr == mem);
1063	css_put(&curr->css);
1064	return ret;
1065}
1066
1067static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
1068{
1069	unsigned long active;
1070	unsigned long inactive;
1071	unsigned long gb;
1072	unsigned long inactive_ratio;
1073
1074	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
1075	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
1076
1077	gb = (inactive + active) >> (30 - PAGE_SHIFT);
1078	if (gb)
1079		inactive_ratio = int_sqrt(10 * gb);
1080	else
1081		inactive_ratio = 1;
1082
1083	if (present_pages) {
1084		present_pages[0] = inactive;
1085		present_pages[1] = active;
1086	}
1087
1088	return inactive_ratio;
1089}
1090
1091int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
1092{
1093	unsigned long active;
1094	unsigned long inactive;
1095	unsigned long present_pages[2];
1096	unsigned long inactive_ratio;
1097
1098	inactive_ratio = calc_inactive_ratio(memcg, present_pages);
1099
1100	inactive = present_pages[0];
1101	active = present_pages[1];
1102
1103	if (inactive * inactive_ratio < active)
1104		return 1;
1105
1106	return 0;
1107}
1108
1109int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
1110{
1111	unsigned long active;
1112	unsigned long inactive;
1113
1114	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
1115	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
1116
1117	return (active > inactive);
1118}
1119
1120unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
1121						struct zone *zone,
1122						enum lru_list lru)
1123{
1124	int nid = zone_to_nid(zone);
1125	int zid = zone_idx(zone);
1126	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1127
1128	return MEM_CGROUP_ZSTAT(mz, lru);
1129}
1130
1131#ifdef CONFIG_NUMA
1132static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg,
1133							int nid)
1134{
1135	unsigned long ret;
1136
1137	ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_FILE) +
1138		mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_FILE);
1139
1140	return ret;
1141}
1142
1143static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg)
1144{
1145	u64 total = 0;
1146	int nid;
1147
1148	for_each_node_state(nid, N_HIGH_MEMORY)
1149		total += mem_cgroup_node_nr_file_lru_pages(memcg, nid);
1150
1151	return total;
1152}
1153
1154static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg,
1155							int nid)
1156{
1157	unsigned long ret;
1158
1159	ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) +
1160		mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON);
1161
1162	return ret;
1163}
1164
1165static unsigned long mem_cgroup_nr_anon_lru_pages(struct mem_cgroup *memcg)
1166{
1167	u64 total = 0;
1168	int nid;
1169
1170	for_each_node_state(nid, N_HIGH_MEMORY)
1171		total += mem_cgroup_node_nr_anon_lru_pages(memcg, nid);
1172
1173	return total;
1174}
1175
1176static unsigned long
1177mem_cgroup_node_nr_unevictable_lru_pages(struct mem_cgroup *memcg, int nid)
1178{
1179	return mem_cgroup_get_zonestat_node(memcg, nid, LRU_UNEVICTABLE);
1180}
1181
1182static unsigned long
1183mem_cgroup_nr_unevictable_lru_pages(struct mem_cgroup *memcg)
1184{
1185	u64 total = 0;
1186	int nid;
1187
1188	for_each_node_state(nid, N_HIGH_MEMORY)
1189		total += mem_cgroup_node_nr_unevictable_lru_pages(memcg, nid);
1190
1191	return total;
1192}
1193
1194static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
1195							int nid)
1196{
1197	enum lru_list l;
1198	u64 total = 0;
1199
1200	for_each_lru(l)
1201		total += mem_cgroup_get_zonestat_node(memcg, nid, l);
1202
1203	return total;
1204}
1205
1206static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg)
1207{
1208	u64 total = 0;
1209	int nid;
1210
1211	for_each_node_state(nid, N_HIGH_MEMORY)
1212		total += mem_cgroup_node_nr_lru_pages(memcg, nid);
1213
1214	return total;
1215}
1216#endif /* CONFIG_NUMA */
1217
1218struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
1219						      struct zone *zone)
1220{
1221	int nid = zone_to_nid(zone);
1222	int zid = zone_idx(zone);
1223	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1224
1225	return &mz->reclaim_stat;
1226}
1227
1228struct zone_reclaim_stat *
1229mem_cgroup_get_reclaim_stat_from_page(struct page *page)
1230{
1231	struct page_cgroup *pc;
1232	struct mem_cgroup_per_zone *mz;
1233
1234	if (mem_cgroup_disabled())
1235		return NULL;
1236
1237	pc = lookup_page_cgroup(page);
1238	if (!PageCgroupUsed(pc))
1239		return NULL;
1240	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
1241	smp_rmb();
1242	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
1243	return &mz->reclaim_stat;
1244}
1245
1246unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
1247					struct list_head *dst,
1248					unsigned long *scanned, int order,
1249					int mode, struct zone *z,
1250					struct mem_cgroup *mem_cont,
1251					int active, int file)
1252{
1253	unsigned long nr_taken = 0;
1254	struct page *page;
1255	unsigned long scan;
1256	LIST_HEAD(pc_list);
1257	struct list_head *src;
1258	struct page_cgroup *pc, *tmp;
1259	int nid = zone_to_nid(z);
1260	int zid = zone_idx(z);
1261	struct mem_cgroup_per_zone *mz;
1262	int lru = LRU_FILE * file + active;
1263	int ret;
1264
1265	BUG_ON(!mem_cont);
1266	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
1267	src = &mz->lists[lru];
1268
1269	scan = 0;
1270	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
1271		if (scan >= nr_to_scan)
1272			break;
1273
1274		if (unlikely(!PageCgroupUsed(pc)))
1275			continue;
1276
1277		page = lookup_cgroup_page(pc);
1278
1279		if (unlikely(!PageLRU(page)))
1280			continue;
1281
1282		scan++;
1283		ret = __isolate_lru_page(page, mode, file);
1284		switch (ret) {
1285		case 0:
1286			list_move(&page->lru, dst);
1287			mem_cgroup_del_lru(page);
1288			nr_taken += hpage_nr_pages(page);
1289			break;
1290		case -EBUSY:
1291			/* we don't affect global LRU but rotate in our LRU */
1292			mem_cgroup_rotate_lru_list(page, page_lru(page));
1293			break;
1294		default:
1295			break;
1296		}
1297	}
1298
1299	*scanned = scan;
1300
1301	trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
1302				      0, 0, 0, mode);
1303
1304	return nr_taken;
1305}
1306
1307#define mem_cgroup_from_res_counter(counter, member)	\
1308	container_of(counter, struct mem_cgroup, member)
1309
1310/**
1311 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1312 * @mem: the memory cgroup
1313 *
1314 * Returns the maximum amount of memory @mem can be charged with, in
1315 * pages.
1316 */
1317static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
1318{
1319	unsigned long long margin;
1320
1321	margin = res_counter_margin(&mem->res);
1322	if (do_swap_account)
1323		margin = min(margin, res_counter_margin(&mem->memsw));
1324	return margin >> PAGE_SHIFT;
1325}
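/*
 * Worked example: with usage 400M against a 512M limit and mem+swap usage
 * 500M against a 512M mem+swap limit, the margins are 112M and 12M; the
 * function reports the smaller one, 12M worth of pages, since a new charge
 * must fit under both counters.
 */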
1326
1327static unsigned int get_swappiness(struct mem_cgroup *memcg)
1328{
1329	struct cgroup *cgrp = memcg->css.cgroup;
1330
1331	/* root ? */
1332	if (cgrp->parent == NULL)
1333		return vm_swappiness;
1334
1335	return memcg->swappiness;
1336}
1337
1338static void mem_cgroup_start_move(struct mem_cgroup *mem)
1339{
1340	int cpu;
1341
1342	get_online_cpus();
1343	spin_lock(&mem->pcp_counter_lock);
1344	for_each_online_cpu(cpu)
1345		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
1346	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
1347	spin_unlock(&mem->pcp_counter_lock);
1348	put_online_cpus();
1349
1350	synchronize_rcu();
1351}
1352
1353static void mem_cgroup_end_move(struct mem_cgroup *mem)
1354{
1355	int cpu;
1356
1357	if (!mem)
1358		return;
1359	get_online_cpus();
1360	spin_lock(&mem->pcp_counter_lock);
1361	for_each_online_cpu(cpu)
1362		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
1363	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
1364	spin_unlock(&mem->pcp_counter_lock);
1365	put_online_cpus();
1366}
1367/*
1368 * Two routines for checking whether "mem" is under move_account() or not.
1369 *
1370 * mem_cgroup_stealed() - checks whether a cgroup is mc.from. This is used
1371 *			  for avoiding races in accounting. If true,
1372 *			  pc->mem_cgroup may be overwritten.
1373 *
1374 * mem_cgroup_under_move() - checks whether a cgroup is mc.from, mc.to, or
1375 *			  under the hierarchy of moving cgroups. This is for
1376 *			  waiting at high memory pressure caused by "move".
1377 */
1378
1379static bool mem_cgroup_stealed(struct mem_cgroup *mem)
1380{
1381	VM_BUG_ON(!rcu_read_lock_held());
1382	return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
1383}
1384
1385static bool mem_cgroup_under_move(struct mem_cgroup *mem)
1386{
1387	struct mem_cgroup *from;
1388	struct mem_cgroup *to;
1389	bool ret = false;
1390	/*
1391	 * Unlike task_move routines, we access mc.to, mc.from not under
1392	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1393	 */
1394	spin_lock(&mc.lock);
1395	from = mc.from;
1396	to = mc.to;
1397	if (!from)
1398		goto unlock;
1399	if (from == mem || to == mem
1400	    || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
1401	    || (mem->use_hierarchy && css_is_ancestor(&to->css,	&mem->css)))
1402		ret = true;
1403unlock:
1404	spin_unlock(&mc.lock);
1405	return ret;
1406}
1407
1408static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
1409{
1410	if (mc.moving_task && current != mc.moving_task) {
1411		if (mem_cgroup_under_move(mem)) {
1412			DEFINE_WAIT(wait);
1413			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1414			/* moving charge context might have finished. */
1415			if (mc.moving_task)
1416				schedule();
1417			finish_wait(&mc.waitq, &wait);
1418			return true;
1419		}
1420	}
1421	return false;
1422}
1423
1424/**
1425 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1426 * @memcg: The memory cgroup that went over limit
1427 * @p: Task that is going to be killed
1428 *
1429 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1430 * enabled
1431 */
1432void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1433{
1434	struct cgroup *task_cgrp;
1435	struct cgroup *mem_cgrp;
1436	/*
1437	 * Need a buffer in BSS, can't rely on allocations. The code relies
1438	 * on the assumption that OOM is serialized for memory controller.
1439	 * If this assumption is broken, revisit this code.
1440	 */
1441	static char memcg_name[PATH_MAX];
1442	int ret;
1443
1444	if (!memcg || !p)
1445		return;
1446
1447
1448	rcu_read_lock();
1449
1450	mem_cgrp = memcg->css.cgroup;
1451	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1452
1453	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1454	if (ret < 0) {
1455		/*
1456		 * Unfortunately, we are unable to convert to a useful name,
1457		 * but we'll still print out the usage information.
1458		 */
1459		rcu_read_unlock();
1460		goto done;
1461	}
1462	rcu_read_unlock();
1463
1464	printk(KERN_INFO "Task in %s killed", memcg_name);
1465
1466	rcu_read_lock();
1467	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1468	if (ret < 0) {
1469		rcu_read_unlock();
1470		goto done;
1471	}
1472	rcu_read_unlock();
1473
1474	/*
1475	 * Continues from above, so we don't need a KERN_ level
1476	 */
1477	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1478done:
1479
1480	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1481		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1482		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1483		res_counter_read_u64(&memcg->res, RES_FAILCNT));
1484	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1485		"failcnt %llu\n",
1486		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1487		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1488		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1489}
1490
1491/*
1492 * This function returns the number of memcgs under the hierarchy tree.
1493 * Returns 1 (self count) if there are no children.
1494 */
1495static int mem_cgroup_count_children(struct mem_cgroup *mem)
1496{
1497	int num = 0;
1498	struct mem_cgroup *iter;
1499
1500	for_each_mem_cgroup_tree(iter, mem)
1501		num++;
1502	return num;
1503}
1504
1505/*
1506 * Return the memory (and swap, if configured) limit for a memcg.
1507 */
1508u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1509{
1510	u64 limit;
1511	u64 memsw;
1512
1513	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1514	limit += total_swap_pages << PAGE_SHIFT;
1515
1516	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1517	/*
1518	 * If memsw is finite and limits the amount of swap space available
1519	 * to this memcg, return that limit.
1520	 */
1521	return min(limit, memsw);
1522}
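/*
 * Worked example: with memory.limit_in_bytes at 1G, 4G of total swap, and
 * no mem+swap limit configured (i.e. it reads as RESOURCE_MAX), this
 * returns min(1G + 4G, RESOURCE_MAX) = 5G. If memory.memsw.limit_in_bytes
 * is set to 2G, it returns min(5G, 2G) = 2G instead.
 */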
1523
1524/*
1525 * Visit the first child (need not be the first child as per the ordering
1526 * of the cgroup list, since we track last_scanned_child) of @mem and use
1527 * that to reclaim free pages from.
1528 */
1529static struct mem_cgroup *
1530mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1531{
1532	struct mem_cgroup *ret = NULL;
1533	struct cgroup_subsys_state *css;
1534	int nextid, found;
1535
1536	if (!root_mem->use_hierarchy) {
1537		css_get(&root_mem->css);
1538		ret = root_mem;
1539	}
1540
1541	while (!ret) {
1542		rcu_read_lock();
1543		nextid = root_mem->last_scanned_child + 1;
1544		css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
1545				   &found);
1546		if (css && css_tryget(css))
1547			ret = container_of(css, struct mem_cgroup, css);
1548
1549		rcu_read_unlock();
1550		/* Updates scanning parameter */
1551		if (!css) {
1552			/* this means start scan from ID:1 */
1553			root_mem->last_scanned_child = 0;
1554		} else
1555			root_mem->last_scanned_child = found;
1556	}
1557
1558	return ret;
1559}
1560
1561#if MAX_NUMNODES > 1
1562
1563/*
1564 * Always updating the nodemask is not very good - even if we have an empty
1565 * list or the wrong list here, we can start from some node and traverse all
1566 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1567 *
1568 */
1569static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
1570{
1571	int nid;
1572
1573	if (time_after(mem->next_scan_node_update, jiffies))
1574		return;
1575
1576	mem->next_scan_node_update = jiffies + 10*HZ;
1577	/* make a nodemask where this memcg uses memory from */
1578	mem->scan_nodes = node_states[N_HIGH_MEMORY];
1579
1580	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
1581
1582		if (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_FILE) ||
1583		    mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_FILE))
1584			continue;
1585
1586		if (total_swap_pages &&
1587		    (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_ANON) ||
1588		     mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_ANON)))
1589			continue;
1590		node_clear(nid, mem->scan_nodes);
1591	}
1592}
1593
1594/*
1595 * Select a node where we start reclaim from. Because what we need is just
1596 * reducing the usage counter, starting from anywhere is O.K. Considering
1597 * memory reclaim from the current node, there are pros and cons.
1598 *
1599 * Freeing memory from the current node means freeing memory from a node which
1600 * we'll use or we've used. So, it may make the LRU bad. And if several threads
1601 * hit limits, they will see contention on a node. But freeing from a remote
1602 * node means more costs for memory reclaim because of memory latency.
1603 *
1604 * Now, we use round-robin. A better algorithm is welcome.
1605 */
1606int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
1607{
1608	int node;
1609
1610	mem_cgroup_may_update_nodemask(mem);
1611	node = mem->last_scanned_node;
1612
1613	node = next_node(node, mem->scan_nodes);
1614	if (node == MAX_NUMNODES)
1615		node = first_node(mem->scan_nodes);
1616	/*
1617	 * We call this when we hit the limit, not when pages are added to LRU.
1618	 * No LRU may hold pages because all pages are UNEVICTABLE or
1619	 * the memcg is too small and all pages are not on the LRU. In that
1620	 * case, we use the current node.
1621	 */
1622	if (unlikely(node == MAX_NUMNODES))
1623		node = numa_node_id();
1624
1625	mem->last_scanned_node = node;
1626	return node;
1627}
1628
1629#else
1630int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
1631{
1632	return 0;
1633}
1634#endif
1635
1636/*
1637 * Scan the hierarchy if needed to reclaim memory. We remember the last child
1638 * we reclaimed from, so that we don't end up penalizing one child extensively
1639 * based on its position in the children list.
1640 *
1641 * root_mem is the original ancestor that we've been reclaiming from.
1642 *
1643 * We give up and return to the caller when we visit root_mem twice.
1644 * (other groups can be removed while we're walking....)
1645 *
1646 * If shrink==true, this returns immediately, to avoid freeing too much.
1647 */
1648static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1649						struct zone *zone,
1650						gfp_t gfp_mask,
1651						unsigned long reclaim_options,
1652						unsigned long *total_scanned)
1653{
1654	struct mem_cgroup *victim;
1655	int ret, total = 0;
1656	int loop = 0;
1657	bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1658	bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1659	bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1660	unsigned long excess;
1661	unsigned long nr_scanned;
1662
1663	excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
1664
1665	/* If memsw_is_minimum==1, swap-out is of no use. */
1666	if (root_mem->memsw_is_minimum)
1667		noswap = true;
1668
1669	while (1) {
1670		victim = mem_cgroup_select_victim(root_mem);
1671		if (victim == root_mem) {
1672			loop++;
1673			if (loop >= 1)
1674				drain_all_stock_async();
1675			if (loop >= 2) {
1676				/*
1677				 * If we have not been able to reclaim
1678				 * anything, it might be because there are
1679				 * no reclaimable pages under this hierarchy
1680				 */
1681				if (!check_soft || !total) {
1682					css_put(&victim->css);
1683					break;
1684				}
1685				/*
1686				 * We want to do more targeted reclaim.
1687				 * excess >> 2 is not too large, so we don't
1688				 * reclaim too much, nor too small, so we don't
1689				 * keep coming back to reclaim from this cgroup
1690				 */
1691				if (total >= (excess >> 2) ||
1692					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
1693					css_put(&victim->css);
1694					break;
1695				}
1696			}
1697		}
1698		if (!mem_cgroup_local_usage(victim)) {
1699			/* this cgroup's local usage == 0 */
1700			css_put(&victim->css);
1701			continue;
1702		}
1703		/* we use swappiness of local cgroup */
1704		if (check_soft) {
1705			ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1706				noswap, get_swappiness(victim), zone,
1707				&nr_scanned);
1708			*total_scanned += nr_scanned;
1709		} else
1710			ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1711						noswap, get_swappiness(victim));
1712		css_put(&victim->css);
1713		/*
1714		 * When shrinking usage, we can't check whether we should stop
1715		 * here or reclaim more; that depends on the callers.
1716		 * last_scanned_child works well enough to keep fairness under the tree.
1717		 */
1718		if (shrink)
1719			return ret;
1720		total += ret;
1721		if (check_soft) {
1722			if (!res_counter_soft_limit_excess(&root_mem->res))
1723				return total;
1724		} else if (mem_cgroup_margin(root_mem))
1725			return total;
1726	}
1727	return total;
1728}
1729
1730/*
1731 * Check OOM-Killer is already running under our hierarchy.
1732 * If someone is running, return false.
1733 */
1734static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1735{
1736	int x, lock_count = 0;
1737	struct mem_cgroup *iter;
1738
1739	for_each_mem_cgroup_tree(iter, mem) {
1740		x = atomic_inc_return(&iter->oom_lock);
1741		lock_count = max(x, lock_count);
1742	}
1743
1744	if (lock_count == 1)
1745		return true;
1746	return false;
1747}
1748
1749static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
1750{
1751	struct mem_cgroup *iter;
1752
1753	/*
1754	 * When a new child is created while the hierarchy is under oom,
1755	 * mem_cgroup_oom_lock() may not be called. We have to use
1756	 * atomic_add_unless() here.
1757	 */
1758	for_each_mem_cgroup_tree(iter, mem)
1759		atomic_add_unless(&iter->oom_lock, -1, 0);
1760	return 0;
1761}
1762
1763
1764static DEFINE_MUTEX(memcg_oom_mutex);
1765static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1766
1767struct oom_wait_info {
1768	struct mem_cgroup *mem;
1769	wait_queue_t	wait;
1770};
1771
1772static int memcg_oom_wake_function(wait_queue_t *wait,
1773	unsigned mode, int sync, void *arg)
1774{
1775	struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
1776	struct oom_wait_info *oom_wait_info;
1777
1778	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1779
1780	if (oom_wait_info->mem == wake_mem)
1781		goto wakeup;
1782	/* if no hierarchy, no match */
1783	if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
1784		return 0;
1785	/*
1786	 * Both oom_wait_info->mem and wake_mem are stable under us,
1787	 * so we can use css_is_ancestor() without taking care of RCU.
1788	 */
1789	if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
1790	    !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
1791		return 0;
1792
1793wakeup:
1794	return autoremove_wake_function(wait, mode, sync, arg);
1795}
1796
1797static void memcg_wakeup_oom(struct mem_cgroup *mem)
1798{
1799	/* for filtering, pass "mem" as argument. */
1800	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
1801}
1802
1803static void memcg_oom_recover(struct mem_cgroup *mem)
1804{
1805	if (mem && atomic_read(&mem->oom_lock))
1806		memcg_wakeup_oom(mem);
1807}
1808
1809/*
1810 * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1811 */
1812bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1813{
1814	struct oom_wait_info owait;
1815	bool locked, need_to_kill;
1816
1817	owait.mem = mem;
1818	owait.wait.flags = 0;
1819	owait.wait.func = memcg_oom_wake_function;
1820	owait.wait.private = current;
1821	INIT_LIST_HEAD(&owait.wait.task_list);
1822	need_to_kill = true;
1823	/* At first, try to OOM lock hierarchy under mem.*/
1824	mutex_lock(&memcg_oom_mutex);
1825	locked = mem_cgroup_oom_lock(mem);
1826	/*
1827	 * Even if signal_pending(), we can't quit charge() loop without
1828	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1829	 * under OOM is always welcome, so use TASK_KILLABLE here.
1830	 */
1831	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1832	if (!locked || mem->oom_kill_disable)
1833		need_to_kill = false;
1834	if (locked)
1835		mem_cgroup_oom_notify(mem);
1836	mutex_unlock(&memcg_oom_mutex);
1837
1838	if (need_to_kill) {
1839		finish_wait(&memcg_oom_waitq, &owait.wait);
1840		mem_cgroup_out_of_memory(mem, mask);
1841	} else {
1842		schedule();
1843		finish_wait(&memcg_oom_waitq, &owait.wait);
1844	}
1845	mutex_lock(&memcg_oom_mutex);
1846	mem_cgroup_oom_unlock(mem);
1847	memcg_wakeup_oom(mem);
1848	mutex_unlock(&memcg_oom_mutex);
1849
1850	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1851		return false;
1852	/* Give chance to dying process */
1853	schedule_timeout(1);
1854	return true;
1855}
1856
1857/*
1858 * Currently used to update mapped file statistics, but the routine can be
1859 * generalized to update other statistics as well.
1860 *
1861 * Notes: Race condition
1862 *
1863 * We usually use page_cgroup_lock() for accessing page_cgroup members, but
1864 * it tends to be costly. Considering some conditions, however, we don't
1865 * need to do so _always_.
1866 *
1867 * Considering "charge", lock_page_cgroup() is not required because all
1868 * file-stat operations happen after a page is attached to the radix-tree.
1869 * There is no race with "charge".
1870 *
1871 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
1872 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
1873 * if there is a race with "uncharge". The statistics themselves are properly
1874 * handled by flags.
1875 *
1876 * Considering "move", this is the only case where we see a race. To make the
1877 * race window small, we check the MEM_CGROUP_ON_MOVE percpu value to detect
1878 * the possibility of a race condition. If there is one, we take a lock.
1879 */
1880
1881void mem_cgroup_update_page_stat(struct page *page,
1882				 enum mem_cgroup_page_stat_item idx, int val)
1883{
1884	struct mem_cgroup *mem;
1885	struct page_cgroup *pc = lookup_page_cgroup(page);
1886	bool need_unlock = false;
1887	unsigned long uninitialized_var(flags);
1888
1889	if (unlikely(!pc))
1890		return;
1891
1892	rcu_read_lock();
1893	mem = pc->mem_cgroup;
1894	if (unlikely(!mem || !PageCgroupUsed(pc)))
1895		goto out;
1896	/* is pc->mem_cgroup unstable? */
1897	if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
1898		/* take a lock to safely access pc->mem_cgroup */
1899		move_lock_page_cgroup(pc, &flags);
1900		need_unlock = true;
1901		mem = pc->mem_cgroup;
1902		if (!mem || !PageCgroupUsed(pc))
1903			goto out;
1904	}
1905
1906	switch (idx) {
1907	case MEMCG_NR_FILE_MAPPED:
1908		if (val > 0)
1909			SetPageCgroupFileMapped(pc);
1910		else if (!page_mapped(page))
1911			ClearPageCgroupFileMapped(pc);
1912		idx = MEM_CGROUP_STAT_FILE_MAPPED;
1913		break;
1914	default:
1915		BUG();
1916	}
1917
1918	this_cpu_add(mem->stat->count[idx], val);
1919
1920out:
1921	if (unlikely(need_unlock))
1922		move_unlock_page_cgroup(pc, &flags);
1923	rcu_read_unlock();
1924	return;
1925}
1926EXPORT_SYMBOL(mem_cgroup_update_page_stat);
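
/*
 * An illustrative caller sketch (assumed, not part of this file): the
 * file-rmap code is expected to funnel into this function via small
 * inc/dec wrappers when a file page is mapped or unmapped, e.g.:
 *
 *	static void file_mapped_example(struct page *page, int mapped)
 *	{
 *		mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED,
 *					    mapped ? 1 : -1);
 *	}
 */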
1927
1928/*
1929 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
1930 * TODO: bigger batches may be necessary on big-iron machines.
1931 */
1932#define CHARGE_BATCH	32U
1933struct memcg_stock_pcp {
1934	struct mem_cgroup *cached; /* this is never the root cgroup */
1935	unsigned int nr_pages;
1936	struct work_struct work;
1937};
1938static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1939static atomic_t memcg_drain_count;
1940
1941/*
1942 * Try to consume stocked charge on this cpu. If successful, one page is
1943 * consumed from the local stock and true is returned. If the stock is 0 or
1944 * holds charges from a cgroup other than the current target, false is
1945 * returned; the stock will be refilled by the caller's slow path.
1946 */
1947static bool consume_stock(struct mem_cgroup *mem)
1948{
1949	struct memcg_stock_pcp *stock;
1950	bool ret = true;
1951
1952	stock = &get_cpu_var(memcg_stock);
1953	if (mem == stock->cached && stock->nr_pages)
1954		stock->nr_pages--;
1955	else /* need to call res_counter_charge */
1956		ret = false;
1957	put_cpu_var(memcg_stock);
1958	return ret;
1959}
1960
1961/*
1962 * Return stocked charges cached in the percpu area to res_counter; reset cache.
1963 */
1964static void drain_stock(struct memcg_stock_pcp *stock)
1965{
1966	struct mem_cgroup *old = stock->cached;
1967
1968	if (stock->nr_pages) {
1969		unsigned long bytes = stock->nr_pages * PAGE_SIZE;
1970
1971		res_counter_uncharge(&old->res, bytes);
1972		if (do_swap_account)
1973			res_counter_uncharge(&old->memsw, bytes);
1974		stock->nr_pages = 0;
1975	}
1976	stock->cached = NULL;
1977}
1978
1979/*
1980 * This must be called with preemption disabled, or by a thread which
1981 * is pinned to the local cpu.
1982 */
1983static void drain_local_stock(struct work_struct *dummy)
1984{
1985	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1986	drain_stock(stock);
1987}
1988
1989/*
1990 * Cache charges obtained from res_counter in the local per-cpu area.
1991 * They will be consumed later by consume_stock().
1992 */
1993static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
1994{
1995	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1996
1997	if (stock->cached != mem) { /* reset if necessary */
1998		drain_stock(stock);
1999		stock->cached = mem;
2000	}
2001	stock->nr_pages += nr_pages;
2002	put_cpu_var(memcg_stock);
2003}
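
/*
 * Putting consume_stock()/refill_stock() together, the charge fast path
 * looks roughly like the sketch below (illustrative only; the real logic
 * lives in __mem_cgroup_try_charge() and mem_cgroup_do_charge() further
 * down). Most single-page charges hit the percpu stock and never touch
 * the res_counter spinlock:
 *
 *	if (nr_pages == 1 && consume_stock(mem))
 *		return 0;	(percpu fast path, no shared locks)
 *	(slow path: charge a whole batch, then stash the surplus)
 *	res_counter_charge(&mem->res, CHARGE_BATCH * PAGE_SIZE, &fail_res);
 *	refill_stock(mem, CHARGE_BATCH - nr_pages);
 */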
2004
2005/*
2006 * Tries to drain stocked charges on other cpus. This function is asynchronous
2007 * and just queues one work item per cpu so each cpu drains locally. Callers
2008 * can expect some charges to be returned to the res_counter later, but they
2009 * cannot wait for it.
2010 */
2011static void drain_all_stock_async(void)
2012{
2013	int cpu;
2014	/* This function schedules "drain" in an asynchronous way.
2015	 * The result of "drain" is not directly handled by callers, so if
2016	 * someone is already draining, we don't have to schedule more work.
2017	 * The WORK_STRUCT_PENDING check in queue_work_on() will catch any
2018	 * race anyway; we just do a loose check here.
2019	 */
2020	if (atomic_read(&memcg_drain_count))
2021		return;
2022	/* Notify other cpus that system-wide "drain" is running */
2023	atomic_inc(&memcg_drain_count);
2024	get_online_cpus();
2025	for_each_online_cpu(cpu) {
2026		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2027		schedule_work_on(cpu, &stock->work);
2028	}
2029	put_online_cpus();
2030	atomic_dec(&memcg_drain_count);
2031	/* We don't wait for flush_work */
2032}
2033
2034/* This is a synchronous drain interface. */
2035static void drain_all_stock_sync(void)
2036{
2037	/* called when force_empty is called */
2038	atomic_inc(&memcg_drain_count);
2039	schedule_on_each_cpu(drain_local_stock);
2040	atomic_dec(&memcg_drain_count);
2041}
2042
2043/*
2044 * This function drains the percpu counter values from a DEAD cpu and
2045 * moves them into mem->nocpu_base. Note that this function can be preempted.
2046 */
2047static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
2048{
2049	int i;
2050
2051	spin_lock(&mem->pcp_counter_lock);
2052	for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
2053		long x = per_cpu(mem->stat->count[i], cpu);
2054
2055		per_cpu(mem->stat->count[i], cpu) = 0;
2056		mem->nocpu_base.count[i] += x;
2057	}
2058	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2059		unsigned long x = per_cpu(mem->stat->events[i], cpu);
2060
2061		per_cpu(mem->stat->events[i], cpu) = 0;
2062		mem->nocpu_base.events[i] += x;
2063	}
2064	/* need to clear the ON_MOVE value; it works as a kind of lock. */
2065	per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
2066	spin_unlock(&mem->pcp_counter_lock);
2067}
2068
2069static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
2070{
2071	int idx = MEM_CGROUP_ON_MOVE;
2072
2073	spin_lock(&mem->pcp_counter_lock);
2074	per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
2075	spin_unlock(&mem->pcp_counter_lock);
2076}
2077
2078static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
2079					unsigned long action,
2080					void *hcpu)
2081{
2082	int cpu = (unsigned long)hcpu;
2083	struct memcg_stock_pcp *stock;
2084	struct mem_cgroup *iter;
2085
2086	if (action == CPU_ONLINE) {
2087		for_each_mem_cgroup_all(iter)
2088			synchronize_mem_cgroup_on_move(iter, cpu);
2089		return NOTIFY_OK;
2090	}
2091
2092	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2093		return NOTIFY_OK;
2094
2095	for_each_mem_cgroup_all(iter)
2096		mem_cgroup_drain_pcp_counter(iter, cpu);
2097
2098	stock = &per_cpu(memcg_stock, cpu);
2099	drain_stock(stock);
2100	return NOTIFY_OK;
2101}
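
/*
 * A note on registration: a callback like the one above is typically
 * wired up during memcg initialisation with the CPU hotplug helper,
 * e.g. (sketch; the actual registration site is elsewhere in this file):
 *
 *	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
 */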
2102
2103
2104/* See __mem_cgroup_try_charge() for details */
2105enum {
2106	CHARGE_OK,		/* success */
2107	CHARGE_RETRY,		/* need to retry but retry is not bad */
2108	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
2109	CHARGE_WOULDBLOCK,	/* __GFP_WAIT wasn't set and not enough res. */
2110	CHARGE_OOM_DIE,		/* the current task was killed by OOM */
2111};
2112
2113static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
2114				unsigned int nr_pages, bool oom_check)
2115{
2116	unsigned long csize = nr_pages * PAGE_SIZE;
2117	struct mem_cgroup *mem_over_limit;
2118	struct res_counter *fail_res;
2119	unsigned long flags = 0;
2120	int ret;
2121
2122	ret = res_counter_charge(&mem->res, csize, &fail_res);
2123
2124	if (likely(!ret)) {
2125		if (!do_swap_account)
2126			return CHARGE_OK;
2127		ret = res_counter_charge(&mem->memsw, csize, &fail_res);
2128		if (likely(!ret))
2129			return CHARGE_OK;
2130
2131		res_counter_uncharge(&mem->res, csize);
2132		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2133		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2134	} else
2135		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2136	/*
2137	 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
2138	 * of regular pages (CHARGE_BATCH), or a single regular page (1).
2139	 *
2140	 * Never reclaim on behalf of optional batching, retry with a
2141	 * single page instead.
2142	 */
2143	if (nr_pages == CHARGE_BATCH)
2144		return CHARGE_RETRY;
2145
2146	if (!(gfp_mask & __GFP_WAIT))
2147		return CHARGE_WOULDBLOCK;
2148
2149	ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
2150					      gfp_mask, flags, NULL);
2151	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2152		return CHARGE_RETRY;
2153	/*
2154	 * Even though the limit is exceeded at this point, reclaim
2155	 * may have been able to free some pages.  Retry the charge
2156	 * before killing the task.
2157	 *
2158	 * Only for regular pages, though: huge pages are rather
2159	 * unlikely to succeed so close to the limit, and we fall back
2160	 * to regular pages anyway in case of failure.
2161	 */
2162	if (nr_pages == 1 && ret)
2163		return CHARGE_RETRY;
2164
2165	/*
2166	 * At task move, charge accounts can be doubly counted. So, it's
2167	 * better to wait until the end of task_move if something is going on.
2168	 */
2169	if (mem_cgroup_wait_acct_move(mem_over_limit))
2170		return CHARGE_RETRY;
2171
2172	/* If we don't need to invoke the OOM killer at all, return immediately */
2173	if (!oom_check)
2174		return CHARGE_NOMEM;
2175	/* check OOM */
2176	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
2177		return CHARGE_OOM_DIE;
2178
2179	return CHARGE_RETRY;
2180}
2181
2182/*
2183 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
2184 * the OOM killer can be invoked.
2185 */
2186static int __mem_cgroup_try_charge(struct mm_struct *mm,
2187				   gfp_t gfp_mask,
2188				   unsigned int nr_pages,
2189				   struct mem_cgroup **memcg,
2190				   bool oom)
2191{
2192	unsigned int batch = max(CHARGE_BATCH, nr_pages);
2193	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2194	struct mem_cgroup *mem = NULL;
2195	int ret;
2196
2197	/*
2198	 * Unlike the global VM's OOM kill, we are not under a system-level
2199	 * memory shortage. So, let dying processes proceed, in addition to
2200	 * MEMDIE processes.
2201	 */
2202	if (unlikely(test_thread_flag(TIF_MEMDIE)
2203		     || fatal_signal_pending(current)))
2204		goto bypass;
2205
2206	/*
2207	 * We always charge the cgroup the mm_struct belongs to.
2208	 * The mm_struct's mem_cgroup changes on task migration if the
2209	 * thread group leader migrates. It's possible that mm is not
2210	 * set, if so charge the init_mm (happens for pagecache usage).
2211	 */
2212	if (!*memcg && !mm)
2213		goto bypass;
2214again:
2215	if (*memcg) { /* css should be a valid one */
2216		mem = *memcg;
2217		VM_BUG_ON(css_is_removed(&mem->css));
2218		if (mem_cgroup_is_root(mem))
2219			goto done;
2220		if (nr_pages == 1 && consume_stock(mem))
2221			goto done;
2222		css_get(&mem->css);
2223	} else {
2224		struct task_struct *p;
2225
2226		rcu_read_lock();
2227		p = rcu_dereference(mm->owner);
2228		/*
2229		 * Because we don't have task_lock(), "p" can exit.
2230		 * In that case, "mem" can point to root, or "p" can be NULL due
2231		 * to a race with swapoff. Then, we have a small risk of
2232		 * mis-accounting. But this kind of mis-accounting by a race always
2233		 * happens because we don't hold cgroup_mutex(); taking it would be
2234		 * overkill, so we allow that small race here.
2235		 * (*) swapoff et al. charge against the mm_struct, not against
2236		 * the task_struct. So, mm->owner can be NULL.
2237		 */
2238		mem = mem_cgroup_from_task(p);
2239		if (!mem || mem_cgroup_is_root(mem)) {
2240			rcu_read_unlock();
2241			goto done;
2242		}
2243		if (nr_pages == 1 && consume_stock(mem)) {
2244			/*
2245			 * It seems dangerous to access memcg without css_get().
2246			 * But considering how consume_stock works, it's not
2247			 * necessary. If consume_stock succeeds, some charges
2248			 * from this memcg are cached on this cpu. So, we
2249			 * don't need to call css_get()/css_tryget() before
2250			 * calling consume_stock().
2251			 */
2252			rcu_read_unlock();
2253			goto done;
2254		}
2255		/* after this point we may block, so take a refcount first */
2256		if (!css_tryget(&mem->css)) {
2257			rcu_read_unlock();
2258			goto again;
2259		}
2260		rcu_read_unlock();
2261	}
2262
2263	do {
2264		bool oom_check;
2265
2266		/* If killed, bypass charge */
2267		if (fatal_signal_pending(current)) {
2268			css_put(&mem->css);
2269			goto bypass;
2270		}
2271
2272		oom_check = false;
2273		if (oom && !nr_oom_retries) {
2274			oom_check = true;
2275			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2276		}
2277
2278		ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check);
2279		switch (ret) {
2280		case CHARGE_OK:
2281			break;
2282		case CHARGE_RETRY: /* not in OOM situation but retry */
2283			batch = nr_pages;
2284			css_put(&mem->css);
2285			mem = NULL;
2286			goto again;
2287		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2288			css_put(&mem->css);
2289			goto nomem;
2290		case CHARGE_NOMEM: /* OOM routine works */
2291			if (!oom) {
2292				css_put(&mem->css);
2293				goto nomem;
2294			}
2295			/* If oom, we never return -ENOMEM */
2296			nr_oom_retries--;
2297			break;
2298		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
2299			css_put(&mem->css);
2300			goto bypass;
2301		}
2302	} while (ret != CHARGE_OK);
2303
2304	if (batch > nr_pages)
2305		refill_stock(mem, batch - nr_pages);
2306	css_put(&mem->css);
2307done:
2308	*memcg = mem;
2309	return 0;
2310nomem:
2311	*memcg = NULL;
2312	return -ENOMEM;
2313bypass:
2314	*memcg = NULL;
2315	return 0;
2316}
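
/*
 * A condensed, hypothetical caller sketch of the try/commit protocol
 * implemented above (the real charge paths below do this with more
 * care; "pc" and "ctype" stand for whatever is being charged):
 *
 *	struct mem_cgroup *mem = NULL;
 *
 *	if (__mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true))
 *		return -ENOMEM;		(charge failed)
 *	if (mem)
 *		__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
 */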
2317
2318/*
2319 * Sometimes we have to undo a charge we got by try_charge().
2320 * This function is for that and does the uncharge of the res_counters
2321 * for a charge gotten by try_charge().
2322 */
2323static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
2324				       unsigned int nr_pages)
2325{
2326	if (!mem_cgroup_is_root(mem)) {
2327		unsigned long bytes = nr_pages * PAGE_SIZE;
2328
2329		res_counter_uncharge(&mem->res, bytes);
2330		if (do_swap_account)
2331			res_counter_uncharge(&mem->memsw, bytes);
2332	}
2333}
2334
2335/*
2336 * A helper function to get a mem_cgroup from an ID. Must be called under
2337 * rcu_read_lock(). The caller must check css_is_removed() or similar if
2338 * that is a concern (dropping a refcnt from swap can be called against a
2339 * removed memcg).
2340 */
2341static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2342{
2343	struct cgroup_subsys_state *css;
2344
2345	/* ID 0 is unused ID */
2346	if (!id)
2347		return NULL;
2348	css = css_lookup(&mem_cgroup_subsys, id);
2349	if (!css)
2350		return NULL;
2351	return container_of(css, struct mem_cgroup, css);
2352}
2353
2354struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2355{
2356	struct mem_cgroup *mem = NULL;
2357	struct page_cgroup *pc;
2358	unsigned short id;
2359	swp_entry_t ent;
2360
2361	VM_BUG_ON(!PageLocked(page));
2362
2363	pc = lookup_page_cgroup(page);
2364	lock_page_cgroup(pc);
2365	if (PageCgroupUsed(pc)) {
2366		mem = pc->mem_cgroup;
2367		if (mem && !css_tryget(&mem->css))
2368			mem = NULL;
2369	} else if (PageSwapCache(page)) {
2370		ent.val = page_private(page);
2371		id = lookup_swap_cgroup(ent);
2372		rcu_read_lock();
2373		mem = mem_cgroup_lookup(id);
2374		if (mem && !css_tryget(&mem->css))
2375			mem = NULL;
2376		rcu_read_unlock();
2377	}
2378	unlock_page_cgroup(pc);
2379	return mem;
2380}
2381
2382static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
2383				       struct page *page,
2384				       unsigned int nr_pages,
2385				       struct page_cgroup *pc,
2386				       enum charge_type ctype)
2387{
2388	lock_page_cgroup(pc);
2389	if (unlikely(PageCgroupUsed(pc))) {
2390		unlock_page_cgroup(pc);
2391		__mem_cgroup_cancel_charge(mem, nr_pages);
2392		return;
2393	}
2394	/*
2395	 * We don't need lock_page_cgroup() for tail pages, because they are
2396	 * not accessed by any other context at this point.
2397	 */
2398	pc->mem_cgroup = mem;
2399	/*
2400	 * We access the page_cgroup asynchronously without lock_page_cgroup().
2401	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2402	 * is accessed after testing the USED bit. To make pc->mem_cgroup
2403	 * visible before the USED bit, we need a memory barrier here.
2404	 * See mem_cgroup_add_lru_list(), etc.
2405	 */
2406	smp_wmb();
2407	switch (ctype) {
2408	case MEM_CGROUP_CHARGE_TYPE_CACHE:
2409	case MEM_CGROUP_CHARGE_TYPE_SHMEM:
2410		SetPageCgroupCache(pc);
2411		SetPageCgroupUsed(pc);
2412		break;
2413	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2414		ClearPageCgroupCache(pc);
2415		SetPageCgroupUsed(pc);
2416		break;
2417	default:
2418		break;
2419	}
2420
2421	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages);
2422	unlock_page_cgroup(pc);
2423	/*
2424	 * "charge_statistics" updated the event counter, so check it now.
2425	 * Insert the ancestor (and the ancestor's ancestors) into the
2426	 * softlimit RB-tree if they exceed their soft limit.
2427	 */
2428	memcg_check_events(mem, page);
2429}
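
/*
 * Sketch of the lockless reader side that the smp_wmb() above pairs
 * with (illustrative, not a function in this file). A reader tests the
 * USED bit first and orders the pc->mem_cgroup load with smp_rmb(), so
 * it is guaranteed to see the mem_cgroup stored before the bit was set:
 *
 *	struct page_cgroup *pc = lookup_page_cgroup(page);
 *
 *	if (PageCgroupUsed(pc)) {
 *		smp_rmb();
 *		use(pc->mem_cgroup);	(use() is a placeholder)
 *	}
 */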
2430
2431#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2432
2433#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
2434			(1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
2435/*
2436 * Because tail pages are not marked as "used", set that flag here. We're
2437 * under zone->lru_lock, 'splitting on pmd' and compound_lock.
2438 */
2439void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
2440{
2441	struct page_cgroup *head_pc = lookup_page_cgroup(head);
2442	struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
2443	unsigned long flags;
2444
2445	if (mem_cgroup_disabled())
2446		return;
2447	/*
2448	 * We have no races with charge/uncharge but will have races with
2449	 * page state accounting.
2450	 */
2451	move_lock_page_cgroup(head_pc, &flags);
2452
2453	tail_pc->mem_cgroup = head_pc->mem_cgroup;
2454	smp_wmb(); /* see __commit_charge() */
2455	if (PageCgroupAcctLRU(head_pc)) {
2456		enum lru_list lru;
2457		struct mem_cgroup_per_zone *mz;
2458
2459		/*
2460		 * LRU flags cannot be copied because we need to add the tail
2461		 * page to the LRU by a generic call, and our hook will be
2462		 * called. We hold lru_lock, so reduce the counter directly.
2463		 */
2464		lru = page_lru(head);
2465		mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
2466		MEM_CGROUP_ZSTAT(mz, lru) -= 1;
2467	}
2468	tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
2469	move_unlock_page_cgroup(head_pc, &flags);
2470}
2471#endif
2472
2473/**
2474 * mem_cgroup_move_account - move account of the page
2475 * @page: the page
2476 * @nr_pages: number of regular pages (>1 for huge pages)
2477 * @pc:	page_cgroup of the page.
2478 * @from: mem_cgroup which the page is moved from.
2479 * @to:	mem_cgroup which the page is moved to. @from != @to.
2480 * @uncharge: whether we should call uncharge and css_put against @from.
2481 *
2482 * The caller must confirm the following:
2483 * - the page is not on the LRU (isolate_page() is useful.)
2484 * - compound_lock is held when nr_pages > 1
2485 *
2486 * This function does neither "charge" nor css_get against the new cgroup;
2487 * that should be done by the caller (__mem_cgroup_try_charge is useful).
2488 * If @uncharge is true, this function does "uncharge" from the old cgroup;
2489 * if @uncharge is false, the caller should do the "uncharge" itself.
2490 */
2491static int mem_cgroup_move_account(struct page *page,
2492				   unsigned int nr_pages,
2493				   struct page_cgroup *pc,
2494				   struct mem_cgroup *from,
2495				   struct mem_cgroup *to,
2496				   bool uncharge)
2497{
2498	unsigned long flags;
2499	int ret;
2500
2501	VM_BUG_ON(from == to);
2502	VM_BUG_ON(PageLRU(page));
2503	/*
2504	 * The page is isolated from the LRU, so the collapse function
2505	 * will not handle it. But page splitting can still happen, so
2506	 * do this check under compound_page_lock(), which the caller
2507	 * should hold.
2508	 */
2509	ret = -EBUSY;
2510	if (nr_pages > 1 && !PageTransHuge(page))
2511		goto out;
2512
2513	lock_page_cgroup(pc);
2514
2515	ret = -EINVAL;
2516	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
2517		goto unlock;
2518
2519	move_lock_page_cgroup(pc, &flags);
2520
2521	if (PageCgroupFileMapped(pc)) {
2522		/* Update mapped_file data for mem_cgroup */
2523		preempt_disable();
2524		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2525		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2526		preempt_enable();
2527	}
2528	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
2529	if (uncharge)
2530		/* This is not "cancel", but cancel_charge does all we need. */
2531		__mem_cgroup_cancel_charge(from, nr_pages);
2532
2533	/* caller should have done css_get */
2534	pc->mem_cgroup = to;
2535	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
2536	/*
2537	 * We charge against "to", which may not have any tasks, so "to"
2538	 * could be under rmdir(). But in the current implementation, the
2539	 * callers of this function are just force_empty() and move charge,
2540	 * so it's guaranteed that "to" is never removed. Hence we don't
2541	 * check the rmdir status here.
2542	 */
2543	move_unlock_page_cgroup(pc, &flags);
2544	ret = 0;
2545unlock:
2546	unlock_page_cgroup(pc);
2547	/*
2548	 * check events
2549	 */
2550	memcg_check_events(to, page);
2551	memcg_check_events(from, page);
2552out:
2553	return ret;
2554}
2555
2556/*
2557 * Move charges to the parent cgroup.
2558 */
2559
2560static int mem_cgroup_move_parent(struct page *page,
2561				  struct page_cgroup *pc,
2562				  struct mem_cgroup *child,
2563				  gfp_t gfp_mask)
2564{
2565	struct cgroup *cg = child->css.cgroup;
2566	struct cgroup *pcg = cg->parent;
2567	struct mem_cgroup *parent;
2568	unsigned int nr_pages;
2569	unsigned long uninitialized_var(flags);
2570	int ret;
2571
2572	/* Is ROOT ? */
2573	if (!pcg)
2574		return -EINVAL;
2575
2576	ret = -EBUSY;
2577	if (!get_page_unless_zero(page))
2578		goto out;
2579	if (isolate_lru_page(page))
2580		goto put;
2581
2582	nr_pages = hpage_nr_pages(page);
2583
2584	parent = mem_cgroup_from_cont(pcg);
2585	ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
2586	if (ret || !parent)
2587		goto put_back;
2588
2589	if (nr_pages > 1)
2590		flags = compound_lock_irqsave(page);
2591
2592	ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
2593	if (ret)
2594		__mem_cgroup_cancel_charge(parent, nr_pages);
2595
2596	if (nr_pages > 1)
2597		compound_unlock_irqrestore(page, flags);
2598put_back:
2599	putback_lru_page(page);
2600put:
2601	put_page(page);
2602out:
2603	return ret;
2604}
2605
2606/*
2607 * Charge the memory controller for page usage.
2608 * Return
2609 * 0 if the charge was successful
2610 * < 0 if the cgroup is over its limit
2611 */
2612static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2613				gfp_t gfp_mask, enum charge_type ctype)
2614{
2615	struct mem_cgroup *mem = NULL;
2616	unsigned int nr_pages = 1;
2617	struct page_cgroup *pc;
2618	bool oom = true;
2619	int ret;
2620
2621	if (PageTransHuge(page)) {
2622		nr_pages <<= compound_order(page);
2623		VM_BUG_ON(!PageTransHuge(page));
2624		/*
2625		 * Never OOM-kill a process for a huge page.  The
2626		 * fault handler will fall back to regular pages.
2627		 */
2628		oom = false;
2629	}
2630
2631	pc = lookup_page_cgroup(page);
2632	BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
2633
2634	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom);
2635	if (ret || !mem)
2636		return ret;
2637
2638	__mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype);
2639	return 0;
2640}
2641
2642int mem_cgroup_newpage_charge(struct page *page,
2643			      struct mm_struct *mm, gfp_t gfp_mask)
2644{
2645	if (mem_cgroup_disabled())
2646		return 0;
2647	/*
2648	 * If already mapped, we don't have to account.
2649	 * If it is page cache, page->mapping has an address_space.
2650	 * But page->mapping may hold a stale anon_vma pointer; detect
2651	 * that with the PageAnon() check. A newly-mapped-anon page's
2652	 * page->mapping is NULL.
2653	 */
2654	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2655		return 0;
2656	if (unlikely(!mm))
2657		mm = &init_mm;
2658	return mem_cgroup_charge_common(page, mm, gfp_mask,
2659				MEM_CGROUP_CHARGE_TYPE_MAPPED);
2660}
2661
2662static void
2663__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2664					enum charge_type ctype);
2665
2666static void
2667__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem,
2668					enum charge_type ctype)
2669{
2670	struct page_cgroup *pc = lookup_page_cgroup(page);
2671	/*
2672	 * In some cases (SwapCache, FUSE via splice_buf->radixtree), the page
2673	 * is already on the LRU. This means the page may be on some other
2674	 * page_cgroup's LRU. Take care of that.
2675	 */
2676	mem_cgroup_lru_del_before_commit(page);
2677	__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
2678	mem_cgroup_lru_add_after_commit(page);
2679	return;
2680}
2681
2682int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2683				gfp_t gfp_mask)
2684{
2685	struct mem_cgroup *mem = NULL;
2686	int ret;
2687
2688	if (mem_cgroup_disabled())
2689		return 0;
2690	if (PageCompound(page))
2691		return 0;
2692	/*
2693	 * Corner case handling. This is usually called from
2694	 * add_to_page_cache(). But some FS (shmem) precharge the page before
2695	 * calling it and then call add_to_page_cache() with GFP_NOWAIT.
2696	 *
2697	 * In the GFP_NOWAIT case, the page may be pre-charged before calling
2698	 * add_to_page_cache() (see shmem.c). Check for that here to avoid
2699	 * charging twice. (It works, but at a slightly larger cost.)
2700	 * And when the page is SwapCache, the swap information should be
2701	 * taken into account. This is under lock_page() now.
2702	 */
2703	if (!(gfp_mask & __GFP_WAIT)) {
2704		struct page_cgroup *pc;
2705
2706		pc = lookup_page_cgroup(page);
2707		if (!pc)
2708			return 0;
2709		lock_page_cgroup(pc);
2710		if (PageCgroupUsed(pc)) {
2711			unlock_page_cgroup(pc);
2712			return 0;
2713		}
2714		unlock_page_cgroup(pc);
2715	}
2716
2717	if (unlikely(!mm))
2718		mm = &init_mm;
2719
2720	if (page_is_file_cache(page)) {
2721		ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true);
2722		if (ret || !mem)
2723			return ret;
2724
2725		/*
2726		 * FUSE reuses pages without going through the final
2727		 * put that would remove them from the LRU list; make
2728		 * sure that they get relinked properly.
2729		 */
2730		__mem_cgroup_commit_charge_lrucare(page, mem,
2731					MEM_CGROUP_CHARGE_TYPE_CACHE);
2732		return ret;
2733	}
2734	/* shmem */
2735	if (PageSwapCache(page)) {
2736		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2737		if (!ret)
2738			__mem_cgroup_commit_charge_swapin(page, mem,
2739					MEM_CGROUP_CHARGE_TYPE_SHMEM);
2740	} else
2741		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2742					MEM_CGROUP_CHARGE_TYPE_SHMEM);
2743
2744	return ret;
2745}
2746
2747/*
2748 * During swap-in (try_charge -> commit or cancel), the page is locked.
2749 * And when try_charge() returns successfully, one refcnt to the memcg,
2750 * not tied to a struct page_cgroup, is acquired. This refcnt will be
2751 * consumed by "commit()" or released by "cancel()".
2752 */
2753int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2754				 struct page *page,
2755				 gfp_t mask, struct mem_cgroup **ptr)
2756{
2757	struct mem_cgroup *mem;
2758	int ret;
2759
2760	*ptr = NULL;
2761
2762	if (mem_cgroup_disabled())
2763		return 0;
2764
2765	if (!do_swap_account)
2766		goto charge_cur_mm;
2767	/*
2768	 * A racing thread's fault, or swapoff, may have already updated
2769	 * the pte, and even removed page from swap cache: in those cases
2770	 * do_swap_page()'s pte_same() test will fail; but there's also a
2771	 * KSM case which does need to charge the page.
2772	 */
2773	if (!PageSwapCache(page))
2774		goto charge_cur_mm;
2775	mem = try_get_mem_cgroup_from_page(page);
2776	if (!mem)
2777		goto charge_cur_mm;
2778	*ptr = mem;
2779	ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
2780	css_put(&mem->css);
2781	return ret;
2782charge_cur_mm:
2783	if (unlikely(!mm))
2784		mm = &init_mm;
2785	return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
2786}
2787
2788static void
2789__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2790					enum charge_type ctype)
2791{
2792	if (mem_cgroup_disabled())
2793		return;
2794	if (!ptr)
2795		return;
2796	cgroup_exclude_rmdir(&ptr->css);
2797
2798	__mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
2799	/*
2800	 * Now the swap is in memory. This means the page may be counted
2801	 * both as mem and swap, i.e. double counted.
2802	 * Fix it by uncharging from memsw. Basically, this SwapCache is
2803	 * stable under lock_page(). But in do_swap_page() (memory.c),
2804	 * reuse_swap_page() may call delete_from_swap_cache() before here.
2805	 */
2806	if (do_swap_account && PageSwapCache(page)) {
2807		swp_entry_t ent = {.val = page_private(page)};
2808		unsigned short id;
2809		struct mem_cgroup *memcg;
2810
2811		id = swap_cgroup_record(ent, 0);
2812		rcu_read_lock();
2813		memcg = mem_cgroup_lookup(id);
2814		if (memcg) {
2815			/*
2816			 * This recorded memcg can be an obsolete one, so
2817			 * avoid calling css_tryget().
2818			 */
2819			if (!mem_cgroup_is_root(memcg))
2820				res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2821			mem_cgroup_swap_statistics(memcg, false);
2822			mem_cgroup_put(memcg);
2823		}
2824		rcu_read_unlock();
2825	}
2826	/*
2827	 * At swapin, we may charge against a cgroup which has no tasks, so
2828	 * rmdir()->pre_destroy() can be called while we do this charge.
2829	 * In that case, we need to call pre_destroy() again; check it here.
2830	 */
2831	cgroup_release_and_wakeup_rmdir(&ptr->css);
2832}
2833
2834void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2835{
2836	__mem_cgroup_commit_charge_swapin(page, ptr,
2837					MEM_CGROUP_CHARGE_TYPE_MAPPED);
2838}
2839
2840void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2841{
2842	if (mem_cgroup_disabled())
2843		return;
2844	if (!mem)
2845		return;
2846	__mem_cgroup_cancel_charge(mem, 1);
2847}
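
/*
 * The swap-in protocol above, condensed into a do_swap_page()-style
 * caller sketch (illustrative; error handling simplified and the
 * pte_same_ok condition is a placeholder):
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out_fail;
 *	...	(map the page, set up the pte)
 *	if (pte_same_ok)
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 *	else
 *		mem_cgroup_cancel_charge_swapin(ptr);
 */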
2848
2849static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
2850				   unsigned int nr_pages,
2851				   const enum charge_type ctype)
2852{
2853	struct memcg_batch_info *batch = NULL;
2854	bool uncharge_memsw = true;
2855
2856	/* If swapout, usage of swap doesn't decrease */
2857	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2858		uncharge_memsw = false;
2859
2860	batch = &current->memcg_batch;
2861	/*
2862	 * Usually, we do css_get() when we remember a memcg pointer.
2863	 * But in this case, we keep res->usage until the end of a series of
2864	 * uncharges, so it's OK to ignore the memcg's refcnt.
2865	 */
2866	if (!batch->memcg)
2867		batch->memcg = mem;
2868	/*
2869	 * do_batch > 0 when unmapping pages or during inode invalidate/truncate.
2870	 * In those cases, pages freed continuously can be expected to be in
2871	 * the same cgroup, so we have a chance to coalesce uncharges.
2872	 * But we uncharge one by one if the task is being OOM-killed
2873	 * (TIF_MEMDIE), because we want to uncharge as soon as possible.
2874	 */
2875
2876	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2877		goto direct_uncharge;
2878
2879	if (nr_pages > 1)
2880		goto direct_uncharge;
2881
2882	/*
2883	 * In the typical case, batch->memcg == mem. This means we can
2884	 * merge a series of uncharges into one res_counter uncharge.
2885	 * If not, we uncharge the res_counter one by one.
2886	 */
2887	if (batch->memcg != mem)
2888		goto direct_uncharge;
2889	/* remember freed charge and uncharge it later */
2890	batch->nr_pages++;
2891	if (uncharge_memsw)
2892		batch->memsw_nr_pages++;
2893	return;
2894direct_uncharge:
2895	res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE);
2896	if (uncharge_memsw)
2897		res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE);
2898	if (unlikely(batch->memcg != mem))
2899		memcg_oom_recover(mem);
2900	return;
2901}
2902
2903/*
2904 * uncharge if !page_mapped(page)
2905 */
2906static struct mem_cgroup *
2907__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2908{
2909	struct mem_cgroup *mem = NULL;
2910	unsigned int nr_pages = 1;
2911	struct page_cgroup *pc;
2912
2913	if (mem_cgroup_disabled())
2914		return NULL;
2915
2916	if (PageSwapCache(page))
2917		return NULL;
2918
2919	if (PageTransHuge(page)) {
2920		nr_pages <<= compound_order(page);
2921		VM_BUG_ON(!PageTransHuge(page));
2922	}
2923	/*
2924	 * Check if our page_cgroup is valid
2925	 */
2926	pc = lookup_page_cgroup(page);
2927	if (unlikely(!pc || !PageCgroupUsed(pc)))
2928		return NULL;
2929
2930	lock_page_cgroup(pc);
2931
2932	mem = pc->mem_cgroup;
2933
2934	if (!PageCgroupUsed(pc))
2935		goto unlock_out;
2936
2937	switch (ctype) {
2938	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2939	case MEM_CGROUP_CHARGE_TYPE_DROP:
2940		/* See mem_cgroup_prepare_migration() */
2941		if (page_mapped(page) || PageCgroupMigration(pc))
2942			goto unlock_out;
2943		break;
2944	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2945		if (!PageAnon(page)) {	/* Shared memory */
2946			if (page->mapping && !page_is_file_cache(page))
2947				goto unlock_out;
2948		} else if (page_mapped(page)) /* Anon */
2949				goto unlock_out;
2950		break;
2951	default:
2952		break;
2953	}
2954
2955	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages);
2956
2957	ClearPageCgroupUsed(pc);
2958	/*
2959	 * pc->mem_cgroup is not cleared here. It will be accessed when the
2960	 * page is freed from the LRU. This is safe because an uncharged page
2961	 * is expected not to be reused (it is freed soon). The exception is
2962	 * SwapCache, which is handled by special functions.
2963	 */
2964
2965	unlock_page_cgroup(pc);
2966	/*
2967	 * Even after the unlock, we still hold mem->res.usage here, so this
2968	 * memcg will never be freed.
2969	 */
2970	memcg_check_events(mem, page);
2971	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
2972		mem_cgroup_swap_statistics(mem, true);
2973		mem_cgroup_get(mem);
2974	}
2975	if (!mem_cgroup_is_root(mem))
2976		mem_cgroup_do_uncharge(mem, nr_pages, ctype);
2977
2978	return mem;
2979
2980unlock_out:
2981	unlock_page_cgroup(pc);
2982	return NULL;
2983}
2984
2985void mem_cgroup_uncharge_page(struct page *page)
2986{
2987	/* early check. */
2988	if (page_mapped(page))
2989		return;
2990	if (page->mapping && !PageAnon(page))
2991		return;
2992	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
2993}
2994
2995void mem_cgroup_uncharge_cache_page(struct page *page)
2996{
2997	VM_BUG_ON(page_mapped(page));
2998	VM_BUG_ON(page->mapping);
2999	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
3000}
3001
3002/*
3003 * Batch start/end is called in unmap_page_range/invalidate/truncate.
3004 * In those cases, pages are freed continuously and we can expect them
3005 * to be in the same memcg. Each of those callers itself limits the
3006 * number of pages freed at once, so uncharge_start/end() nest properly.
3007 * This may be called multiple (nested) times in one context.
3008 */
3009
3010void mem_cgroup_uncharge_start(void)
3011{
3012	current->memcg_batch.do_batch++;
3013	/* Nesting is allowed. */
3014	if (current->memcg_batch.do_batch == 1) {
3015		current->memcg_batch.memcg = NULL;
3016		current->memcg_batch.nr_pages = 0;
3017		current->memcg_batch.memsw_nr_pages = 0;
3018	}
3019}
3020
3021void mem_cgroup_uncharge_end(void)
3022{
3023	struct memcg_batch_info *batch = &current->memcg_batch;
3024
3025	if (!batch->do_batch)
3026		return;
3027
3028	batch->do_batch--;
3029	if (batch->do_batch) /* If stacked, do nothing. */
3030		return;
3031
3032	if (!batch->memcg)
3033		return;
3034	/*
3035	 * This "batch->memcg" is valid without any css_get/put etc.,
3036	 * because we hide charges behind us.
3037	 */
3038	if (batch->nr_pages)
3039		res_counter_uncharge(&batch->memcg->res,
3040				     batch->nr_pages * PAGE_SIZE);
3041	if (batch->memsw_nr_pages)
3042		res_counter_uncharge(&batch->memcg->memsw,
3043				     batch->memsw_nr_pages * PAGE_SIZE);
3044	memcg_oom_recover(batch->memcg);
3045	/* forget this pointer (for sanity check) */
3046	batch->memcg = NULL;
3047}
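
/*
 * Usage sketch for the batching above (illustrative): truncate-style
 * callers bracket a run of per-page uncharges so the res_counter is
 * hit once at the end instead of once per page:
 *
 *	mem_cgroup_uncharge_start();
 *	for (i = 0; i < nr_pages; i++)
 *		mem_cgroup_uncharge_cache_page(pages[i]);
 *	mem_cgroup_uncharge_end();
 */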
3048
3049#ifdef CONFIG_SWAP
3050/*
3051 * Called after __delete_from_swap_cache() to drop the "page" account.
3052 * The memcg information is recorded in the swap_cgroup of "ent".
3053 */
3054void
3055mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
3056{
3057	struct mem_cgroup *memcg;
3058	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
3059
3060	if (!swapout) /* this was swap cache, but the swap entry is unused */
3061		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
3062
3063	memcg = __mem_cgroup_uncharge_common(page, ctype);
3064
3065	/*
3066	 * Record the memcg information; if swapout && memcg != NULL,
3067	 * mem_cgroup_get() was called in uncharge().
3068	 */
3069	if (do_swap_account && swapout && memcg)
3070		swap_cgroup_record(ent, css_id(&memcg->css));
3071}
3072#endif
3073
3074#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3075/*
3076 * Called from swap_entry_free(). Remove the record in swap_cgroup and
3077 * uncharge the "memsw" account.
3078 */
3079void mem_cgroup_uncharge_swap(swp_entry_t ent)
3080{
3081	struct mem_cgroup *memcg;
3082	unsigned short id;
3083
3084	if (!do_swap_account)
3085		return;
3086
3087	id = swap_cgroup_record(ent, 0);
3088	rcu_read_lock();
3089	memcg = mem_cgroup_lookup(id);
3090	if (memcg) {
3091		/*
3092		 * We uncharge this because the swap entry is freed. This
3093		 * memcg can be an obsolete one, so avoid calling css_tryget().
3094		 */
3095		if (!mem_cgroup_is_root(memcg))
3096			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
3097		mem_cgroup_swap_statistics(memcg, false);
3098		mem_cgroup_put(memcg);
3099	}
3100	rcu_read_unlock();
3101}
3102
3103/**
3104 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3105 * @entry: swap entry to be moved
3106 * @from:  mem_cgroup which the entry is moved from
3107 * @to:  mem_cgroup which the entry is moved to
3108 * @need_fixup: whether we should fixup res_counters and refcounts.
3109 *
3110 * It succeeds only when the swap_cgroup's record for this entry is the same
3111 * as the mem_cgroup's id of @from.
3112 *
3113 * Returns 0 on success, -EINVAL on failure.
3114 *
3115 * The caller must have charged to @to, IOW, called res_counter_charge() for
3116 * both res and memsw, and called css_get().
3117 */
3118static int mem_cgroup_move_swap_account(swp_entry_t entry,
3119		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
3120{
3121	unsigned short old_id, new_id;
3122
3123	old_id = css_id(&from->css);
3124	new_id = css_id(&to->css);
3125
3126	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3127		mem_cgroup_swap_statistics(from, false);
3128		mem_cgroup_swap_statistics(to, true);
3129		/*
3130		 * This function is only called from task migration context now.
3131		 * It postpones res_counter and refcount handling till the end
3132		 * of task migration (mem_cgroup_clear_mc()) for performance
3133		 * improvement. But we cannot postpone mem_cgroup_get(to)
3134		 * because if the process that has been moved to @to does
3135		 * swap-in, the refcount of @to might be decreased to 0.
3136		 */
3137		mem_cgroup_get(to);
3138		if (need_fixup) {
3139			if (!mem_cgroup_is_root(from))
3140				res_counter_uncharge(&from->memsw, PAGE_SIZE);
3141			mem_cgroup_put(from);
3142			/*
3143			 * we charged both to->res and to->memsw, so we should
3144			 * uncharge to->res.
3145			 */
3146			if (!mem_cgroup_is_root(to))
3147				res_counter_uncharge(&to->res, PAGE_SIZE);
3148		}
3149		return 0;
3150	}
3151	return -EINVAL;
3152}
3153#else
3154static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3155		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
3156{
3157	return -EINVAL;
3158}
3159#endif
3160
3161/*
3162 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
3163 * page belongs to.
3164 */
3165int mem_cgroup_prepare_migration(struct page *page,
3166	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
3167{
3168	struct mem_cgroup *mem = NULL;
3169	struct page_cgroup *pc;
3170	enum charge_type ctype;
3171	int ret = 0;
3172
3173	*ptr = NULL;
3174
3175	VM_BUG_ON(PageTransHuge(page));
3176	if (mem_cgroup_disabled())
3177		return 0;
3178
3179	pc = lookup_page_cgroup(page);
3180	lock_page_cgroup(pc);
3181	if (PageCgroupUsed(pc)) {
3182		mem = pc->mem_cgroup;
3183		css_get(&mem->css);
3184		/*
3185		 * When migrating an anonymous page, its mapcount goes down
3186		 * to 0 and uncharge() will be called. But even if it's fully
3187		 * unmapped, migration may fail and the page has to be
3188		 * charged again. We set the MIGRATION flag here and delay the
3189		 * uncharge until end_migration() is called.
3190		 *
3191		 * Corner Case Thinking
3192		 * A)
3193		 * The old page was mapped as Anon and is unmapped-and-freed
3194		 * while migration is ongoing.
3195		 * If unmap finds the old page, its uncharge() will be delayed
3196		 * until end_migration(). If unmap finds the new page, it is
3197		 * uncharged when its mapcount goes 1->0. If the unmap code
3198		 * finds a swap migration entry, the new page will not be mapped
3199		 * and end_migration() will find it (mapcount==0).
3200		 *
3201		 * B)
3202		 * The old page was mapped but migration fails, so the kernel
3203		 * remaps it. The charge is kept by the MIGRATION flag even if
3204		 * the mapcount goes down to 0, so we can remap successfully
3205		 * without charging it again.
3206		 *
3207		 * C)
3208		 * The "old" page is under lock_page() until the end of
3209		 * migration, so the old page itself will not be swapped out.
3210		 * If the new page is swapped out before end_migration, our
3211		 * hook into the usual swap-out path will catch the event.
3212		 */
3213		if (PageAnon(page))
3214			SetPageCgroupMigration(pc);
3215	}
3216	unlock_page_cgroup(pc);
3217	/*
3218	 * If the page is not charged at this point,
3219	 * we return here.
3220	 */
3221	if (!mem)
3222		return 0;
3223
3224	*ptr = mem;
3225	ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
3226	css_put(&mem->css);/* drop extra refcnt */
3227	if (ret || *ptr == NULL) {
3228		if (PageAnon(page)) {
3229			lock_page_cgroup(pc);
3230			ClearPageCgroupMigration(pc);
3231			unlock_page_cgroup(pc);
3232			/*
3233			 * The old page may be fully unmapped while we kept it.
3234			 */
3235			mem_cgroup_uncharge_page(page);
3236		}
3237		return -ENOMEM;
3238	}
3239	/*
3240	 * We charge the new page before it's used/mapped, so even if
3241	 * unlock_page() is called before end_migration we catch all events on
3242	 * this new page. In case the new page is migrated but not remapped,
3243	 * its mapcount finally drops to 0 and uncharge happens in end_migration().
3244	 */
3245	pc = lookup_page_cgroup(newpage);
3246	if (PageAnon(page))
3247		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
3248	else if (page_is_file_cache(page))
3249		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
3250	else
3251		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3252	__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
3253	return ret;
3254}
3255
3256/* remove redundant charge if migration failed */
3257void mem_cgroup_end_migration(struct mem_cgroup *mem,
3258	struct page *oldpage, struct page *newpage, bool migration_ok)
3259{
3260	struct page *used, *unused;
3261	struct page_cgroup *pc;
3262
3263	if (!mem)
3264		return;
3265	/* blocks rmdir() */
3266	cgroup_exclude_rmdir(&mem->css);
3267	if (!migration_ok) {
3268		used = oldpage;
3269		unused = newpage;
3270	} else {
3271		used = newpage;
3272		unused = oldpage;
3273	}
3274	/*
3275	 * We disallowed uncharge of pages under migration because the
3276	 * page's mapcount goes down to zero only temporarily.
3277	 * Clear the flag and check whether the page should still be charged.
3278	 */
3279	pc = lookup_page_cgroup(oldpage);
3280	lock_page_cgroup(pc);
3281	ClearPageCgroupMigration(pc);
3282	unlock_page_cgroup(pc);
3283
3284	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
3285
3286	/*
3287	 * If the page is file cache, the radix-tree replacement is atomic
3288	 * and we can skip this check. When it was an Anon page, its mapcount
3289	 * went down to 0, but because we added the MIGRATION flag it is not
3290	 * uncharged yet. There are several cases, but the page->mapcount
3291	 * check and the USED bit check in mem_cgroup_uncharge_page() do
3292	 * enough checking. (See mem_cgroup_prepare_migration() also.)
3293	 */
3294	if (PageAnon(used))
3295		mem_cgroup_uncharge_page(used);
3296	/*
3297	 * At migration, we may charge against a cgroup which has no
3298	 * tasks.
3299	 * So, rmdir()->pre_destroy() can be called while we do this charge.
3300	 * In that case, we need to call pre_destroy() again; check it here.
3301	 */
3302	cgroup_release_and_wakeup_rmdir(&mem->css);
3303}
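
/*
 * The migration charge protocol, condensed (a sketch of how the
 * migration code is expected to drive the two hooks above; details and
 * error paths simplified, do_the_copy() is a placeholder):
 *
 *	struct mem_cgroup *mem = NULL;
 *	int rc;
 *
 *	if (mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL))
 *		goto out;	(charge failed, skip this page)
 *	rc = do_the_copy(newpage, page);
 *	mem_cgroup_end_migration(mem, page, newpage, rc == 0);
 */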
3304
3305/*
3306 * A call to try to shrink memory usage on charge failure at shmem's swapin.
3307 * Calling hierarchical_reclaim is not enough because we should update
3308 * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
3309 * Moreover, considering the hierarchy, we should reclaim from mem_over_limit,
3310 * not from the memcg which this page would be charged to.
3311 * try_charge_swapin does all of this work properly.
3312 */
3313int mem_cgroup_shmem_charge_fallback(struct page *page,
3314			    struct mm_struct *mm,
3315			    gfp_t gfp_mask)
3316{
3317	struct mem_cgroup *mem;
3318	int ret;
3319
3320	if (mem_cgroup_disabled())
3321		return 0;
3322
3323	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
3324	if (!ret)
3325		mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
3326
3327	return ret;
3328}
3329
3330#ifdef CONFIG_DEBUG_VM
3331static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3332{
3333	struct page_cgroup *pc;
3334
3335	pc = lookup_page_cgroup(page);
3336	if (likely(pc) && PageCgroupUsed(pc))
3337		return pc;
3338	return NULL;
3339}
3340
3341bool mem_cgroup_bad_page_check(struct page *page)
3342{
3343	if (mem_cgroup_disabled())
3344		return false;
3345
3346	return lookup_page_cgroup_used(page) != NULL;
3347}
3348
3349void mem_cgroup_print_bad_page(struct page *page)
3350{
3351	struct page_cgroup *pc;
3352
3353	pc = lookup_page_cgroup_used(page);
3354	if (pc) {
3355		int ret = -1;
3356		char *path;
3357
3358		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
3359		       pc, pc->flags, pc->mem_cgroup);
3360
3361		path = kmalloc(PATH_MAX, GFP_KERNEL);
3362		if (path) {
3363			rcu_read_lock();
3364			ret = cgroup_path(pc->mem_cgroup->css.cgroup,
3365							path, PATH_MAX);
3366			rcu_read_unlock();
3367		}
3368
3369		printk(KERN_CONT "(%s)\n",
3370				(ret < 0) ? "cannot get the path" : path);
3371		kfree(path);
3372	}
3373}
3374#endif
3375
3376static DEFINE_MUTEX(set_limit_mutex);
3377
3378static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3379				unsigned long long val)
3380{
3381	int retry_count;
3382	u64 memswlimit, memlimit;
3383	int ret = 0;
3384	int children = mem_cgroup_count_children(memcg);
3385	u64 curusage, oldusage;
3386	int enlarge;
3387
3388	/*
3389	 * To keep hierarchical_reclaim simple, how long we should retry
3390	 * depends on the caller. We set our retry count to be a function
3391	 * of the number of children we should visit in this loop.
3392	 */
3393	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
3394
3395	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3396
3397	enlarge = 0;
3398	while (retry_count) {
3399		if (signal_pending(current)) {
3400			ret = -EINTR;
3401			break;
3402		}
3403		/*
3404		 * Rather than hiding all of this in some function, do it in
3405		 * an open-coded manner so it is clear what really happens.
3406		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
3407		 */
3408		mutex_lock(&set_limit_mutex);
3409		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3410		if (memswlimit < val) {
3411			ret = -EINVAL;
3412			mutex_unlock(&set_limit_mutex);
3413			break;
3414		}
3415
3416		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3417		if (memlimit < val)
3418			enlarge = 1;
3419
3420		ret = res_counter_set_limit(&memcg->res, val);
3421		if (!ret) {
3422			if (memswlimit == val)
3423				memcg->memsw_is_minimum = true;
3424			else
3425				memcg->memsw_is_minimum = false;
3426		}
3427		mutex_unlock(&set_limit_mutex);
3428
3429		if (!ret)
3430			break;
3431
3432		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3433						MEM_CGROUP_RECLAIM_SHRINK,
3434						NULL);
3435		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3436		/* Usage is reduced ? */
3437		if (curusage >= oldusage)
3438			retry_count--;
3439		else
3440			oldusage = curusage;
3441	}
3442	if (!ret && enlarge)
3443		memcg_oom_recover(memcg);
3444
3445	return ret;
3446}
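
/*
 * From userspace, this resize path is reached by writing to the
 * memory.limit_in_bytes control file. A minimal C sketch (paths and
 * the 512M value are illustrative; assumes the memory controller is
 * mounted at /cgroup):
 *
 *	int fd = open("/cgroup/grp/memory.limit_in_bytes", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "536870912", 9);
 *		close(fd);
 *	}
 */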
3447
3448static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3449					unsigned long long val)
3450{
3451	int retry_count;
3452	u64 memlimit, memswlimit, oldusage, curusage;
3453	int children = mem_cgroup_count_children(memcg);
3454	int ret = -EBUSY;
3455	int enlarge = 0;
3456
3457	/* see mem_cgroup_resize_limit */
3458	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3459	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3460	while (retry_count) {
3461		if (signal_pending(current)) {
3462			ret = -EINTR;
3463			break;
3464		}
3465		/*
3466		 * Rather than hiding all of this in some function, do it in
3467		 * an open-coded manner so it is clear what really happens.
3468		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
3469		 */
3470		mutex_lock(&set_limit_mutex);
3471		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3472		if (memlimit > val) {
3473			ret = -EINVAL;
3474			mutex_unlock(&set_limit_mutex);
3475			break;
3476		}
3477		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3478		if (memswlimit < val)
3479			enlarge = 1;
3480		ret = res_counter_set_limit(&memcg->memsw, val);
3481		if (!ret) {
3482			if (memlimit == val)
3483				memcg->memsw_is_minimum = true;
3484			else
3485				memcg->memsw_is_minimum = false;
3486		}
3487		mutex_unlock(&set_limit_mutex);
3488
3489		if (!ret)
3490			break;
3491
3492		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3493						MEM_CGROUP_RECLAIM_NOSWAP |
3494						MEM_CGROUP_RECLAIM_SHRINK,
3495						NULL);
3496		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3497		/* Usage is reduced ? */
3498		if (curusage >= oldusage)
3499			retry_count--;
3500		else
3501			oldusage = curusage;
3502	}
3503	if (!ret && enlarge)
3504		memcg_oom_recover(memcg);
3505	return ret;
3506}
3507
3508unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3509					    gfp_t gfp_mask,
3510					    unsigned long *total_scanned)
3511{
3512	unsigned long nr_reclaimed = 0;
3513	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3514	unsigned long reclaimed;
3515	int loop = 0;
3516	struct mem_cgroup_tree_per_zone *mctz;
3517	unsigned long long excess;
3518	unsigned long nr_scanned;
3519
3520	if (order > 0)
3521		return 0;
3522
3523	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
3524	/*
3525	 * This loop can run a while, especially if mem_cgroups continuously
3526	 * keep exceeding their soft limit and putting the system under
3527	 * pressure.
3528	 */
3529	do {
3530		if (next_mz)
3531			mz = next_mz;
3532		else
3533			mz = mem_cgroup_largest_soft_limit_node(mctz);
3534		if (!mz)
3535			break;
3536
3537		nr_scanned = 0;
3538		reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
3539						gfp_mask,
3540						MEM_CGROUP_RECLAIM_SOFT,
3541						&nr_scanned);
3542		nr_reclaimed += reclaimed;
3543		*total_scanned += nr_scanned;
3544		spin_lock(&mctz->lock);
3545
3546		/*
3547		 * If we failed to reclaim anything from this memory cgroup,
3548		 * it is time to move on to the next cgroup.
3549		 */
3550		next_mz = NULL;
3551		if (!reclaimed) {
3552			do {
3553				/*
3554				 * Loop until we find yet another one.
3555				 *
3556				 * By the time we get the soft_limit lock
3557				 * again, someone might have added the
3558				 * group back on the RB tree. Iterate to
3559				 * make sure we get a different mem.
3560				 * mem_cgroup_largest_soft_limit_node returns
3561				 * NULL if no other cgroup is present on
3562				 * the tree.
3563				 */
3564				next_mz =
3565				__mem_cgroup_largest_soft_limit_node(mctz);
3566				if (next_mz == mz)
3567					css_put(&next_mz->mem->css);
3568				else /* next_mz == NULL or other memcg */
3569					break;
3570			} while (1);
3571		}
3572		__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
3573		excess = res_counter_soft_limit_excess(&mz->mem->res);
3574		/*
3575		 * One school of thought says that we should not add
3576		 * the node back to the tree if reclaim returns 0.
3577		 * But our reclaim could return 0 simply because, due
3578		 * to priority, we are exposing a smaller subset of
3579		 * memory to reclaim from. Consider this a longer
3580		 * term TODO.
3581		 */
3582		/* If excess == 0, no tree ops */
3583		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
3584		spin_unlock(&mctz->lock);
3585		css_put(&mz->mem->css);
3586		loop++;
3587		/*
3588		 * Could not reclaim anything and there are no more
3589		 * mem cgroups to try or we seem to be looping without
3590		 * reclaiming anything.
3591		 */
3592		if (!nr_reclaimed &&
3593			(next_mz == NULL ||
3594			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3595			break;
3596	} while (!nr_reclaimed);
3597	if (next_mz)
3598		css_put(&next_mz->mem->css);
3599	return nr_reclaimed;
3600}
3601
3602/*
3603 * This routine traverses the page_cgroups on the given list and drops them
3604 * all. It doesn't reclaim the pages themselves, just removes page_cgroups.
3605 */
3606static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
3607				int node, int zid, enum lru_list lru)
3608{
3609	struct zone *zone;
3610	struct mem_cgroup_per_zone *mz;
3611	struct page_cgroup *pc, *busy;
3612	unsigned long flags, loop;
3613	struct list_head *list;
3614	int ret = 0;
3615
3616	zone = &NODE_DATA(node)->node_zones[zid];
3617	mz = mem_cgroup_zoneinfo(mem, node, zid);
3618	list = &mz->lists[lru];
3619
3620	loop = MEM_CGROUP_ZSTAT(mz, lru);
3621	/* give some margin against -EBUSY etc. */
3622	loop += 256;
3623	busy = NULL;
3624	while (loop--) {
3625		struct page *page;
3626
3627		ret = 0;
3628		spin_lock_irqsave(&zone->lru_lock, flags);
3629		if (list_empty(list)) {
3630			spin_unlock_irqrestore(&zone->lru_lock, flags);
3631			break;
3632		}
3633		pc = list_entry(list->prev, struct page_cgroup, lru);
3634		if (busy == pc) {
3635			list_move(&pc->lru, list);
3636			busy = NULL;
3637			spin_unlock_irqrestore(&zone->lru_lock, flags);
3638			continue;
3639		}
3640		spin_unlock_irqrestore(&zone->lru_lock, flags);
3641
3642		page = lookup_cgroup_page(pc);
3643
3644		ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL);
3645		if (ret == -ENOMEM)
3646			break;
3647
3648		if (ret == -EBUSY || ret == -EINVAL) {
3649			/* found lock contention or "pc" is obsolete. */
3650			busy = pc;
3651			cond_resched();
3652		} else
3653			busy = NULL;
3654	}
3655
3656	if (!ret && !list_empty(list))
3657		return -EBUSY;
3658	return ret;
3659}
3660
3661/*
3662 * Reduce the mem_cgroup's charge to 0 if it contains no tasks.
3663 * This enables deleting this mem_cgroup.
3664 */
3665static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
3666{
3667	int ret;
3668	int node, zid, shrink;
3669	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3670	struct cgroup *cgrp = mem->css.cgroup;
3671
3672	css_get(&mem->css);
3673
3674	shrink = 0;
3675	/* should free all ? */
3676	if (free_all)
3677		goto try_to_free;
3678move_account:
3679	do {
3680		ret = -EBUSY;
3681		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
3682			goto out;
3683		ret = -EINTR;
3684		if (signal_pending(current))
3685			goto out;
3686		/* This makes sure all *used* pages are on the LRU. */
3687		lru_add_drain_all();
3688		drain_all_stock_sync();
3689		ret = 0;
3690		mem_cgroup_start_move(mem);
3691		for_each_node_state(node, N_HIGH_MEMORY) {
3692			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3693				enum lru_list l;
3694				for_each_lru(l) {
3695					ret = mem_cgroup_force_empty_list(mem,
3696							node, zid, l);
3697					if (ret)
3698						break;
3699				}
3700			}
3701			if (ret)
3702				break;
3703		}
3704		mem_cgroup_end_move(mem);
3705		memcg_oom_recover(mem);
3706		/* it seems the parent cgroup doesn't have enough memory */
3707		if (ret == -ENOMEM)
3708			goto try_to_free;
3709		cond_resched();
3710	/* "ret" should also be checked to ensure all lists are empty. */
3711	} while (mem->res.usage > 0 || ret);
3712out:
3713	css_put(&mem->css);
3714	return ret;
3715
3716try_to_free:
3717	/* returns -EBUSY if there is a task or if we come here twice. */
3718	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3719		ret = -EBUSY;
3720		goto out;
3721	}
3722	/* we call try-to-free pages to make this cgroup empty */
3723	lru_add_drain_all();
3724	/* try to free all pages in this cgroup */
3725	shrink = 1;
3726	while (nr_retries && mem->res.usage > 0) {
3727		int progress;
3728
3729		if (signal_pending(current)) {
3730			ret = -EINTR;
3731			goto out;
3732		}
3733		progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
3734						false, get_swappiness(mem));
3735		if (!progress) {
3736			nr_retries--;
3737			/* maybe some writeback is necessary */
3738			congestion_wait(BLK_RW_ASYNC, HZ/10);
3739		}
3740
3741	}
3742	lru_add_drain();
3743	/* try move_account...there may be some *locked* pages. */
3744	goto move_account;
3745}
3746
3747int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3748{
3749	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3750}
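/*
 * From user space, force_empty is a trigger file: a write of any value
 * drains the group's charges. A sketch, assuming the controller is
 * mounted at /cgroups/memory (illustrative path):
 *
 *	echo 0 > /cgroups/memory/grp/memory.force_empty
 *
 * This fails with -EBUSY while tasks remain in the group or while it
 * still has children.
 */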
3751
3752
3753static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3754{
3755	return mem_cgroup_from_cont(cont)->use_hierarchy;
3756}
3757
3758static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3759					u64 val)
3760{
3761	int retval = 0;
3762	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3763	struct cgroup *parent = cont->parent;
3764	struct mem_cgroup *parent_mem = NULL;
3765
3766	if (parent)
3767		parent_mem = mem_cgroup_from_cont(parent);
3768
3769	cgroup_lock();
3770	/*
3771	 * If parent's use_hierarchy is set, we can't make any modifications
3772	 * in the child subtrees. If it is unset, then the change can
3773	 * occur, provided the current cgroup has no children.
3774	 *
3775	 * For the root cgroup, parent_mem is NULL; we allow the value to
3776	 * be set if there are no children.
3777	 */
3778	if ((!parent_mem || !parent_mem->use_hierarchy) &&
3779				(val == 1 || val == 0)) {
3780		if (list_empty(&cont->children))
3781			mem->use_hierarchy = val;
3782		else
3783			retval = -EBUSY;
3784	} else
3785		retval = -EINVAL;
3786	cgroup_unlock();
3787
3788	return retval;
3789}
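/*
 * A sketch of the intended ordering (paths illustrative): use_hierarchy
 * must be set while the group is still childless, and children then
 * inherit it at creation time (see mem_cgroup_create() below):
 *
 *	mkdir /cgroups/memory/parent
 *	echo 1 > /cgroups/memory/parent/memory.use_hierarchy
 *	mkdir /cgroups/memory/parent/child
 */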
3790
3791
3792static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem,
3793					       enum mem_cgroup_stat_index idx)
3794{
3795	struct mem_cgroup *iter;
3796	long val = 0;
3797
3798	/* Per-cpu values can be negative, use a signed accumulator */
3799	for_each_mem_cgroup_tree(iter, mem)
3800		val += mem_cgroup_read_stat(iter, idx);
3801
3802	if (val < 0) /* race ? */
3803		val = 0;
3804	return val;
3805}
3806
3807static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
3808{
3809	u64 val;
3810
3811	if (!mem_cgroup_is_root(mem)) {
3812		if (!swap)
3813			return res_counter_read_u64(&mem->res, RES_USAGE);
3814		else
3815			return res_counter_read_u64(&mem->memsw, RES_USAGE);
3816	}
3817
3818	val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE);
3819	val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS);
3820
3821	if (swap)
3822		val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
3823
3824	return val << PAGE_SHIFT;
3825}
3826
3827static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
3828{
3829	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3830	u64 val;
3831	int type, name;
3832
3833	type = MEMFILE_TYPE(cft->private);
3834	name = MEMFILE_ATTR(cft->private);
3835	switch (type) {
3836	case _MEM:
3837		if (name == RES_USAGE)
3838			val = mem_cgroup_usage(mem, false);
3839		else
3840			val = res_counter_read_u64(&mem->res, name);
3841		break;
3842	case _MEMSWAP:
3843		if (name == RES_USAGE)
3844			val = mem_cgroup_usage(mem, true);
3845		else
3846			val = res_counter_read_u64(&mem->memsw, name);
3847		break;
3848	default:
3849		BUG();
3850		break;
3851	}
3852	return val;
3853}
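/*
 * cft->private packs a counter type and an attribute via MEMFILE_PRIVATE();
 * e.g. the "usage_in_bytes" entry in mem_cgroup_files[] below uses
 * MEMFILE_PRIVATE(_MEM, RES_USAGE), which this reader splits back out with
 * MEMFILE_TYPE() and MEMFILE_ATTR().
 */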
3854/*
3855 * The user of this function is the write handler for RES_LIMIT and
3856 * RES_SOFT_LIMIT.
3857 */
3858static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3859			    const char *buffer)
3860{
3861	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3862	int type, name;
3863	unsigned long long val;
3864	int ret;
3865
3866	type = MEMFILE_TYPE(cft->private);
3867	name = MEMFILE_ATTR(cft->private);
3868	switch (name) {
3869	case RES_LIMIT:
3870		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3871			ret = -EINVAL;
3872			break;
3873		}
3874		/* This function does all necessary parse...reuse it */
3875		ret = res_counter_memparse_write_strategy(buffer, &val);
3876		if (ret)
3877			break;
3878		if (type == _MEM)
3879			ret = mem_cgroup_resize_limit(memcg, val);
3880		else
3881			ret = mem_cgroup_resize_memsw_limit(memcg, val);
3882		break;
3883	case RES_SOFT_LIMIT:
3884		ret = res_counter_memparse_write_strategy(buffer, &val);
3885		if (ret)
3886			break;
3887		/*
3888		 * For memsw, soft limits are hard to implement in terms
3889		 * of semantics; for now, we support soft limits only for
3890		 * memory control without swap.
3891		 */
3892		if (type == _MEM)
3893			ret = res_counter_set_soft_limit(&memcg->res, val);
3894		else
3895			ret = -EINVAL;
3896		break;
3897	default:
3898		ret = -EINVAL; /* should be BUG() ? */
3899		break;
3900	}
3901	return ret;
3902}
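/*
 * Since res_counter_memparse_write_strategy() does the parsing, limits
 * can be written with memparse()-style suffixes, or as "-1" for unlimited
 * (paths illustrative):
 *
 *	echo 64M > /cgroups/memory/grp/memory.limit_in_bytes
 *	echo 1G  > /cgroups/memory/grp/memory.soft_limit_in_bytes
 *	echo -1  > /cgroups/memory/grp/memory.limit_in_bytes
 */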
3903
3904static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3905		unsigned long long *mem_limit, unsigned long long *memsw_limit)
3906{
3907	struct cgroup *cgroup;
3908	unsigned long long min_limit, min_memsw_limit, tmp;
3909
3910	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3911	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3912	cgroup = memcg->css.cgroup;
3913	if (!memcg->use_hierarchy)
3914		goto out;
3915
3916	while (cgroup->parent) {
3917		cgroup = cgroup->parent;
3918		memcg = mem_cgroup_from_cont(cgroup);
3919		if (!memcg->use_hierarchy)
3920			break;
3921		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3922		min_limit = min(min_limit, tmp);
3923		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3924		min_memsw_limit = min(min_memsw_limit, tmp);
3925	}
3926out:
3927	*mem_limit = min_limit;
3928	*memsw_limit = min_memsw_limit;
3929	return;
3930}
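/*
 * For example, with use_hierarchy=1, a parent limited to 512M and a child
 * left at RESOURCE_MAX report hierarchical_memory_limit as 512M in the
 * child: the walk above takes the minimum limit up to the first
 * non-hierarchical ancestor. (Sizes are illustrative.)
 */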
3931
3932static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3933{
3934	struct mem_cgroup *mem;
3935	int type, name;
3936
3937	mem = mem_cgroup_from_cont(cont);
3938	type = MEMFILE_TYPE(event);
3939	name = MEMFILE_ATTR(event);
3940	switch (name) {
3941	case RES_MAX_USAGE:
3942		if (type == _MEM)
3943			res_counter_reset_max(&mem->res);
3944		else
3945			res_counter_reset_max(&mem->memsw);
3946		break;
3947	case RES_FAILCNT:
3948		if (type == _MEM)
3949			res_counter_reset_failcnt(&mem->res);
3950		else
3951			res_counter_reset_failcnt(&mem->memsw);
3952		break;
3953	}
3954
3955	return 0;
3956}
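/*
 * Both are trigger files; the written value itself is not interpreted
 * (paths illustrative):
 *
 *	echo 0 > /cgroups/memory/grp/memory.max_usage_in_bytes
 *	echo 0 > /cgroups/memory/grp/memory.failcnt
 */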
3957
3958static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3959					struct cftype *cft)
3960{
3961	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3962}
3963
3964#ifdef CONFIG_MMU
3965static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3966					struct cftype *cft, u64 val)
3967{
3968	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3969
3970	if (val >= (1 << NR_MOVE_TYPE))
3971		return -EINVAL;
3972	/*
3973	 * We check this value several times in both can_attach() and
3974	 * attach(), so we need the cgroup lock to prevent this value
3975	 * from becoming inconsistent.
3976	 */
3977	cgroup_lock();
3978	mem->move_charge_at_immigrate = val;
3979	cgroup_unlock();
3980
3981	return 0;
3982}
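/*
 * val is a bitmask of move types, one bit per NR_MOVE_TYPE entry: bit 0
 * selects anon pages (move_anon()) and bit 1 file pages (move_file()).
 * A sketch (path illustrative):
 *
 *	echo 3 > /cgroups/memory/grp/memory.move_charge_at_immigrate
 */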
3983#else
3984static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3985					struct cftype *cft, u64 val)
3986{
3987	return -ENOSYS;
3988}
3989#endif
3990
3991
3992/* For reading statistics */
3993enum {
3994	MCS_CACHE,
3995	MCS_RSS,
3996	MCS_FILE_MAPPED,
3997	MCS_PGPGIN,
3998	MCS_PGPGOUT,
3999	MCS_SWAP,
4000	MCS_PGFAULT,
4001	MCS_PGMAJFAULT,
4002	MCS_INACTIVE_ANON,
4003	MCS_ACTIVE_ANON,
4004	MCS_INACTIVE_FILE,
4005	MCS_ACTIVE_FILE,
4006	MCS_UNEVICTABLE,
4007	NR_MCS_STAT,
4008};
4009
4010struct mcs_total_stat {
4011	s64 stat[NR_MCS_STAT];
4012};
4013
4014struct {
4015	char *local_name;
4016	char *total_name;
4017} memcg_stat_strings[NR_MCS_STAT] = {
4018	{"cache", "total_cache"},
4019	{"rss", "total_rss"},
4020	{"mapped_file", "total_mapped_file"},
4021	{"pgpgin", "total_pgpgin"},
4022	{"pgpgout", "total_pgpgout"},
4023	{"swap", "total_swap"},
4024	{"pgfault", "total_pgfault"},
4025	{"pgmajfault", "total_pgmajfault"},
4026	{"inactive_anon", "total_inactive_anon"},
4027	{"active_anon", "total_active_anon"},
4028	{"inactive_file", "total_inactive_file"},
4029	{"active_file", "total_active_file"},
4030	{"unevictable", "total_unevictable"}
4031};
4032
4033
4034static void
4035mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
4036{
4037	s64 val;
4038
4039	/* per cpu stat */
4040	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
4041	s->stat[MCS_CACHE] += val * PAGE_SIZE;
4042	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
4043	s->stat[MCS_RSS] += val * PAGE_SIZE;
4044	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
4045	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
4046	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN);
4047	s->stat[MCS_PGPGIN] += val;
4048	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT);
4049	s->stat[MCS_PGPGOUT] += val;
4050	if (do_swap_account) {
4051		val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
4052		s->stat[MCS_SWAP] += val * PAGE_SIZE;
4053	}
4054	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT);
4055	s->stat[MCS_PGFAULT] += val;
4056	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT);
4057	s->stat[MCS_PGMAJFAULT] += val;
4058
4059	/* per zone stat */
4060	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
4061	s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
4062	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
4063	s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
4064	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
4065	s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
4066	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
4067	s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
4068	val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
4069	s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
4070}
4071
4072static void
4073mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
4074{
4075	struct mem_cgroup *iter;
4076
4077	for_each_mem_cgroup_tree(iter, mem)
4078		mem_cgroup_get_local_stat(iter, s);
4079}
4080
4081#ifdef CONFIG_NUMA
4082static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
4083{
4084	int nid;
4085	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
4086	unsigned long node_nr;
4087	struct cgroup *cont = m->private;
4088	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4089
4090	total_nr = mem_cgroup_nr_lru_pages(mem_cont);
4091	seq_printf(m, "total=%lu", total_nr);
4092	for_each_node_state(nid, N_HIGH_MEMORY) {
4093		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid);
4094		seq_printf(m, " N%d=%lu", nid, node_nr);
4095	}
4096	seq_putc(m, '\n');
4097
4098	file_nr = mem_cgroup_nr_file_lru_pages(mem_cont);
4099	seq_printf(m, "file=%lu", file_nr);
4100	for_each_node_state(nid, N_HIGH_MEMORY) {
4101		node_nr = mem_cgroup_node_nr_file_lru_pages(mem_cont, nid);
4102		seq_printf(m, " N%d=%lu", nid, node_nr);
4103	}
4104	seq_putc(m, '\n');
4105
4106	anon_nr = mem_cgroup_nr_anon_lru_pages(mem_cont);
4107	seq_printf(m, "anon=%lu", anon_nr);
4108	for_each_node_state(nid, N_HIGH_MEMORY) {
4109		node_nr = mem_cgroup_node_nr_anon_lru_pages(mem_cont, nid);
4110		seq_printf(m, " N%d=%lu", nid, node_nr);
4111	}
4112	seq_putc(m, '\n');
4113
4114	unevictable_nr = mem_cgroup_nr_unevictable_lru_pages(mem_cont);
4115	seq_printf(m, "unevictable=%lu", unevictable_nr);
4116	for_each_node_state(nid, N_HIGH_MEMORY) {
4117		node_nr = mem_cgroup_node_nr_unevictable_lru_pages(mem_cont,
4118									nid);
4119		seq_printf(m, " N%d=%lu", nid, node_nr);
4120	}
4121	seq_putc(m, '\n');
4122	return 0;
4123}
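/*
 * memory.numa_stat output is one line per counter with per-node fields,
 * e.g. (values made up):
 *
 *	total=2048 N0=1536 N1=512
 *	file=1024 N0=768 N1=256
 *	anon=1024 N0=768 N1=256
 *	unevictable=0 N0=0 N1=0
 */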
4124#endif /* CONFIG_NUMA */
4125
4126static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
4127				 struct cgroup_map_cb *cb)
4128{
4129	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4130	struct mcs_total_stat mystat;
4131	int i;
4132
4133	memset(&mystat, 0, sizeof(mystat));
4134	mem_cgroup_get_local_stat(mem_cont, &mystat);
4135
4136
4137	for (i = 0; i < NR_MCS_STAT; i++) {
4138		if (i == MCS_SWAP && !do_swap_account)
4139			continue;
4140		cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
4141	}
4142
4143	/* Hierarchical information */
4144	{
4145		unsigned long long limit, memsw_limit;
4146		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
4147		cb->fill(cb, "hierarchical_memory_limit", limit);
4148		if (do_swap_account)
4149			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
4150	}
4151
4152	memset(&mystat, 0, sizeof(mystat));
4153	mem_cgroup_get_total_stat(mem_cont, &mystat);
4154	for (i = 0; i < NR_MCS_STAT; i++) {
4155		if (i == MCS_SWAP && !do_swap_account)
4156			continue;
4157		cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
4158	}
4159
4160#ifdef CONFIG_DEBUG_VM
4161	cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
4162
4163	{
4164		int nid, zid;
4165		struct mem_cgroup_per_zone *mz;
4166		unsigned long recent_rotated[2] = {0, 0};
4167		unsigned long recent_scanned[2] = {0, 0};
4168
4169		for_each_online_node(nid)
4170			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4171				mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
4172
4173				recent_rotated[0] +=
4174					mz->reclaim_stat.recent_rotated[0];
4175				recent_rotated[1] +=
4176					mz->reclaim_stat.recent_rotated[1];
4177				recent_scanned[0] +=
4178					mz->reclaim_stat.recent_scanned[0];
4179				recent_scanned[1] +=
4180					mz->reclaim_stat.recent_scanned[1];
4181			}
4182		cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
4183		cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
4184		cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
4185		cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
4186	}
4187#endif
4188
4189	return 0;
4190}
4191
4192static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
4193{
4194	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4195
4196	return get_swappiness(memcg);
4197}
4198
4199static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
4200				       u64 val)
4201{
4202	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4203	struct mem_cgroup *parent;
4204
4205	if (val > 100)
4206		return -EINVAL;
4207
4208	if (cgrp->parent == NULL)
4209		return -EINVAL;
4210
4211	parent = mem_cgroup_from_cont(cgrp->parent);
4212
4213	cgroup_lock();
4214
4215	/* If under hierarchy, only a root with no children can set this value */
4216	if ((parent->use_hierarchy) ||
4217	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4218		cgroup_unlock();
4219		return -EINVAL;
4220	}
4221
4222	memcg->swappiness = val;
4223
4224	cgroup_unlock();
4225
4226	return 0;
4227}
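/*
 * Example (path illustrative): per-group swappiness overrides the global
 * vm.swappiness for reclaim in this memcg. The write is rejected on the
 * root group, for values above 100, and under a hierarchy as checked
 * above:
 *
 *	echo 0 > /cgroups/memory/grp/memory.swappiness
 */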
4228
4229static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4230{
4231	struct mem_cgroup_threshold_ary *t;
4232	u64 usage;
4233	int i;
4234
4235	rcu_read_lock();
4236	if (!swap)
4237		t = rcu_dereference(memcg->thresholds.primary);
4238	else
4239		t = rcu_dereference(memcg->memsw_thresholds.primary);
4240
4241	if (!t)
4242		goto unlock;
4243
4244	usage = mem_cgroup_usage(memcg, swap);
4245
4246	/*
4247	 * current_threshold points to threshold just below usage.
4248	 * If it's not true, a threshold was crossed after last
4249	 * call of __mem_cgroup_threshold().
4250	 */
4251	i = t->current_threshold;
4252
4253	/*
4254	 * Iterate backward over array of thresholds starting from
4255	 * current_threshold and check if a threshold is crossed.
4256	 * If none of thresholds below usage is crossed, we read
4257	 * only one element of the array here.
4258	 */
4259	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4260		eventfd_signal(t->entries[i].eventfd, 1);
4261
4262	/* i = current_threshold + 1 */
4263	i++;
4264
4265	/*
4266	 * Iterate forward over array of thresholds starting from
4267	 * current_threshold+1 and check if a threshold is crossed.
4268	 * If none of thresholds above usage is crossed, we read
4269	 * only one element of the array here.
4270	 */
4271	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4272		eventfd_signal(t->entries[i].eventfd, 1);
4273
4274	/* Update current_threshold */
4275	t->current_threshold = i - 1;
4276unlock:
4277	rcu_read_unlock();
4278}
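/*
 * A worked example of the two scans above, with thresholds {4M, 8M, 16M}
 * and current_threshold at index 1 (8M): if usage drops to 3M, the
 * backward scan signals 8M then 4M and leaves current_threshold at -1;
 * if usage instead rises to 20M, the forward scan signals 16M and leaves
 * current_threshold at index 2. (Sizes are illustrative.)
 */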
4279
4280static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4281{
4282	while (memcg) {
4283		__mem_cgroup_threshold(memcg, false);
4284		if (do_swap_account)
4285			__mem_cgroup_threshold(memcg, true);
4286
4287		memcg = parent_mem_cgroup(memcg);
4288	}
4289}
4290
4291static int compare_thresholds(const void *a, const void *b)
4292{
4293	const struct mem_cgroup_threshold *_a = a;
4294	const struct mem_cgroup_threshold *_b = b;
4295	/* compare without truncating the u64 difference to int */
4296	return (_a->threshold > _b->threshold) - (_a->threshold < _b->threshold);
4297}
4298
4299static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
4300{
4301	struct mem_cgroup_eventfd_list *ev;
4302
4303	list_for_each_entry(ev, &mem->oom_notify, list)
4304		eventfd_signal(ev->eventfd, 1);
4305	return 0;
4306}
4307
4308static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
4309{
4310	struct mem_cgroup *iter;
4311
4312	for_each_mem_cgroup_tree(iter, mem)
4313		mem_cgroup_oom_notify_cb(iter);
4314}
4315
4316static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
4317	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4318{
4319	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4320	struct mem_cgroup_thresholds *thresholds;
4321	struct mem_cgroup_threshold_ary *new;
4322	int type = MEMFILE_TYPE(cft->private);
4323	u64 threshold, usage;
4324	int i, size, ret;
4325
4326	ret = res_counter_memparse_write_strategy(args, &threshold);
4327	if (ret)
4328		return ret;
4329
4330	mutex_lock(&memcg->thresholds_lock);
4331
4332	if (type == _MEM)
4333		thresholds = &memcg->thresholds;
4334	else if (type == _MEMSWAP)
4335		thresholds = &memcg->memsw_thresholds;
4336	else
4337		BUG();
4338
4339	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4340
4341	/* Check if a threshold crossed before adding a new one */
4342	if (thresholds->primary)
4343		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4344
4345	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4346
4347	/* Allocate memory for new array of thresholds */
4348	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
4349			GFP_KERNEL);
4350	if (!new) {
4351		ret = -ENOMEM;
4352		goto unlock;
4353	}
4354	new->size = size;
4355
4356	/* Copy thresholds (if any) to new array */
4357	if (thresholds->primary) {
4358		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4359				sizeof(struct mem_cgroup_threshold));
4360	}
4361
4362	/* Add new threshold */
4363	new->entries[size - 1].eventfd = eventfd;
4364	new->entries[size - 1].threshold = threshold;
4365
4366	/* Sort thresholds. Registering of new threshold isn't time-critical */
4367	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4368			compare_thresholds, NULL);
4369
4370	/* Find current threshold */
4371	new->current_threshold = -1;
4372	for (i = 0; i < size; i++) {
4373		if (new->entries[i].threshold < usage) {
4374			/*
4375			 * new->current_threshold will not be used until
4376			 * rcu_assign_pointer(), so it's safe to increment
4377			 * it here.
4378			 */
4379			++new->current_threshold;
4380		}
4381	}
4382
4383	/* Free old spare buffer and save old primary buffer as spare */
4384	kfree(thresholds->spare);
4385	thresholds->spare = thresholds->primary;
4386
4387	rcu_assign_pointer(thresholds->primary, new);
4388
4389	/* To be sure that nobody uses thresholds */
4390	synchronize_rcu();
4391
4392unlock:
4393	mutex_unlock(&memcg->thresholds_lock);
4394
4395	return ret;
4396}
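/*
 * Registration is driven through cgroup.event_control. A sketch, assuming
 * $efd is an eventfd(2) descriptor and $ufd an open fd on
 * memory.usage_in_bytes (path illustrative):
 *
 *	echo "$efd $ufd 8M" > /cgroups/memory/grp/cgroup.event_control
 *
 * A read on $efd then completes each time usage crosses 8M in either
 * direction (see __mem_cgroup_threshold() above).
 */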
4397
4398static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
4399	struct cftype *cft, struct eventfd_ctx *eventfd)
4400{
4401	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4402	struct mem_cgroup_thresholds *thresholds;
4403	struct mem_cgroup_threshold_ary *new;
4404	int type = MEMFILE_TYPE(cft->private);
4405	u64 usage;
4406	int i, j, size;
4407
4408	mutex_lock(&memcg->thresholds_lock);
4409	if (type == _MEM)
4410		thresholds = &memcg->thresholds;
4411	else if (type == _MEMSWAP)
4412		thresholds = &memcg->memsw_thresholds;
4413	else
4414		BUG();
4415
4416	/*
4417	 * Something went wrong if we're trying to unregister a threshold
4418	 * when we don't have any thresholds.
4419	 */
4420	BUG_ON(!thresholds->primary);
4421
4422	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4423
4424	/* Check if a threshold crossed before removing */
4425	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4426
4427	/* Calculate the new number of thresholds */
4428	size = 0;
4429	for (i = 0; i < thresholds->primary->size; i++) {
4430		if (thresholds->primary->entries[i].eventfd != eventfd)
4431			size++;
4432	}
4433
4434	new = thresholds->spare;
4435
4436	/* Set thresholds array to NULL if we don't have thresholds */
4437	if (!size) {
4438		kfree(new);
4439		new = NULL;
4440		goto swap_buffers;
4441	}
4442
4443	new->size = size;
4444
4445	/* Copy thresholds and find current threshold */
4446	new->current_threshold = -1;
4447	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4448		if (thresholds->primary->entries[i].eventfd == eventfd)
4449			continue;
4450
4451		new->entries[j] = thresholds->primary->entries[i];
4452		if (new->entries[j].threshold < usage) {
4453			/*
4454			 * new->current_threshold will not be used
4455			 * until rcu_assign_pointer(), so it's safe to increment
4456			 * it here.
4457			 */
4458			++new->current_threshold;
4459		}
4460		j++;
4461	}
4462
4463swap_buffers:
4464	/* Swap primary and spare array */
4465	thresholds->spare = thresholds->primary;
4466	rcu_assign_pointer(thresholds->primary, new);
4467
4468	/* To be sure that nobody uses thresholds */
4469	synchronize_rcu();
4470
4471	mutex_unlock(&memcg->thresholds_lock);
4472}
4473
4474static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4475	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4476{
4477	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4478	struct mem_cgroup_eventfd_list *event;
4479	int type = MEMFILE_TYPE(cft->private);
4480
4481	BUG_ON(type != _OOM_TYPE);
4482	event = kmalloc(sizeof(*event), GFP_KERNEL);
4483	if (!event)
4484		return -ENOMEM;
4485
4486	mutex_lock(&memcg_oom_mutex);
4487
4488	event->eventfd = eventfd;
4489	list_add(&event->list, &memcg->oom_notify);
4490
4491	/* already in OOM? */
4492	if (atomic_read(&memcg->oom_lock))
4493		eventfd_signal(eventfd, 1);
4494	mutex_unlock(&memcg_oom_mutex);
4495
4496	return 0;
4497}
4498
4499static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
4500	struct cftype *cft, struct eventfd_ctx *eventfd)
4501{
4502	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4503	struct mem_cgroup_eventfd_list *ev, *tmp;
4504	int type = MEMFILE_TYPE(cft->private);
4505
4506	BUG_ON(type != _OOM_TYPE);
4507
4508	mutex_lock(&memcg_oom_mutex);
4509
4510	list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
4511		if (ev->eventfd == eventfd) {
4512			list_del(&ev->list);
4513			kfree(ev);
4514		}
4515	}
4516
4517	mutex_unlock(&memcg_oom_mutex);
4518}
4519
4520static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
4521	struct cftype *cft,  struct cgroup_map_cb *cb)
4522{
4523	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4524
4525	cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
4526
4527	if (atomic_read(&mem->oom_lock))
4528		cb->fill(cb, "under_oom", 1);
4529	else
4530		cb->fill(cb, "under_oom", 0);
4531	return 0;
4532}
4533
4534static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4535	struct cftype *cft, u64 val)
4536{
4537	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4538	struct mem_cgroup *parent;
4539
4540	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
4541	if (!cgrp->parent || !((val == 0) || (val == 1)))
4542		return -EINVAL;
4543
4544	parent = mem_cgroup_from_cont(cgrp->parent);
4545
4546	cgroup_lock();
4547	/* oom-kill-disable is a flag affecting the whole sub-hierarchy. */
4548	if ((parent->use_hierarchy) ||
4549	    (mem->use_hierarchy && !list_empty(&cgrp->children))) {
4550		cgroup_unlock();
4551		return -EINVAL;
4552	}
4553	mem->oom_kill_disable = val;
4554	if (!val)
4555		memcg_oom_recover(mem);
4556	cgroup_unlock();
4557	return 0;
4558}
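/*
 * Example (path illustrative): with the OOM killer disabled, tasks that
 * hit the limit sleep in the OOM waitqueue until memory is freed or the
 * limit is raised, and under_oom reads as 1 meanwhile:
 *
 *	echo 1 > /cgroups/memory/grp/memory.oom_control
 *	cat /cgroups/memory/grp/memory.oom_control
 *	oom_kill_disable 1
 *	under_oom 0
 */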
4559
4560#ifdef CONFIG_NUMA
4561static const struct file_operations mem_control_numa_stat_file_operations = {
4562	.read = seq_read,
4563	.llseek = seq_lseek,
4564	.release = single_release,
4565};
4566
4567static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
4568{
4569	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
4570
4571	file->f_op = &mem_control_numa_stat_file_operations;
4572	return single_open(file, mem_control_numa_stat_show, cont);
4573}
4574#endif /* CONFIG_NUMA */
4575
4576static struct cftype mem_cgroup_files[] = {
4577	{
4578		.name = "usage_in_bytes",
4579		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4580		.read_u64 = mem_cgroup_read,
4581		.register_event = mem_cgroup_usage_register_event,
4582		.unregister_event = mem_cgroup_usage_unregister_event,
4583	},
4584	{
4585		.name = "max_usage_in_bytes",
4586		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4587		.trigger = mem_cgroup_reset,
4588		.read_u64 = mem_cgroup_read,
4589	},
4590	{
4591		.name = "limit_in_bytes",
4592		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4593		.write_string = mem_cgroup_write,
4594		.read_u64 = mem_cgroup_read,
4595	},
4596	{
4597		.name = "soft_limit_in_bytes",
4598		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4599		.write_string = mem_cgroup_write,
4600		.read_u64 = mem_cgroup_read,
4601	},
4602	{
4603		.name = "failcnt",
4604		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4605		.trigger = mem_cgroup_reset,
4606		.read_u64 = mem_cgroup_read,
4607	},
4608	{
4609		.name = "stat",
4610		.read_map = mem_control_stat_show,
4611	},
4612	{
4613		.name = "force_empty",
4614		.trigger = mem_cgroup_force_empty_write,
4615	},
4616	{
4617		.name = "use_hierarchy",
4618		.write_u64 = mem_cgroup_hierarchy_write,
4619		.read_u64 = mem_cgroup_hierarchy_read,
4620	},
4621	{
4622		.name = "swappiness",
4623		.read_u64 = mem_cgroup_swappiness_read,
4624		.write_u64 = mem_cgroup_swappiness_write,
4625	},
4626	{
4627		.name = "move_charge_at_immigrate",
4628		.read_u64 = mem_cgroup_move_charge_read,
4629		.write_u64 = mem_cgroup_move_charge_write,
4630	},
4631	{
4632		.name = "oom_control",
4633		.read_map = mem_cgroup_oom_control_read,
4634		.write_u64 = mem_cgroup_oom_control_write,
4635		.register_event = mem_cgroup_oom_register_event,
4636		.unregister_event = mem_cgroup_oom_unregister_event,
4637		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4638	},
4639#ifdef CONFIG_NUMA
4640	{
4641		.name = "numa_stat",
4642		.open = mem_control_numa_stat_open,
4643	},
4644#endif
4645};
4646
4647#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4648static struct cftype memsw_cgroup_files[] = {
4649	{
4650		.name = "memsw.usage_in_bytes",
4651		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4652		.read_u64 = mem_cgroup_read,
4653		.register_event = mem_cgroup_usage_register_event,
4654		.unregister_event = mem_cgroup_usage_unregister_event,
4655	},
4656	{
4657		.name = "memsw.max_usage_in_bytes",
4658		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4659		.trigger = mem_cgroup_reset,
4660		.read_u64 = mem_cgroup_read,
4661	},
4662	{
4663		.name = "memsw.limit_in_bytes",
4664		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4665		.write_string = mem_cgroup_write,
4666		.read_u64 = mem_cgroup_read,
4667	},
4668	{
4669		.name = "memsw.failcnt",
4670		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4671		.trigger = mem_cgroup_reset,
4672		.read_u64 = mem_cgroup_read,
4673	},
4674};
4675
4676static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4677{
4678	if (!do_swap_account)
4679		return 0;
4680	return cgroup_add_files(cont, ss, memsw_cgroup_files,
4681				ARRAY_SIZE(memsw_cgroup_files));
4682}
4683#else
4684static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4685{
4686	return 0;
4687}
4688#endif
4689
4690static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4691{
4692	struct mem_cgroup_per_node *pn;
4693	struct mem_cgroup_per_zone *mz;
4694	enum lru_list l;
4695	int zone, tmp = node;
4696	/*
4697	 * This routine is called against possible nodes.
4698	 * But it's a BUG to call kmalloc() against an offline node.
4699	 *
4700	 * TODO: this routine can waste much memory for nodes which will
4701	 *       never be onlined. It's better to use memory hotplug callback
4702	 *       function.
4703	 */
4704	if (!node_state(node, N_NORMAL_MEMORY))
4705		tmp = -1;
4706	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4707	if (!pn)
4708		return 1;
4709
4710	mem->info.nodeinfo[node] = pn;
4711	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4712		mz = &pn->zoneinfo[zone];
4713		for_each_lru(l)
4714			INIT_LIST_HEAD(&mz->lists[l]);
4715		mz->usage_in_excess = 0;
4716		mz->on_tree = false;
4717		mz->mem = mem;
4718	}
4719	return 0;
4720}
4721
4722static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4723{
4724	kfree(mem->info.nodeinfo[node]);
4725}
4726
4727static struct mem_cgroup *mem_cgroup_alloc(void)
4728{
4729	struct mem_cgroup *mem;
4730	int size = sizeof(struct mem_cgroup);
4731
4732	/* Can be very big if MAX_NUMNODES is very big */
4733	if (size < PAGE_SIZE)
4734		mem = kzalloc(size, GFP_KERNEL);
4735	else
4736		mem = vzalloc(size);
4737
4738	if (!mem)
4739		return NULL;
4740
4741	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4742	if (!mem->stat)
4743		goto out_free;
4744	spin_lock_init(&mem->pcp_counter_lock);
4745	return mem;
4746
4747out_free:
4748	if (size < PAGE_SIZE)
4749		kfree(mem);
4750	else
4751		vfree(mem);
4752	return NULL;
4753}
4754
4755/*
4756 * When destroying a mem_cgroup, references from swap_cgroup can remain.
4757 * (scanning all at force_empty is too costly...)
4758 *
4759 * Instead of clearing all references at force_empty, we remember
4760 * the number of references from swap_cgroup and free the mem_cgroup when
4761 * it goes down to 0.
4762 *
4763 * Removal of cgroup itself succeeds regardless of refs from swap.
4764 */
4765
4766static void __mem_cgroup_free(struct mem_cgroup *mem)
4767{
4768	int node;
4769
4770	mem_cgroup_remove_from_trees(mem);
4771	free_css_id(&mem_cgroup_subsys, &mem->css);
4772
4773	for_each_node_state(node, N_POSSIBLE)
4774		free_mem_cgroup_per_zone_info(mem, node);
4775
4776	free_percpu(mem->stat);
4777	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
4778		kfree(mem);
4779	else
4780		vfree(mem);
4781}
4782
4783static void mem_cgroup_get(struct mem_cgroup *mem)
4784{
4785	atomic_inc(&mem->refcnt);
4786}
4787
4788static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
4789{
4790	if (atomic_sub_and_test(count, &mem->refcnt)) {
4791		struct mem_cgroup *parent = parent_mem_cgroup(mem);
4792		__mem_cgroup_free(mem);
4793		if (parent)
4794			mem_cgroup_put(parent);
4795	}
4796}
4797
4798static void mem_cgroup_put(struct mem_cgroup *mem)
4799{
4800	__mem_cgroup_put(mem, 1);
4801}
4802
4803/*
4804 * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
4805 */
4806static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
4807{
4808	if (!mem->res.parent)
4809		return NULL;
4810	return mem_cgroup_from_res_counter(mem->res.parent, res);
4811}
4812
4813#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4814static void __init enable_swap_cgroup(void)
4815{
4816	if (!mem_cgroup_disabled() && really_do_swap_account)
4817		do_swap_account = 1;
4818}
4819#else
4820static void __init enable_swap_cgroup(void)
4821{
4822}
4823#endif
4824
4825static int mem_cgroup_soft_limit_tree_init(void)
4826{
4827	struct mem_cgroup_tree_per_node *rtpn;
4828	struct mem_cgroup_tree_per_zone *rtpz;
4829	int tmp, node, zone;
4830
4831	for_each_node_state(node, N_POSSIBLE) {
4832		tmp = node;
4833		if (!node_state(node, N_NORMAL_MEMORY))
4834			tmp = -1;
4835		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4836		if (!rtpn)
4837			return 1;
4838
4839		soft_limit_tree.rb_tree_per_node[node] = rtpn;
4840
4841		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4842			rtpz = &rtpn->rb_tree_per_zone[zone];
4843			rtpz->rb_root = RB_ROOT;
4844			spin_lock_init(&rtpz->lock);
4845		}
4846	}
4847	return 0;
4848}
4849
4850static struct cgroup_subsys_state * __ref
4851mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4852{
4853	struct mem_cgroup *mem, *parent;
4854	long error = -ENOMEM;
4855	int node;
4856
4857	mem = mem_cgroup_alloc();
4858	if (!mem)
4859		return ERR_PTR(error);
4860
4861	for_each_node_state(node, N_POSSIBLE)
4862		if (alloc_mem_cgroup_per_zone_info(mem, node))
4863			goto free_out;
4864
4865	/* root ? */
4866	if (cont->parent == NULL) {
4867		int cpu;
4868		enable_swap_cgroup();
4869		parent = NULL;
4870		root_mem_cgroup = mem;
4871		if (mem_cgroup_soft_limit_tree_init())
4872			goto free_out;
4873		for_each_possible_cpu(cpu) {
4874			struct memcg_stock_pcp *stock =
4875						&per_cpu(memcg_stock, cpu);
4876			INIT_WORK(&stock->work, drain_local_stock);
4877		}
4878		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
4879	} else {
4880		parent = mem_cgroup_from_cont(cont->parent);
4881		mem->use_hierarchy = parent->use_hierarchy;
4882		mem->oom_kill_disable = parent->oom_kill_disable;
4883	}
4884
4885	if (parent && parent->use_hierarchy) {
4886		res_counter_init(&mem->res, &parent->res);
4887		res_counter_init(&mem->memsw, &parent->memsw);
4888		/*
4889		 * We increment refcnt of the parent to ensure that we can
4890		 * safely access it on res_counter_charge/uncharge.
4891		 * This refcnt will be decremented when freeing this
4892		 * mem_cgroup (see mem_cgroup_put()).
4893		 */
4894		mem_cgroup_get(parent);
4895	} else {
4896		res_counter_init(&mem->res, NULL);
4897		res_counter_init(&mem->memsw, NULL);
4898	}
4899	mem->last_scanned_child = 0;
4900	mem->last_scanned_node = MAX_NUMNODES;
4901	INIT_LIST_HEAD(&mem->oom_notify);
4902
4903	if (parent)
4904		mem->swappiness = get_swappiness(parent);
4905	atomic_set(&mem->refcnt, 1);
4906	mem->move_charge_at_immigrate = 0;
4907	mutex_init(&mem->thresholds_lock);
4908	return &mem->css;
4909free_out:
4910	__mem_cgroup_free(mem);
4911	root_mem_cgroup = NULL;
4912	return ERR_PTR(error);
4913}
4914
4915static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
4916					struct cgroup *cont)
4917{
4918	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4919
4920	return mem_cgroup_force_empty(mem, false);
4921}
4922
4923static void mem_cgroup_destroy(struct cgroup_subsys *ss,
4924				struct cgroup *cont)
4925{
4926	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4927
4928	mem_cgroup_put(mem);
4929}
4930
4931static int mem_cgroup_populate(struct cgroup_subsys *ss,
4932				struct cgroup *cont)
4933{
4934	int ret;
4935
4936	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
4937				ARRAY_SIZE(mem_cgroup_files));
4938
4939	if (!ret)
4940		ret = register_memsw_files(cont, ss);
4941	return ret;
4942}
4943
4944#ifdef CONFIG_MMU
4945/* Handlers for move charge at task migration. */
4946#define PRECHARGE_COUNT_AT_ONCE	256
4947static int mem_cgroup_do_precharge(unsigned long count)
4948{
4949	int ret = 0;
4950	int batch_count = PRECHARGE_COUNT_AT_ONCE;
4951	struct mem_cgroup *mem = mc.to;
4952
4953	if (mem_cgroup_is_root(mem)) {
4954		mc.precharge += count;
4955		/* we don't need css_get for root */
4956		return ret;
4957	}
4958	/* try to charge at once */
4959	if (count > 1) {
4960		struct res_counter *dummy;
4961		/*
4962		 * "mem" cannot be under rmdir() because cgroup_lock_live_cgroup()
4963		 * has already checked that it is not removed, and we
4964		 * are still under the same cgroup_mutex. So we can postpone
4965		 * css_get().
4966		 */
4967		if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
4968			goto one_by_one;
4969		if (do_swap_account && res_counter_charge(&mem->memsw,
4970						PAGE_SIZE * count, &dummy)) {
4971			res_counter_uncharge(&mem->res, PAGE_SIZE * count);
4972			goto one_by_one;
4973		}
4974		mc.precharge += count;
4975		return ret;
4976	}
4977one_by_one:
4978	/* fall back to one by one charge */
4979	while (count--) {
4980		if (signal_pending(current)) {
4981			ret = -EINTR;
4982			break;
4983		}
4984		if (!batch_count--) {
4985			batch_count = PRECHARGE_COUNT_AT_ONCE;
4986			cond_resched();
4987		}
4988		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false);
4989		if (ret || !mem)
4990			/* mem_cgroup_clear_mc() will do uncharge later */
4991			return -ENOMEM;
4992		mc.precharge++;
4993	}
4994	return ret;
4995}
4996
4997/**
4998 * is_target_pte_for_mc - check whether a pte is a valid target for move charge
4999 * @vma: the vma to which the pte to be checked belongs
5000 * @addr: the address corresponding to the pte to be checked
5001 * @ptent: the pte to be checked
5002 * @target: the pointer where the target page or swap entry is stored (can be NULL)
5003 *
5004 * Returns
5005 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5006 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5007 *     move charge. If @target is not NULL, the page is stored in target->page
5008 *     with an extra refcount taken (callers should handle it).
5009 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5010 *     target for charge migration. If @target is not NULL, the entry is stored
5011 *     in target->ent.
5012 *
5013 * Called with pte lock held.
5014 */
5015union mc_target {
5016	struct page	*page;
5017	swp_entry_t	ent;
5018};
5019
5020enum mc_target_type {
5021	MC_TARGET_NONE,	/* not used */
5022	MC_TARGET_PAGE,
5023	MC_TARGET_SWAP,
5024};
5025
5026static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5027						unsigned long addr, pte_t ptent)
5028{
5029	struct page *page = vm_normal_page(vma, addr, ptent);
5030
5031	if (!page || !page_mapped(page))
5032		return NULL;
5033	if (PageAnon(page)) {
5034		/* we don't move shared anon */
5035		if (!move_anon() || page_mapcount(page) > 2)
5036			return NULL;
5037	} else if (!move_file())
5038		/* we ignore mapcount for file pages */
5039		return NULL;
5040	if (!get_page_unless_zero(page))
5041		return NULL;
5042
5043	return page;
5044}
5045
5046static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5047			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5048{
5049	int usage_count;
5050	struct page *page = NULL;
5051	swp_entry_t ent = pte_to_swp_entry(ptent);
5052
5053	if (!move_anon() || non_swap_entry(ent))
5054		return NULL;
5055	usage_count = mem_cgroup_count_swap_user(ent, &page);
5056	if (usage_count > 1) { /* we don't move shared anon */
5057		if (page)
5058			put_page(page);
5059		return NULL;
5060	}
5061	if (do_swap_account)
5062		entry->val = ent.val;
5063
5064	return page;
5065}
5066
5067static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5068			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5069{
5070	struct page *page = NULL;
5071	struct inode *inode;
5072	struct address_space *mapping;
5073	pgoff_t pgoff;
5074
5075	if (!vma->vm_file) /* anonymous vma */
5076		return NULL;
5077	if (!move_file())
5078		return NULL;
5079
5080	inode = vma->vm_file->f_path.dentry->d_inode;
5081	mapping = vma->vm_file->f_mapping;
5082	if (pte_none(ptent))
5083		pgoff = linear_page_index(vma, addr);
5084	else /* pte_file(ptent) is true */
5085		pgoff = pte_to_pgoff(ptent);
5086
5087	/* page is moved even if it's not RSS of this task (page-faulted). */
5088	if (!mapping_cap_swap_backed(mapping)) { /* normal file */
5089		page = find_get_page(mapping, pgoff);
5090	} else { /* shmem/tmpfs file. we should take swap into account too. */
5091		swp_entry_t ent;
5092		mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
5093		if (do_swap_account)
5094			entry->val = ent.val;
5095	}
5096
5097	return page;
5098}
5099
5100static int is_target_pte_for_mc(struct vm_area_struct *vma,
5101		unsigned long addr, pte_t ptent, union mc_target *target)
5102{
5103	struct page *page = NULL;
5104	struct page_cgroup *pc;
5105	int ret = 0;
5106	swp_entry_t ent = { .val = 0 };
5107
5108	if (pte_present(ptent))
5109		page = mc_handle_present_pte(vma, addr, ptent);
5110	else if (is_swap_pte(ptent))
5111		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
5112	else if (pte_none(ptent) || pte_file(ptent))
5113		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5114
5115	if (!page && !ent.val)
5116		return 0;
5117	if (page) {
5118		pc = lookup_page_cgroup(page);
5119		/*
5120		 * Do only a loose check, without taking the page_cgroup lock;
5121		 * mem_cgroup_move_account() checks whether the pc is valid under
5122		 * the lock.
5123		 */
5124		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5125			ret = MC_TARGET_PAGE;
5126			if (target)
5127				target->page = page;
5128		}
5129		if (!ret || !target)
5130			put_page(page);
5131	}
5132	/* There is a swap entry and a page doesn't exist or isn't charged */
5133	if (ent.val && !ret &&
5134			css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
5135		ret = MC_TARGET_SWAP;
5136		if (target)
5137			target->ent = ent;
5138	}
5139	return ret;
5140}
5141
5142static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5143					unsigned long addr, unsigned long end,
5144					struct mm_walk *walk)
5145{
5146	struct vm_area_struct *vma = walk->private;
5147	pte_t *pte;
5148	spinlock_t *ptl;
5149
5150	split_huge_page_pmd(walk->mm, pmd);
5151
5152	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5153	for (; addr != end; pte++, addr += PAGE_SIZE)
5154		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
5155			mc.precharge++;	/* increment precharge temporarily */
5156	pte_unmap_unlock(pte - 1, ptl);
5157	cond_resched();
5158
5159	return 0;
5160}
5161
5162static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5163{
5164	unsigned long precharge;
5165	struct vm_area_struct *vma;
5166
5167	down_read(&mm->mmap_sem);
5168	for (vma = mm->mmap; vma; vma = vma->vm_next) {
5169		struct mm_walk mem_cgroup_count_precharge_walk = {
5170			.pmd_entry = mem_cgroup_count_precharge_pte_range,
5171			.mm = mm,
5172			.private = vma,
5173		};
5174		if (is_vm_hugetlb_page(vma))
5175			continue;
5176		walk_page_range(vma->vm_start, vma->vm_end,
5177					&mem_cgroup_count_precharge_walk);
5178	}
5179	up_read(&mm->mmap_sem);
5180
5181	precharge = mc.precharge;
5182	mc.precharge = 0;
5183
5184	return precharge;
5185}
5186
5187static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5188{
5189	unsigned long precharge = mem_cgroup_count_precharge(mm);
5190
5191	VM_BUG_ON(mc.moving_task);
5192	mc.moving_task = current;
5193	return mem_cgroup_do_precharge(precharge);
5194}
5195
5196/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5197static void __mem_cgroup_clear_mc(void)
5198{
5199	struct mem_cgroup *from = mc.from;
5200	struct mem_cgroup *to = mc.to;
5201
5202	/* we must uncharge all the leftover precharges from mc.to */
5203	if (mc.precharge) {
5204		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
5205		mc.precharge = 0;
5206	}
5207	/*
5208	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5209	 * we must uncharge here.
5210	 */
5211	if (mc.moved_charge) {
5212		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
5213		mc.moved_charge = 0;
5214	}
5215	/* we must fixup refcnts and charges */
5216	if (mc.moved_swap) {
5217		/* uncharge swap account from the old cgroup */
5218		if (!mem_cgroup_is_root(mc.from))
5219			res_counter_uncharge(&mc.from->memsw,
5220						PAGE_SIZE * mc.moved_swap);
5221		__mem_cgroup_put(mc.from, mc.moved_swap);
5222
5223		if (!mem_cgroup_is_root(mc.to)) {
5224			/*
5225			 * we charged both to->res and to->memsw, so we should
5226			 * uncharge to->res.
5227			 */
5228			res_counter_uncharge(&mc.to->res,
5229						PAGE_SIZE * mc.moved_swap);
5230		}
5231		/* we've already done mem_cgroup_get(mc.to) */
5232		mc.moved_swap = 0;
5233	}
5234	memcg_oom_recover(from);
5235	memcg_oom_recover(to);
5236	wake_up_all(&mc.waitq);
5237}
5238
5239static void mem_cgroup_clear_mc(void)
5240{
5241	struct mem_cgroup *from = mc.from;
5242
5243	/*
5244	 * we must clear moving_task before waking up waiters at the end of
5245	 * task migration.
5246	 */
5247	mc.moving_task = NULL;
5248	__mem_cgroup_clear_mc();
5249	spin_lock(&mc.lock);
5250	mc.from = NULL;
5251	mc.to = NULL;
5252	spin_unlock(&mc.lock);
5253	mem_cgroup_end_move(from);
5254}
5255
5256static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5257				struct cgroup *cgroup,
5258				struct task_struct *p)
5259{
5260	int ret = 0;
5261	struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
5262
5263	if (mem->move_charge_at_immigrate) {
5264		struct mm_struct *mm;
5265		struct mem_cgroup *from = mem_cgroup_from_task(p);
5266
5267		VM_BUG_ON(from == mem);
5268
5269		mm = get_task_mm(p);
5270		if (!mm)
5271			return 0;
5272		/* We move charges only when we move the owner of the mm */
5273		if (mm->owner == p) {
5274			VM_BUG_ON(mc.from);
5275			VM_BUG_ON(mc.to);
5276			VM_BUG_ON(mc.precharge);
5277			VM_BUG_ON(mc.moved_charge);
5278			VM_BUG_ON(mc.moved_swap);
5279			mem_cgroup_start_move(from);
5280			spin_lock(&mc.lock);
5281			mc.from = from;
5282			mc.to = mem;
5283			spin_unlock(&mc.lock);
5284			/* We set mc.moving_task later */
5285
5286			ret = mem_cgroup_precharge_mc(mm);
5287			if (ret)
5288				mem_cgroup_clear_mc();
5289		}
5290		mmput(mm);
5291	}
5292	return ret;
5293}
5294
5295static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5296				struct cgroup *cgroup,
5297				struct task_struct *p)
5298{
5299	mem_cgroup_clear_mc();
5300}
5301
5302static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5303				unsigned long addr, unsigned long end,
5304				struct mm_walk *walk)
5305{
5306	int ret = 0;
5307	struct vm_area_struct *vma = walk->private;
5308	pte_t *pte;
5309	spinlock_t *ptl;
5310
5311	split_huge_page_pmd(walk->mm, pmd);
5312retry:
5313	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5314	for (; addr != end; addr += PAGE_SIZE) {
5315		pte_t ptent = *(pte++);
5316		union mc_target target;
5317		int type;
5318		struct page *page;
5319		struct page_cgroup *pc;
5320		swp_entry_t ent;
5321
5322		if (!mc.precharge)
5323			break;
5324
5325		type = is_target_pte_for_mc(vma, addr, ptent, &target);
5326		switch (type) {
5327		case MC_TARGET_PAGE:
5328			page = target.page;
5329			if (isolate_lru_page(page))
5330				goto put;
5331			pc = lookup_page_cgroup(page);
5332			if (!mem_cgroup_move_account(page, 1, pc,
5333						     mc.from, mc.to, false)) {
5334				mc.precharge--;
5335				/* we uncharge from mc.from later. */
5336				mc.moved_charge++;
5337			}
5338			putback_lru_page(page);
5339put:			/* is_target_pte_for_mc() gets the page */
5340			put_page(page);
5341			break;
5342		case MC_TARGET_SWAP:
5343			ent = target.ent;
5344			if (!mem_cgroup_move_swap_account(ent,
5345						mc.from, mc.to, false)) {
5346				mc.precharge--;
5347				/* we fixup refcnts and charges later. */
5348				mc.moved_swap++;
5349			}
5350			break;
5351		default:
5352			break;
5353		}
5354	}
5355	pte_unmap_unlock(pte - 1, ptl);
5356	cond_resched();
5357
5358	if (addr != end) {
5359		/*
5360		 * We have consumed all precharges we got in can_attach().
5361		 * We try to charge one by one, but we don't do any further
5362		 * charging to mc.to once a charge has failed during the
5363		 * attach() phase.
5364		 */
5365		ret = mem_cgroup_do_precharge(1);
5366		if (!ret)
5367			goto retry;
5368	}
5369
5370	return ret;
5371}
5372
5373static void mem_cgroup_move_charge(struct mm_struct *mm)
5374{
5375	struct vm_area_struct *vma;
5376
5377	lru_add_drain_all();
5378retry:
5379	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5380		/*
5381		 * Someone holding the mmap_sem might be waiting on the
5382		 * waitq. So we cancel all extra charges, wake up all waiters,
5383		 * and retry. Because we cancel precharges, we might not be able
5384		 * to move enough charges, but moving charge is a best-effort
5385		 * feature anyway, so it wouldn't be a big problem.
5386		 */
5387		__mem_cgroup_clear_mc();
5388		cond_resched();
5389		goto retry;
5390	}
5391	for (vma = mm->mmap; vma; vma = vma->vm_next) {
5392		int ret;
5393		struct mm_walk mem_cgroup_move_charge_walk = {
5394			.pmd_entry = mem_cgroup_move_charge_pte_range,
5395			.mm = mm,
5396			.private = vma,
5397		};
5398		if (is_vm_hugetlb_page(vma))
5399			continue;
5400		ret = walk_page_range(vma->vm_start, vma->vm_end,
5401						&mem_cgroup_move_charge_walk);
5402		if (ret)
5403			/*
5404			 * This means we have consumed all precharges and failed
5405			 * to do an additional charge. Just abandon here.
5406			 */
5407			break;
5408	}
5409	up_read(&mm->mmap_sem);
5410}
5411
5412static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5413				struct cgroup *cont,
5414				struct cgroup *old_cont,
5415				struct task_struct *p)
5416{
5417	struct mm_struct *mm = get_task_mm(p);
5418
5419	if (mm) {
5420		if (mc.to)
5421			mem_cgroup_move_charge(mm);
5422		put_swap_token(mm);
5423		mmput(mm);
5424	}
5425	if (mc.to)
5426		mem_cgroup_clear_mc();
5427}
5428#else	/* !CONFIG_MMU */
5429static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5430				struct cgroup *cgroup,
5431				struct task_struct *p)
5432{
5433	return 0;
5434}
5435static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5436				struct cgroup *cgroup,
5437				struct task_struct *p)
5438{
5439}
5440static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5441				struct cgroup *cont,
5442				struct cgroup *old_cont,
5443				struct task_struct *p)
5444{
5445}
5446#endif
5447
5448struct cgroup_subsys mem_cgroup_subsys = {
5449	.name = "memory",
5450	.subsys_id = mem_cgroup_subsys_id,
5451	.create = mem_cgroup_create,
5452	.pre_destroy = mem_cgroup_pre_destroy,
5453	.destroy = mem_cgroup_destroy,
5454	.populate = mem_cgroup_populate,
5455	.can_attach = mem_cgroup_can_attach,
5456	.cancel_attach = mem_cgroup_cancel_attach,
5457	.attach = mem_cgroup_move_task,
5458	.early_init = 0,
5459	.use_id = 1,
5460};
5461
5462#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
5463static int __init enable_swap_account(char *s)
5464{
5465	/* "1" enables swap accounting, "0" disables it; other values keep the default */
5466	if (!strcmp(s, "1"))
5467		really_do_swap_account = 1;
5468	else if (!strcmp(s, "0"))
5469		really_do_swap_account = 0;
5470	return 1;
5471}
5472__setup("swapaccount=", enable_swap_account);
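/*
 * Boot-time control of swap accounting, e.g. on the kernel command line:
 *
 *	swapaccount=1	(enable memsw accounting)
 *	swapaccount=0	(keep it disabled even when built in)
 */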
5473
5474#endif
5475