memcontrol.c revision 5cfb80a73b5a52fb19d8b0611203e4dd58e8e9a2
1/* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * Memory thresholds
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21 * GNU General Public License for more details.
22 */
23
24#include <linux/res_counter.h>
25#include <linux/memcontrol.h>
26#include <linux/cgroup.h>
27#include <linux/mm.h>
28#include <linux/hugetlb.h>
29#include <linux/pagemap.h>
30#include <linux/smp.h>
31#include <linux/page-flags.h>
32#include <linux/backing-dev.h>
33#include <linux/bit_spinlock.h>
34#include <linux/rcupdate.h>
35#include <linux/limits.h>
36#include <linux/mutex.h>
37#include <linux/rbtree.h>
38#include <linux/slab.h>
39#include <linux/swap.h>
40#include <linux/swapops.h>
41#include <linux/spinlock.h>
42#include <linux/eventfd.h>
43#include <linux/sort.h>
44#include <linux/fs.h>
45#include <linux/seq_file.h>
46#include <linux/vmalloc.h>
47#include <linux/mm_inline.h>
48#include <linux/page_cgroup.h>
49#include <linux/cpu.h>
50#include "internal.h"
51
52#include <asm/uaccess.h>
53
54struct cgroup_subsys mem_cgroup_subsys __read_mostly;
55#define MEM_CGROUP_RECLAIM_RETRIES	5
56struct mem_cgroup *root_mem_cgroup __read_mostly;
57
58#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
59/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
60int do_swap_account __read_mostly;
61static int really_do_swap_account __initdata = 1; /* to remember the boot option */
62#else
63#define do_swap_account		(0)
64#endif
65
66/*
67 * The per-memcg event counter is incremented at every pagein/pageout. This
68 * counter is used to trigger some periodic events. This is more straightforward
69 * than using jiffies etc. to handle periodic memcg events.
70 *
71 * These values will be used as !((event) & ((1 <<(thresh)) - 1))
72 */
73#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
74#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
75
76/*
77 * Statistics for memory cgroup.
78 */
79enum mem_cgroup_stat_index {
80	/*
81	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
82	 */
83	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
84	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
85	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
86	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
87	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
88	MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
89	MEM_CGROUP_EVENTS,	/* incremented at every  pagein/pageout */
90
91	MEM_CGROUP_STAT_NSTATS,
92};
93
94struct mem_cgroup_stat_cpu {
95	s64 count[MEM_CGROUP_STAT_NSTATS];
96};
97
98/*
99 * per-zone information in memory controller.
100 */
101struct mem_cgroup_per_zone {
102	/*
103	 * spin_lock to protect the per cgroup LRU
104	 */
105	struct list_head	lists[NR_LRU_LISTS];
106	unsigned long		count[NR_LRU_LISTS];
107
108	struct zone_reclaim_stat reclaim_stat;
109	struct rb_node		tree_node;	/* RB tree node */
110	unsigned long long	usage_in_excess;/* Set to the value by which */
111						/* the soft limit is exceeded*/
112	bool			on_tree;
113	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
114						/* use container_of	   */
115};
116/* Macro for accessing counter */
117#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
118
119struct mem_cgroup_per_node {
120	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
121};
122
123struct mem_cgroup_lru_info {
124	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
125};
126
127/*
128 * Cgroups above their limits are maintained in a RB-Tree, independent of
129 * their hierarchy representation
130 */
131
132struct mem_cgroup_tree_per_zone {
133	struct rb_root rb_root;
134	spinlock_t lock;
135};
136
137struct mem_cgroup_tree_per_node {
138	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
139};
140
141struct mem_cgroup_tree {
142	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
143};
144
145static struct mem_cgroup_tree soft_limit_tree __read_mostly;
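/*
 * The per-zone tree for a given node/zone pair is reached as
 * soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]
 * (see soft_limit_tree_node_zone() below).
 */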
146
147struct mem_cgroup_threshold {
148	struct eventfd_ctx *eventfd;
149	u64 threshold;
150};
151
152struct mem_cgroup_threshold_ary {
153	/* The array index points to the threshold just below usage. */
154	atomic_t current_threshold;
155	/* Size of entries[] */
156	unsigned int size;
157	/* Array of thresholds */
158	struct mem_cgroup_threshold entries[0];
159};
160
161static void mem_cgroup_threshold(struct mem_cgroup *mem);
162
163/*
164 * The memory controller data structure. The memory controller controls both
165 * page cache and RSS per cgroup. We would eventually like to provide
166 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
167 * to help the administrator determine what knobs to tune.
168 *
169 * TODO: Add a water mark for the memory controller. Reclaim will begin when
170 * we hit the water mark. Maybe even add a low water mark, such that
171 * no reclaim occurs from a cgroup at its low water mark; this is
172 * a feature that will be implemented much later in the future.
173 */
174struct mem_cgroup {
175	struct cgroup_subsys_state css;
176	/*
177	 * the counter to account for memory usage
178	 */
179	struct res_counter res;
180	/*
181	 * the counter to account for mem+swap usage.
182	 */
183	struct res_counter memsw;
184	/*
185	 * Per cgroup active and inactive list, similar to the
186	 * per zone LRU lists.
187	 */
188	struct mem_cgroup_lru_info info;
189
190	/*
191	 * Protects reclaim-related members.
192	 */
193	spinlock_t reclaim_param_lock;
194
195	int	prev_priority;	/* for recording reclaim priority */
196
197	/*
198	 * While reclaiming in a hierarchy, we cache the last child we
199	 * reclaimed from.
200	 */
201	int last_scanned_child;
202	/*
203	 * Should the accounting and control be hierarchical, per subtree?
204	 */
205	bool use_hierarchy;
206	atomic_t	oom_lock;
207	atomic_t	refcnt;
208
209	unsigned int	swappiness;
210
211	/* set when res.limit == memsw.limit */
212	bool		memsw_is_minimum;
213
214	/* protect arrays of thresholds */
215	struct mutex thresholds_lock;
216
217	/* thresholds for memory usage. RCU-protected */
218	struct mem_cgroup_threshold_ary *thresholds;
219
220	/* thresholds for mem+swap usage. RCU-protected */
221	struct mem_cgroup_threshold_ary *memsw_thresholds;
222
223	/*
224	 * Should we move charges of a task when a task is moved into this
225	 * mem_cgroup ? And what type of charges should we move ?
226	 */
227	unsigned long 	move_charge_at_immigrate;
228
229	/*
230	 * percpu counter.
231	 */
232	struct mem_cgroup_stat_cpu *stat;
233};
234
235/* Stuffs for move charges at task migration. */
236/*
237 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
238 * left-shifted bitmap of these types.
239 */
240enum move_type {
241	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
242	NR_MOVE_TYPE,
243};
244
245/* "mc" and its members are protected by cgroup_mutex */
246static struct move_charge_struct {
247	struct mem_cgroup *from;
248	struct mem_cgroup *to;
249	unsigned long precharge;
250	unsigned long moved_charge;
251	unsigned long moved_swap;
252	struct task_struct *moving_task;	/* a task moving charges */
253	wait_queue_head_t waitq;		/* a waitq for other context */
254} mc = {
255	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
256};
257
258/*
259 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
260 * limit reclaim to prevent infinite loops, if they ever occur.
261 */
262#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
263#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)
264
265enum charge_type {
266	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
267	MEM_CGROUP_CHARGE_TYPE_MAPPED,
268	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
269	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
270	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
271	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
272	NR_CHARGE_TYPE,
273};
274
275/* used only here (for easy reading) */
276#define PCGF_CACHE	(1UL << PCG_CACHE)
277#define PCGF_USED	(1UL << PCG_USED)
278#define PCGF_LOCK	(1UL << PCG_LOCK)
279/* Not used, but added here for completeness */
280#define PCGF_ACCT	(1UL << PCG_ACCT)
281
282/* for encoding cft->private value on file */
283#define _MEM			(0)
284#define _MEMSWAP		(1)
285#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
286#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
287#define MEMFILE_ATTR(val)	((val) & 0xffff)
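/*
 * For example, MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) packs the counter type
 * into the high 16 bits and the res_counter attribute into the low 16 bits;
 * MEMFILE_TYPE() and MEMFILE_ATTR() unpack them again when the control file
 * is read or written.
 */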
288
289/*
290 * Reclaim flags for mem_cgroup_hierarchical_reclaim
291 */
292#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
293#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
294#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
295#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
296#define MEM_CGROUP_RECLAIM_SOFT_BIT	0x2
297#define MEM_CGROUP_RECLAIM_SOFT		(1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
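/*
 * mem_cgroup_hierarchical_reclaim() decodes these bits into its noswap,
 * shrink and check_soft behaviour; see that function below.
 */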
298
299static void mem_cgroup_get(struct mem_cgroup *mem);
300static void mem_cgroup_put(struct mem_cgroup *mem);
301static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
302static void drain_all_stock_async(void);
303
304static struct mem_cgroup_per_zone *
305mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
306{
307	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
308}
309
310struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
311{
312	return &mem->css;
313}
314
315static struct mem_cgroup_per_zone *
316page_cgroup_zoneinfo(struct page_cgroup *pc)
317{
318	struct mem_cgroup *mem = pc->mem_cgroup;
319	int nid = page_cgroup_nid(pc);
320	int zid = page_cgroup_zid(pc);
321
322	if (!mem)
323		return NULL;
324
325	return mem_cgroup_zoneinfo(mem, nid, zid);
326}
327
328static struct mem_cgroup_tree_per_zone *
329soft_limit_tree_node_zone(int nid, int zid)
330{
331	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
332}
333
334static struct mem_cgroup_tree_per_zone *
335soft_limit_tree_from_page(struct page *page)
336{
337	int nid = page_to_nid(page);
338	int zid = page_zonenum(page);
339
340	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
341}
342
343static void
344__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
345				struct mem_cgroup_per_zone *mz,
346				struct mem_cgroup_tree_per_zone *mctz,
347				unsigned long long new_usage_in_excess)
348{
349	struct rb_node **p = &mctz->rb_root.rb_node;
350	struct rb_node *parent = NULL;
351	struct mem_cgroup_per_zone *mz_node;
352
353	if (mz->on_tree)
354		return;
355
356	mz->usage_in_excess = new_usage_in_excess;
357	if (!mz->usage_in_excess)
358		return;
359	while (*p) {
360		parent = *p;
361		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
362					tree_node);
363		if (mz->usage_in_excess < mz_node->usage_in_excess)
364			p = &(*p)->rb_left;
365		/*
366		 * We can't avoid mem cgroups that are over their soft
367		 * limit by the same amount
368		 */
369		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
370			p = &(*p)->rb_right;
371	}
372	rb_link_node(&mz->tree_node, parent, p);
373	rb_insert_color(&mz->tree_node, &mctz->rb_root);
374	mz->on_tree = true;
375}
376
377static void
378__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
379				struct mem_cgroup_per_zone *mz,
380				struct mem_cgroup_tree_per_zone *mctz)
381{
382	if (!mz->on_tree)
383		return;
384	rb_erase(&mz->tree_node, &mctz->rb_root);
385	mz->on_tree = false;
386}
387
388static void
389mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
390				struct mem_cgroup_per_zone *mz,
391				struct mem_cgroup_tree_per_zone *mctz)
392{
393	spin_lock(&mctz->lock);
394	__mem_cgroup_remove_exceeded(mem, mz, mctz);
395	spin_unlock(&mctz->lock);
396}
397
398
399static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
400{
401	unsigned long long excess;
402	struct mem_cgroup_per_zone *mz;
403	struct mem_cgroup_tree_per_zone *mctz;
404	int nid = page_to_nid(page);
405	int zid = page_zonenum(page);
406	mctz = soft_limit_tree_from_page(page);
407
408	/*
409	 * Necessary to update all ancestors when hierarchy is used,
410	 * because their event counters are not touched.
411	 */
412	for (; mem; mem = parent_mem_cgroup(mem)) {
413		mz = mem_cgroup_zoneinfo(mem, nid, zid);
414		excess = res_counter_soft_limit_excess(&mem->res);
415		/*
416	 * We have to update the tree if mz is on the RB-tree or
417	 * mem is over its soft limit.
418		 */
419		if (excess || mz->on_tree) {
420			spin_lock(&mctz->lock);
421			/* if on-tree, remove it */
422			if (mz->on_tree)
423				__mem_cgroup_remove_exceeded(mem, mz, mctz);
424			/*
425			 * Insert again. mz->usage_in_excess will be updated.
426			 * If excess is 0, no tree ops.
427			 */
428			__mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
429			spin_unlock(&mctz->lock);
430		}
431	}
432}
433
434static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
435{
436	int node, zone;
437	struct mem_cgroup_per_zone *mz;
438	struct mem_cgroup_tree_per_zone *mctz;
439
440	for_each_node_state(node, N_POSSIBLE) {
441		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
442			mz = mem_cgroup_zoneinfo(mem, node, zone);
443			mctz = soft_limit_tree_node_zone(node, zone);
444			mem_cgroup_remove_exceeded(mem, mz, mctz);
445		}
446	}
447}
448
449static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
450{
451	return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
452}
453
454static struct mem_cgroup_per_zone *
455__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
456{
457	struct rb_node *rightmost = NULL;
458	struct mem_cgroup_per_zone *mz;
459
460retry:
461	mz = NULL;
462	rightmost = rb_last(&mctz->rb_root);
463	if (!rightmost)
464		goto done;		/* Nothing to reclaim from */
465
466	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
467	/*
468	 * Remove the node now but someone else can add it back,
469	 * we will add it back at the end of reclaim to its correct
470	 * position in the tree.
471	 */
472	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
473	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
474		!css_tryget(&mz->mem->css))
475		goto retry;
476done:
477	return mz;
478}
479
480static struct mem_cgroup_per_zone *
481mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
482{
483	struct mem_cgroup_per_zone *mz;
484
485	spin_lock(&mctz->lock);
486	mz = __mem_cgroup_largest_soft_limit_node(mctz);
487	spin_unlock(&mctz->lock);
488	return mz;
489}
490
491static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
492		enum mem_cgroup_stat_index idx)
493{
494	int cpu;
495	s64 val = 0;
496
497	for_each_possible_cpu(cpu)
498		val += per_cpu(mem->stat->count[idx], cpu);
499	return val;
500}
501
502static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
503{
504	s64 ret;
505
506	ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
507	ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
508	return ret;
509}
510
511static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
512					 bool charge)
513{
514	int val = (charge) ? 1 : -1;
515	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
516}
517
518static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
519					 struct page_cgroup *pc,
520					 bool charge)
521{
522	int val = (charge) ? 1 : -1;
523
524	preempt_disable();
525
526	if (PageCgroupCache(pc))
527		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
528	else
529		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
530
531	if (charge)
532		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
533	else
534		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
535	__this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
536
537	preempt_enable();
538}
539
540static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
541					enum lru_list idx)
542{
543	int nid, zid;
544	struct mem_cgroup_per_zone *mz;
545	u64 total = 0;
546
547	for_each_online_node(nid)
548		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
549			mz = mem_cgroup_zoneinfo(mem, nid, zid);
550			total += MEM_CGROUP_ZSTAT(mz, idx);
551		}
552	return total;
553}
554
555static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
556{
557	s64 val;
558
559	val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);
560
561	return !(val & ((1 << event_mask_shift) - 1));
562}
563
564/*
565 * Check events in order.
566 *
567 */
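/*
 * Note: SOFTLIMIT_EVENTS_THRESH (once in 1024 events) is a multiple of
 * THRESHOLDS_EVENTS_THRESH (once in 128 events), so nesting the soft limit
 * check inside the threshold check below never skips a soft limit update.
 */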
568static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
569{
570	/* threshold event is triggered in finer grain than soft limit */
571	if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
572		mem_cgroup_threshold(mem);
573		if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
574			mem_cgroup_update_tree(mem, page);
575	}
576}
577
578static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
579{
580	return container_of(cgroup_subsys_state(cont,
581				mem_cgroup_subsys_id), struct mem_cgroup,
582				css);
583}
584
585struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
586{
587	/*
588	 * mm_update_next_owner() may clear mm->owner to NULL
589	 * if it races with swapoff, page migration, etc.
590	 * So this can be called with p == NULL.
591	 */
592	if (unlikely(!p))
593		return NULL;
594
595	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
596				struct mem_cgroup, css);
597}
598
599static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
600{
601	struct mem_cgroup *mem = NULL;
602
603	if (!mm)
604		return NULL;
605	/*
606	 * Because we have no locks, mm->owner may be being moved to another
607	 * cgroup. We use css_tryget() here even if this looks
608	 * pessimistic (rather than adding locks here).
609	 */
610	rcu_read_lock();
611	do {
612		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
613		if (unlikely(!mem))
614			break;
615	} while (!css_tryget(&mem->css));
616	rcu_read_unlock();
617	return mem;
618}
619
620/*
621 * Call the callback function against all cgroups under the hierarchy tree.
622 */
623static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
624			  int (*func)(struct mem_cgroup *, void *))
625{
626	int found, ret, nextid;
627	struct cgroup_subsys_state *css;
628	struct mem_cgroup *mem;
629
630	if (!root->use_hierarchy)
631		return (*func)(root, data);
632
633	nextid = 1;
634	do {
635		ret = 0;
636		mem = NULL;
637
638		rcu_read_lock();
639		css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
640				   &found);
641		if (css && css_tryget(css))
642			mem = container_of(css, struct mem_cgroup, css);
643		rcu_read_unlock();
644
645		if (mem) {
646			ret = (*func)(mem, data);
647			css_put(&mem->css);
648		}
649		nextid = found + 1;
650	} while (!ret && css);
651
652	return ret;
653}
654
655static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
656{
657	return (mem == root_mem_cgroup);
658}
659
660/*
661 * The following LRU functions are allowed to be used without PCG_LOCK.
662 * Operations are called by the global LRU routines independently of memcg.
663 * What we have to take care of here is the validity of pc->mem_cgroup.
664 *
665 * Changes to pc->mem_cgroup happen on
666 * 1. charge
667 * 2. moving account
668 * In the typical case, "charge" is done before add-to-lru. The exception is
669 * SwapCache, which is added to the LRU before charge.
670 * If the PCG_USED bit is not set, the page_cgroup is not added to this private LRU.
671 * When moving account, the page is not on the LRU. It's isolated.
672 */
673
674void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
675{
676	struct page_cgroup *pc;
677	struct mem_cgroup_per_zone *mz;
678
679	if (mem_cgroup_disabled())
680		return;
681	pc = lookup_page_cgroup(page);
682	/* can happen while we handle swapcache. */
683	if (!TestClearPageCgroupAcctLRU(pc))
684		return;
685	VM_BUG_ON(!pc->mem_cgroup);
686	/*
687	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
688	 * removed from global LRU.
689	 */
690	mz = page_cgroup_zoneinfo(pc);
691	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
692	if (mem_cgroup_is_root(pc->mem_cgroup))
693		return;
694	VM_BUG_ON(list_empty(&pc->lru));
695	list_del_init(&pc->lru);
696	return;
697}
698
699void mem_cgroup_del_lru(struct page *page)
700{
701	mem_cgroup_del_lru_list(page, page_lru(page));
702}
703
704void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
705{
706	struct mem_cgroup_per_zone *mz;
707	struct page_cgroup *pc;
708
709	if (mem_cgroup_disabled())
710		return;
711
712	pc = lookup_page_cgroup(page);
713	/*
714	 * The Used bit is set without atomic ops but after smp_wmb().
715	 * To make pc->mem_cgroup visible, insert smp_rmb() here.
716	 */
717	smp_rmb();
718	/* unused or root page is not rotated. */
719	if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
720		return;
721	mz = page_cgroup_zoneinfo(pc);
722	list_move(&pc->lru, &mz->lists[lru]);
723}
724
725void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
726{
727	struct page_cgroup *pc;
728	struct mem_cgroup_per_zone *mz;
729
730	if (mem_cgroup_disabled())
731		return;
732	pc = lookup_page_cgroup(page);
733	VM_BUG_ON(PageCgroupAcctLRU(pc));
734	/*
735	 * The Used bit is set without atomic ops but after smp_wmb().
736	 * To make pc->mem_cgroup visible, insert smp_rmb() here.
737	 */
738	smp_rmb();
739	if (!PageCgroupUsed(pc))
740		return;
741
742	mz = page_cgroup_zoneinfo(pc);
743	MEM_CGROUP_ZSTAT(mz, lru) += 1;
744	SetPageCgroupAcctLRU(pc);
745	if (mem_cgroup_is_root(pc->mem_cgroup))
746		return;
747	list_add(&pc->lru, &mz->lists[lru]);
748}
749
750/*
751 * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to
752 * the lru because the page may be reused after it's fully uncharged (because of
753 * SwapCache behavior). To handle that, unlink the page_cgroup from the LRU when
754 * charging it again. This function is only used to charge SwapCache. It's done
755 * under lock_page and it is expected that zone->lru_lock is never held.
756 */
757static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
758{
759	unsigned long flags;
760	struct zone *zone = page_zone(page);
761	struct page_cgroup *pc = lookup_page_cgroup(page);
762
763	spin_lock_irqsave(&zone->lru_lock, flags);
764	/*
765	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
766	 * is guarded by lock_page() because the page is SwapCache.
767	 */
768	if (!PageCgroupUsed(pc))
769		mem_cgroup_del_lru_list(page, page_lru(page));
770	spin_unlock_irqrestore(&zone->lru_lock, flags);
771}
772
773static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
774{
775	unsigned long flags;
776	struct zone *zone = page_zone(page);
777	struct page_cgroup *pc = lookup_page_cgroup(page);
778
779	spin_lock_irqsave(&zone->lru_lock, flags);
780	/* link when the page is linked to LRU but page_cgroup isn't */
781	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
782		mem_cgroup_add_lru_list(page, page_lru(page));
783	spin_unlock_irqrestore(&zone->lru_lock, flags);
784}
785
786
787void mem_cgroup_move_lists(struct page *page,
788			   enum lru_list from, enum lru_list to)
789{
790	if (mem_cgroup_disabled())
791		return;
792	mem_cgroup_del_lru_list(page, from);
793	mem_cgroup_add_lru_list(page, to);
794}
795
796int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
797{
798	int ret;
799	struct mem_cgroup *curr = NULL;
800
801	task_lock(task);
802	rcu_read_lock();
803	curr = try_get_mem_cgroup_from_mm(task->mm);
804	rcu_read_unlock();
805	task_unlock(task);
806	if (!curr)
807		return 0;
808	/*
809	 * We should check use_hierarchy of "mem", not "curr". Checking
810	 * use_hierarchy of "curr" here would make this function return true if
811	 * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the
812	 * *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
813	 */
814	if (mem->use_hierarchy)
815		ret = css_is_ancestor(&curr->css, &mem->css);
816	else
817		ret = (curr == mem);
818	css_put(&curr->css);
819	return ret;
820}
821
822/*
823 * prev_priority control... this will be used in the memory reclaim path.
824 */
825int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
826{
827	int prev_priority;
828
829	spin_lock(&mem->reclaim_param_lock);
830	prev_priority = mem->prev_priority;
831	spin_unlock(&mem->reclaim_param_lock);
832
833	return prev_priority;
834}
835
836void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
837{
838	spin_lock(&mem->reclaim_param_lock);
839	if (priority < mem->prev_priority)
840		mem->prev_priority = priority;
841	spin_unlock(&mem->reclaim_param_lock);
842}
843
844void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
845{
846	spin_lock(&mem->reclaim_param_lock);
847	mem->prev_priority = priority;
848	spin_unlock(&mem->reclaim_param_lock);
849}
850
851static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
852{
853	unsigned long active;
854	unsigned long inactive;
855	unsigned long gb;
856	unsigned long inactive_ratio;
857
858	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
859	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
860
861	gb = (inactive + active) >> (30 - PAGE_SHIFT);
862	if (gb)
863		inactive_ratio = int_sqrt(10 * gb);
864	else
865		inactive_ratio = 1;
866
867	if (present_pages) {
868		present_pages[0] = inactive;
869		present_pages[1] = active;
870	}
871
872	return inactive_ratio;
873}
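/*
 * Worked example, assuming 4KB pages (PAGE_SHIFT == 12): 4GB of anon memory
 * gives gb == 4, so inactive_ratio == int_sqrt(40) == 6, and inactive anon
 * is reported as low once inactive * 6 < active.
 */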
874
875int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
876{
877	unsigned long active;
878	unsigned long inactive;
879	unsigned long present_pages[2];
880	unsigned long inactive_ratio;
881
882	inactive_ratio = calc_inactive_ratio(memcg, present_pages);
883
884	inactive = present_pages[0];
885	active = present_pages[1];
886
887	if (inactive * inactive_ratio < active)
888		return 1;
889
890	return 0;
891}
892
893int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
894{
895	unsigned long active;
896	unsigned long inactive;
897
898	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
899	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
900
901	return (active > inactive);
902}
903
904unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
905				       struct zone *zone,
906				       enum lru_list lru)
907{
908	int nid = zone->zone_pgdat->node_id;
909	int zid = zone_idx(zone);
910	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
911
912	return MEM_CGROUP_ZSTAT(mz, lru);
913}
914
915struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
916						      struct zone *zone)
917{
918	int nid = zone->zone_pgdat->node_id;
919	int zid = zone_idx(zone);
920	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
921
922	return &mz->reclaim_stat;
923}
924
925struct zone_reclaim_stat *
926mem_cgroup_get_reclaim_stat_from_page(struct page *page)
927{
928	struct page_cgroup *pc;
929	struct mem_cgroup_per_zone *mz;
930
931	if (mem_cgroup_disabled())
932		return NULL;
933
934	pc = lookup_page_cgroup(page);
935	/*
936	 * The Used bit is set without atomic ops but after smp_wmb().
937	 * To make pc->mem_cgroup visible, insert smp_rmb() here.
938	 */
939	smp_rmb();
940	if (!PageCgroupUsed(pc))
941		return NULL;
942
943	mz = page_cgroup_zoneinfo(pc);
944	if (!mz)
945		return NULL;
946
947	return &mz->reclaim_stat;
948}
949
950unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
951					struct list_head *dst,
952					unsigned long *scanned, int order,
953					int mode, struct zone *z,
954					struct mem_cgroup *mem_cont,
955					int active, int file)
956{
957	unsigned long nr_taken = 0;
958	struct page *page;
959	unsigned long scan;
960	LIST_HEAD(pc_list);
961	struct list_head *src;
962	struct page_cgroup *pc, *tmp;
963	int nid = z->zone_pgdat->node_id;
964	int zid = zone_idx(z);
965	struct mem_cgroup_per_zone *mz;
966	int lru = LRU_FILE * file + active;
967	int ret;
968
969	BUG_ON(!mem_cont);
970	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
971	src = &mz->lists[lru];
972
973	scan = 0;
974	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
975		if (scan >= nr_to_scan)
976			break;
977
978		page = pc->page;
979		if (unlikely(!PageCgroupUsed(pc)))
980			continue;
981		if (unlikely(!PageLRU(page)))
982			continue;
983
984		scan++;
985		ret = __isolate_lru_page(page, mode, file);
986		switch (ret) {
987		case 0:
988			list_move(&page->lru, dst);
989			mem_cgroup_del_lru(page);
990			nr_taken++;
991			break;
992		case -EBUSY:
993			/* we don't affect global LRU but rotate in our LRU */
994			mem_cgroup_rotate_lru_list(page, page_lru(page));
995			break;
996		default:
997			break;
998		}
999	}
1000
1001	*scanned = scan;
1002	return nr_taken;
1003}
1004
1005#define mem_cgroup_from_res_counter(counter, member)	\
1006	container_of(counter, struct mem_cgroup, member)
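/*
 * For example, mem_cgroup_from_res_counter(fail_res, memsw) maps a failing
 * mem+swap res_counter back to its owning mem_cgroup via container_of();
 * see __mem_cgroup_try_charge() below.
 */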
1007
1008static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
1009{
1010	if (do_swap_account) {
1011		if (res_counter_check_under_limit(&mem->res) &&
1012			res_counter_check_under_limit(&mem->memsw))
1013			return true;
1014	} else
1015		if (res_counter_check_under_limit(&mem->res))
1016			return true;
1017	return false;
1018}
1019
1020static unsigned int get_swappiness(struct mem_cgroup *memcg)
1021{
1022	struct cgroup *cgrp = memcg->css.cgroup;
1023	unsigned int swappiness;
1024
1025	/* root ? */
1026	if (cgrp->parent == NULL)
1027		return vm_swappiness;
1028
1029	spin_lock(&memcg->reclaim_param_lock);
1030	swappiness = memcg->swappiness;
1031	spin_unlock(&memcg->reclaim_param_lock);
1032
1033	return swappiness;
1034}
1035
1036static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
1037{
1038	int *val = data;
1039	(*val)++;
1040	return 0;
1041}
1042
1043/**
1044 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1045 * @memcg: The memory cgroup that went over limit
1046 * @p: Task that is going to be killed
1047 *
1048 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1049 * enabled
1050 */
1051void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1052{
1053	struct cgroup *task_cgrp;
1054	struct cgroup *mem_cgrp;
1055	/*
1056	 * Need a buffer in BSS, can't rely on allocations. The code relies
1057	 * on the assumption that OOM is serialized for memory controller.
1058	 * If this assumption is broken, revisit this code.
1059	 */
1060	static char memcg_name[PATH_MAX];
1061	int ret;
1062
1063	if (!memcg || !p)
1064		return;
1065
1066
1067	rcu_read_lock();
1068
1069	mem_cgrp = memcg->css.cgroup;
1070	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1071
1072	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1073	if (ret < 0) {
1074		/*
1075		 * Unfortunately, we are unable to convert to a useful name,
1076		 * but we'll still print out the usage information.
1077		 */
1078		rcu_read_unlock();
1079		goto done;
1080	}
1081	rcu_read_unlock();
1082
1083	printk(KERN_INFO "Task in %s killed", memcg_name);
1084
1085	rcu_read_lock();
1086	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1087	if (ret < 0) {
1088		rcu_read_unlock();
1089		goto done;
1090	}
1091	rcu_read_unlock();
1092
1093	/*
1094	 * Continues from above, so we don't need a KERN_ level
1095	 */
1096	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1097done:
1098
1099	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1100		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1101		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1102		res_counter_read_u64(&memcg->res, RES_FAILCNT));
1103	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1104		"failcnt %llu\n",
1105		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1106		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1107		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1108}
1109
1110/*
1111 * This function returns the number of memcgs under the hierarchy tree.
1112 * Returns 1 (self count) if there are no children.
1113 */
1114static int mem_cgroup_count_children(struct mem_cgroup *mem)
1115{
1116	int num = 0;
1117 	mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
1118	return num;
1119}
1120
1121/*
1122 * Visit the first child (need not be the first child as per the ordering
1123 * of the cgroup list, since we track last_scanned_child) of @mem and use
1124 * that to reclaim free pages from.
1125 */
1126static struct mem_cgroup *
1127mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1128{
1129	struct mem_cgroup *ret = NULL;
1130	struct cgroup_subsys_state *css;
1131	int nextid, found;
1132
1133	if (!root_mem->use_hierarchy) {
1134		css_get(&root_mem->css);
1135		ret = root_mem;
1136	}
1137
1138	while (!ret) {
1139		rcu_read_lock();
1140		nextid = root_mem->last_scanned_child + 1;
1141		css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
1142				   &found);
1143		if (css && css_tryget(css))
1144			ret = container_of(css, struct mem_cgroup, css);
1145
1146		rcu_read_unlock();
1147		/* Updates scanning parameter */
1148		spin_lock(&root_mem->reclaim_param_lock);
1149		if (!css) {
1150			/* this means start scan from ID:1 */
1151			root_mem->last_scanned_child = 0;
1152		} else
1153			root_mem->last_scanned_child = found;
1154		spin_unlock(&root_mem->reclaim_param_lock);
1155	}
1156
1157	return ret;
1158}
1159
1160/*
1161 * Scan the hierarchy if needed to reclaim memory. We remember the last child
1162 * we reclaimed from, so that we don't end up penalizing one child extensively
1163 * based on its position in the children list.
1164 *
1165 * root_mem is the original ancestor that we've been reclaiming from.
1166 *
1167 * We give up and return to the caller when we visit root_mem twice.
1168 * (other groups can be removed while we're walking....)
1169 *
1170 * If shrink==true, to avoid freeing too much, this returns immediately.
1171 */
1172static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1173						struct zone *zone,
1174						gfp_t gfp_mask,
1175						unsigned long reclaim_options)
1176{
1177	struct mem_cgroup *victim;
1178	int ret, total = 0;
1179	int loop = 0;
1180	bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1181	bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1182	bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1183	unsigned long excess = mem_cgroup_get_excess(root_mem);
1184
1185	/* If memsw_is_minimum==1, swap-out is of no use. */
1186	if (root_mem->memsw_is_minimum)
1187		noswap = true;
1188
1189	while (1) {
1190		victim = mem_cgroup_select_victim(root_mem);
1191		if (victim == root_mem) {
1192			loop++;
1193			if (loop >= 1)
1194				drain_all_stock_async();
1195			if (loop >= 2) {
1196				/*
1197				 * If we have not been able to reclaim
1198				 * anything, it might be because there are
1199				 * no reclaimable pages under this hierarchy
1200				 */
1201				if (!check_soft || !total) {
1202					css_put(&victim->css);
1203					break;
1204				}
1205				/*
1206				 * We want to do more targeted reclaim.
1207				 * excess >> 2 is not too excessive, so we don't
1208				 * reclaim too much, nor too little, so we don't
1209				 * keep coming back to reclaim from this cgroup
1210				 */
1211				if (total >= (excess >> 2) ||
1212					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
1213					css_put(&victim->css);
1214					break;
1215				}
1216			}
1217		}
1218		if (!mem_cgroup_local_usage(victim)) {
1219			/* this cgroup's local usage == 0 */
1220			css_put(&victim->css);
1221			continue;
1222		}
1223		/* we use swappiness of local cgroup */
1224		if (check_soft)
1225			ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1226				noswap, get_swappiness(victim), zone,
1227				zone->zone_pgdat->node_id);
1228		else
1229			ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1230						noswap, get_swappiness(victim));
1231		css_put(&victim->css);
1232		/*
1233		 * When shrinking usage, we can't check whether we should stop here
1234		 * or reclaim more; it depends on the callers. last_scanned_child
1235		 * works well enough to keep fairness under the tree.
1236		 */
1237		if (shrink)
1238			return ret;
1239		total += ret;
1240		if (check_soft) {
1241			if (res_counter_check_under_soft_limit(&root_mem->res))
1242				return total;
1243		} else if (mem_cgroup_check_under_limit(root_mem))
1244			return 1 + total;
1245	}
1246	return total;
1247}
1248
1249static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data)
1250{
1251	int *val = (int *)data;
1252	int x;
1253	/*
1254	 * Logically, we can stop scanning immediately when we find
1255	 * a memcg is already locked. But considering unlock ops and
1256	 * creation/removal of memcgs, scanning them all is a simpler operation.
1257	 */
1258	x = atomic_inc_return(&mem->oom_lock);
1259	*val = max(x, *val);
1260	return 0;
1261}
1262/*
1263 * Check whether the OOM killer is already running under our hierarchy.
1264 * If someone is running, return false.
1265 */
1266static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1267{
1268	int lock_count = 0;
1269
1270	mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb);
1271
1272	if (lock_count == 1)
1273		return true;
1274	return false;
1275}
1276
1277static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data)
1278{
1279	/*
1280	 * When a new child is created while the hierarchy is under oom,
1281	 * mem_cgroup_oom_lock() may not be called. We have to use
1282	 * atomic_add_unless() here.
1283	 */
1284	atomic_add_unless(&mem->oom_lock, -1, 0);
1285	return 0;
1286}
1287
1288static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
1289{
1290	mem_cgroup_walk_tree(mem, NULL,	mem_cgroup_oom_unlock_cb);
1291}
1292
1293static DEFINE_MUTEX(memcg_oom_mutex);
1294static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1295
1296/*
1297 * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1298 */
1299bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1300{
1301	DEFINE_WAIT(wait);
1302	bool locked;
1303
1304	/* At first, try to OOM lock hierarchy under mem.*/
1305	mutex_lock(&memcg_oom_mutex);
1306	locked = mem_cgroup_oom_lock(mem);
1307	/*
1308	 * Even if signal_pending(), we can't quit charge() loop without
1309	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1310	 * under OOM is always welcome, so use TASK_KILLABLE here.
1311	 */
1312	if (!locked)
1313		prepare_to_wait(&memcg_oom_waitq, &wait, TASK_KILLABLE);
1314	mutex_unlock(&memcg_oom_mutex);
1315
1316	if (locked)
1317		mem_cgroup_out_of_memory(mem, mask);
1318	else {
1319		schedule();
1320		finish_wait(&memcg_oom_waitq, &wait);
1321	}
1322	mutex_lock(&memcg_oom_mutex);
1323	mem_cgroup_oom_unlock(mem);
1324	/*
1325	 * Here, we use a global waitq ..... a more fine-grained waitq?
1326	 * Assume the following hierarchy.
1327	 * A/
1328	 *   01
1329	 *   02
1330	 * Assume OOM happens both in A and 01 at the same time. They are
1331	 * mutually exclusive by the lock. (A kill in 01 helps A.)
1332	 * If we used a per-memcg waitq, we would have to wake up waiters on A and
1333	 * 02 in addition to waiters on 01. We use a global waitq to avoid the mess.
1334	 * It will not be a big problem.
1335	 * (And a task may be moved to other groups while it's waiting for OOM.)
1336	 */
1337	wake_up_all(&memcg_oom_waitq);
1338	mutex_unlock(&memcg_oom_mutex);
1339
1340	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1341		return false;
1342	/* Give chance to dying process */
1343	schedule_timeout(1);
1344	return true;
1345}
1346
1347/*
1348 * Currently used to update mapped file statistics, but the routine can be
1349 * generalized to update other statistics as well.
1350 */
1351void mem_cgroup_update_file_mapped(struct page *page, int val)
1352{
1353	struct mem_cgroup *mem;
1354	struct page_cgroup *pc;
1355
1356	pc = lookup_page_cgroup(page);
1357	if (unlikely(!pc))
1358		return;
1359
1360	lock_page_cgroup(pc);
1361	mem = pc->mem_cgroup;
1362	if (!mem)
1363		goto done;
1364
1365	if (!PageCgroupUsed(pc))
1366		goto done;
1367
1368	/*
1369	 * Preemption is already disabled. We can use __this_cpu_xxx
1370	 */
1371	__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], val);
1372
1373done:
1374	unlock_page_cgroup(pc);
1375}
1376
1377/*
1378 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
1379 * TODO: it may be necessary to use bigger numbers on big iron.
1380 */
1381#define CHARGE_SIZE	(32 * PAGE_SIZE)
1382struct memcg_stock_pcp {
1383	struct mem_cgroup *cached; /* this is never the root cgroup */
1384	int charge;
1385	struct work_struct work;
1386};
1387static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1388static atomic_t memcg_drain_count;
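/*
 * Charging is batched: __mem_cgroup_try_charge() charges CHARGE_SIZE (32
 * pages) to the res_counter at once and stocks the surplus here via
 * refill_stock(); subsequent single-page charges for the same cgroup on this
 * cpu are then served by consume_stock() without touching the res_counter.
 */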
1389
1390/*
1391 * Try to consume stocked charge on this cpu. If successful, PAGE_SIZE is
1392 * consumed from the local stock and true is returned. If the stock is 0 or
1393 * holds charges from a cgroup which is not the current target, false is
1394 * returned. This stock will be refilled.
1395 */
1396static bool consume_stock(struct mem_cgroup *mem)
1397{
1398	struct memcg_stock_pcp *stock;
1399	bool ret = true;
1400
1401	stock = &get_cpu_var(memcg_stock);
1402	if (mem == stock->cached && stock->charge)
1403		stock->charge -= PAGE_SIZE;
1404	else /* need to call res_counter_charge */
1405		ret = false;
1406	put_cpu_var(memcg_stock);
1407	return ret;
1408}
1409
1410/*
1411 * Return the stock cached in percpu to the res_counter and reset the cached information.
1412 */
1413static void drain_stock(struct memcg_stock_pcp *stock)
1414{
1415	struct mem_cgroup *old = stock->cached;
1416
1417	if (stock->charge) {
1418		res_counter_uncharge(&old->res, stock->charge);
1419		if (do_swap_account)
1420			res_counter_uncharge(&old->memsw, stock->charge);
1421	}
1422	stock->cached = NULL;
1423	stock->charge = 0;
1424}
1425
1426/*
1427 * This must be called with preemption disabled or by
1428 * a thread which is pinned to the local cpu.
1429 */
1430static void drain_local_stock(struct work_struct *dummy)
1431{
1432	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1433	drain_stock(stock);
1434}
1435
1436/*
1437 * Cache charges (val) obtained from the res_counter in the local per-cpu area.
1438 * They will be consumed by consume_stock() later.
1439 */
1440static void refill_stock(struct mem_cgroup *mem, int val)
1441{
1442	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1443
1444	if (stock->cached != mem) { /* reset if necessary */
1445		drain_stock(stock);
1446		stock->cached = mem;
1447	}
1448	stock->charge += val;
1449	put_cpu_var(memcg_stock);
1450}
1451
1452/*
1453 * Tries to drain stocked charges on other cpus. This function is asynchronous
1454 * and just queues a work item per cpu to drain locally on each cpu. Callers can
1455 * expect some charges to be returned to the res_counter later but cannot wait
1456 * for it.
1457 */
1458static void drain_all_stock_async(void)
1459{
1460	int cpu;
1461	/* This function schedules "drain" in an asynchronous way.
1462	 * The result of "drain" is not directly handled by callers. So,
1463	 * if someone is already calling drain, we don't have to call drain again.
1464	 * Anyway, the WORK_STRUCT_PENDING check in queue_work_on() will catch it if
1465	 * there is a race. We just do a loose check here.
1466	 */
1467	if (atomic_read(&memcg_drain_count))
1468		return;
1469	/* Notify other cpus that system-wide "drain" is running */
1470	atomic_inc(&memcg_drain_count);
1471	get_online_cpus();
1472	for_each_online_cpu(cpu) {
1473		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1474		schedule_work_on(cpu, &stock->work);
1475	}
1476 	put_online_cpus();
1477	atomic_dec(&memcg_drain_count);
1478	/* We don't wait for flush_work */
1479}
1480
1481/* This is a synchronous drain interface. */
1482static void drain_all_stock_sync(void)
1483{
1484	/* called when force_empty is called */
1485	atomic_inc(&memcg_drain_count);
1486	schedule_on_each_cpu(drain_local_stock);
1487	atomic_dec(&memcg_drain_count);
1488}
1489
1490static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
1491					unsigned long action,
1492					void *hcpu)
1493{
1494	int cpu = (unsigned long)hcpu;
1495	struct memcg_stock_pcp *stock;
1496
1497	if (action != CPU_DEAD)
1498		return NOTIFY_OK;
1499	stock = &per_cpu(memcg_stock, cpu);
1500	drain_stock(stock);
1501	return NOTIFY_OK;
1502}
1503
1504/*
1505 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
1506 * the OOM killer can be invoked.
1507 */
1508static int __mem_cgroup_try_charge(struct mm_struct *mm,
1509			gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
1510{
1511	struct mem_cgroup *mem, *mem_over_limit;
1512	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1513	struct res_counter *fail_res;
1514	int csize = CHARGE_SIZE;
1515
1516	/*
1517	 * Unlike the global VM's OOM kill, we're not in a memory shortage
1518	 * at the system level. So, allow a dying process to go ahead in addition
1519	 * to a MEMDIE process.
1520	 */
1521	if (unlikely(test_thread_flag(TIF_MEMDIE)
1522		     || fatal_signal_pending(current)))
1523		goto bypass;
1524
1525	/*
1526	 * We always charge the cgroup the mm_struct belongs to.
1527	 * The mm_struct's mem_cgroup changes on task migration if the
1528	 * thread group leader migrates. It's possible that mm is not
1529	 * set, if so charge the init_mm (happens for pagecache usage).
1530	 */
1531	mem = *memcg;
1532	if (likely(!mem)) {
1533		mem = try_get_mem_cgroup_from_mm(mm);
1534		*memcg = mem;
1535	} else {
1536		css_get(&mem->css);
1537	}
1538	if (unlikely(!mem))
1539		return 0;
1540
1541	VM_BUG_ON(css_is_removed(&mem->css));
1542	if (mem_cgroup_is_root(mem))
1543		goto done;
1544
1545	while (1) {
1546		int ret = 0;
1547		unsigned long flags = 0;
1548
1549		if (consume_stock(mem))
1550			goto done;
1551
1552		ret = res_counter_charge(&mem->res, csize, &fail_res);
1553		if (likely(!ret)) {
1554			if (!do_swap_account)
1555				break;
1556			ret = res_counter_charge(&mem->memsw, csize, &fail_res);
1557			if (likely(!ret))
1558				break;
1559			/* mem+swap counter fails */
1560			res_counter_uncharge(&mem->res, csize);
1561			flags |= MEM_CGROUP_RECLAIM_NOSWAP;
1562			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1563									memsw);
1564		} else
1565			/* mem counter fails */
1566			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1567									res);
1568
1569		/* reduce request size and retry */
1570		if (csize > PAGE_SIZE) {
1571			csize = PAGE_SIZE;
1572			continue;
1573		}
1574		if (!(gfp_mask & __GFP_WAIT))
1575			goto nomem;
1576
1577		ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
1578						gfp_mask, flags);
1579		if (ret)
1580			continue;
1581
1582		/*
1583		 * try_to_free_mem_cgroup_pages() might not give us a full
1584		 * picture of reclaim. Some pages are reclaimed and might be
1585		 * moved to swap cache or just unmapped from the cgroup.
1586		 * Check the limit again to see if the reclaim reduced the
1587		 * current usage of the cgroup before giving up
1588		 *
1589		 */
1590		if (mem_cgroup_check_under_limit(mem_over_limit))
1591			continue;
1592
1593		/* try to avoid oom while someone is moving charge */
1594		if (mc.moving_task && current != mc.moving_task) {
1595			struct mem_cgroup *from, *to;
1596			bool do_continue = false;
1597			/*
1598			 * There is a small race that "from" or "to" can be
1599			 * freed by rmdir, so we use css_tryget().
1600			 */
1601			rcu_read_lock();
1602			from = mc.from;
1603			to = mc.to;
1604			if (from && css_tryget(&from->css)) {
1605				if (mem_over_limit->use_hierarchy)
1606					do_continue = css_is_ancestor(
1607							&from->css,
1608							&mem_over_limit->css);
1609				else
1610					do_continue = (from == mem_over_limit);
1611				css_put(&from->css);
1612			}
1613			if (!do_continue && to && css_tryget(&to->css)) {
1614				if (mem_over_limit->use_hierarchy)
1615					do_continue = css_is_ancestor(
1616							&to->css,
1617							&mem_over_limit->css);
1618				else
1619					do_continue = (to == mem_over_limit);
1620				css_put(&to->css);
1621			}
1622			rcu_read_unlock();
1623			if (do_continue) {
1624				DEFINE_WAIT(wait);
1625				prepare_to_wait(&mc.waitq, &wait,
1626							TASK_INTERRUPTIBLE);
1627				/* moving charge context might have finished. */
1628				if (mc.moving_task)
1629					schedule();
1630				finish_wait(&mc.waitq, &wait);
1631				continue;
1632			}
1633		}
1634
1635		if (!nr_retries--) {
1636			if (!oom)
1637				goto nomem;
1638			if (mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) {
1639				nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1640				continue;
1641			}
1642			/* When we reach here, the current task is dying. */
1643			css_put(&mem->css);
1644			goto bypass;
1645		}
1646	}
1647	if (csize > PAGE_SIZE)
1648		refill_stock(mem, csize - PAGE_SIZE);
1649done:
1650	return 0;
1651nomem:
1652	css_put(&mem->css);
1653	return -ENOMEM;
1654bypass:
1655	*memcg = NULL;
1656	return 0;
1657}
1658
1659/*
1660 * Sometimes we have to undo a charge we got by try_charge().
1661 * This function is for that; it uncharges and puts the css refcnt
1662 * gotten by try_charge().
1663 */
1664static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
1665							unsigned long count)
1666{
1667	if (!mem_cgroup_is_root(mem)) {
1668		res_counter_uncharge(&mem->res, PAGE_SIZE * count);
1669		if (do_swap_account)
1670			res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
1671		VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
1672		WARN_ON_ONCE(count > INT_MAX);
1673		__css_put(&mem->css, (int)count);
1674	}
1675	/* we don't need css_put for root */
1676}
1677
1678static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
1679{
1680	__mem_cgroup_cancel_charge(mem, 1);
1681}
1682
1683/*
1684 * A helper function to get a mem_cgroup from an ID. Must be called under
1685 * rcu_read_lock(). The caller must check css_is_removed() or similar if
1686 * that is a concern. (Dropping a refcnt from swap can be called against a
1687 * removed memcg.)
1688 */
1689static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
1690{
1691	struct cgroup_subsys_state *css;
1692
1693	/* ID 0 is unused ID */
1694	if (!id)
1695		return NULL;
1696	css = css_lookup(&mem_cgroup_subsys, id);
1697	if (!css)
1698		return NULL;
1699	return container_of(css, struct mem_cgroup, css);
1700}
1701
1702struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
1703{
1704	struct mem_cgroup *mem = NULL;
1705	struct page_cgroup *pc;
1706	unsigned short id;
1707	swp_entry_t ent;
1708
1709	VM_BUG_ON(!PageLocked(page));
1710
1711	pc = lookup_page_cgroup(page);
1712	lock_page_cgroup(pc);
1713	if (PageCgroupUsed(pc)) {
1714		mem = pc->mem_cgroup;
1715		if (mem && !css_tryget(&mem->css))
1716			mem = NULL;
1717	} else if (PageSwapCache(page)) {
1718		ent.val = page_private(page);
1719		id = lookup_swap_cgroup(ent);
1720		rcu_read_lock();
1721		mem = mem_cgroup_lookup(id);
1722		if (mem && !css_tryget(&mem->css))
1723			mem = NULL;
1724		rcu_read_unlock();
1725	}
1726	unlock_page_cgroup(pc);
1727	return mem;
1728}
1729
1730/*
1731 * Commit a charge got by __mem_cgroup_try_charge() and make the page_cgroup
1732 * enter the USED state. If it is already USED, uncharge and return.
1733 */
1734
1735static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1736				     struct page_cgroup *pc,
1737				     enum charge_type ctype)
1738{
1739	/* try_charge() can return NULL to *memcg; handle that case here. */
1740	if (!mem)
1741		return;
1742
1743	lock_page_cgroup(pc);
1744	if (unlikely(PageCgroupUsed(pc))) {
1745		unlock_page_cgroup(pc);
1746		mem_cgroup_cancel_charge(mem);
1747		return;
1748	}
1749
1750	pc->mem_cgroup = mem;
1751	/*
1752	 * We access a page_cgroup asynchronously without lock_page_cgroup().
1753	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
1754	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
1755	 * before USED bit, we need memory barrier here.
1756	 * See mem_cgroup_add_lru_list(), etc.
1757 	 */
1758	smp_wmb();
1759	switch (ctype) {
1760	case MEM_CGROUP_CHARGE_TYPE_CACHE:
1761	case MEM_CGROUP_CHARGE_TYPE_SHMEM:
1762		SetPageCgroupCache(pc);
1763		SetPageCgroupUsed(pc);
1764		break;
1765	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1766		ClearPageCgroupCache(pc);
1767		SetPageCgroupUsed(pc);
1768		break;
1769	default:
1770		break;
1771	}
1772
1773	mem_cgroup_charge_statistics(mem, pc, true);
1774
1775	unlock_page_cgroup(pc);
1776	/*
1777	 * "charge_statistics" updated event counter. Then, check it.
1778	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
1779	 * if they exceeds softlimit.
1780	 */
1781	memcg_check_events(mem, pc->page);
1782}
1783
1784/**
1785 * __mem_cgroup_move_account - move account of the page
1786 * @pc:	page_cgroup of the page.
1787 * @from: mem_cgroup which the page is moved from.
1788 * @to:	mem_cgroup which the page is moved to. @from != @to.
1789 * @uncharge: whether we should call uncharge and css_put against @from.
1790 *
1791 * The caller must confirm following.
1792 * - page is not on LRU (isolate_page() is useful.)
1793 * - the pc is locked, used, and ->mem_cgroup points to @from.
1794 *
1795 * This function doesn't do "charge" nor css_get to the new cgroup. That should
1796 * be done by a caller (__mem_cgroup_try_charge would be useful). If @uncharge is
1797 * true, this function does "uncharge" from the old cgroup, but it doesn't if
1798 * @uncharge is false, so a caller should do the "uncharge".
1799 */
1800
1801static void __mem_cgroup_move_account(struct page_cgroup *pc,
1802	struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
1803{
1804	struct page *page;
1805
1806	VM_BUG_ON(from == to);
1807	VM_BUG_ON(PageLRU(pc->page));
1808	VM_BUG_ON(!PageCgroupLocked(pc));
1809	VM_BUG_ON(!PageCgroupUsed(pc));
1810	VM_BUG_ON(pc->mem_cgroup != from);
1811
1812	page = pc->page;
1813	if (page_mapped(page) && !PageAnon(page)) {
1814		/* Update mapped_file data for mem_cgroup */
1815		preempt_disable();
1816		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1817		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1818		preempt_enable();
1819	}
1820	mem_cgroup_charge_statistics(from, pc, false);
1821	if (uncharge)
1822		/* This is not "cancel", but cancel_charge does all we need. */
1823		mem_cgroup_cancel_charge(from);
1824
1825	/* caller should have done css_get */
1826	pc->mem_cgroup = to;
1827	mem_cgroup_charge_statistics(to, pc, true);
1828	/*
1829	 * We charge against "to", which may not have any tasks. Then, "to"
1830	 * can be under rmdir(). But in the current implementation, the callers of
1831	 * this function are just force_empty() and move charge, so it's
1832	 * guaranteed that "to" is never removed. So, we don't check rmdir
1833	 * status here.
1834	 */
1835}
1836
1837/*
1838 * Check whether @pc is valid for moving account and call
1839 * __mem_cgroup_move_account()
1840 */
1841static int mem_cgroup_move_account(struct page_cgroup *pc,
1842		struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
1843{
1844	int ret = -EINVAL;
1845	lock_page_cgroup(pc);
1846	if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
1847		__mem_cgroup_move_account(pc, from, to, uncharge);
1848		ret = 0;
1849	}
1850	unlock_page_cgroup(pc);
1851	/*
1852	 * check events
1853	 */
1854	memcg_check_events(to, pc->page);
1855	memcg_check_events(from, pc->page);
1856	return ret;
1857}
1858
1859/*
1860 * move charges to its parent.
1861 */
1862
1863static int mem_cgroup_move_parent(struct page_cgroup *pc,
1864				  struct mem_cgroup *child,
1865				  gfp_t gfp_mask)
1866{
1867	struct page *page = pc->page;
1868	struct cgroup *cg = child->css.cgroup;
1869	struct cgroup *pcg = cg->parent;
1870	struct mem_cgroup *parent;
1871	int ret;
1872
1873	/* Is ROOT ? */
1874	if (!pcg)
1875		return -EINVAL;
1876
1877	ret = -EBUSY;
1878	if (!get_page_unless_zero(page))
1879		goto out;
1880	if (isolate_lru_page(page))
1881		goto put;
1882
1883	parent = mem_cgroup_from_cont(pcg);
1884	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
1885	if (ret || !parent)
1886		goto put_back;
1887
1888	ret = mem_cgroup_move_account(pc, child, parent, true);
1889	if (ret)
1890		mem_cgroup_cancel_charge(parent);
1891put_back:
1892	putback_lru_page(page);
1893put:
1894	put_page(page);
1895out:
1896	return ret;
1897}
1898
1899/*
1900 * Charge the memory controller for page usage.
1901 * Return
1902 * 0 if the charge was successful
1903 * < 0 if the cgroup is over its limit
1904 */
1905static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1906				gfp_t gfp_mask, enum charge_type ctype,
1907				struct mem_cgroup *memcg)
1908{
1909	struct mem_cgroup *mem;
1910	struct page_cgroup *pc;
1911	int ret;
1912
1913	pc = lookup_page_cgroup(page);
1914	/* can happen at boot */
1915	if (unlikely(!pc))
1916		return 0;
1917	prefetchw(pc);
1918
1919	mem = memcg;
1920	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
1921	if (ret || !mem)
1922		return ret;
1923
1924	__mem_cgroup_commit_charge(mem, pc, ctype);
1925	return 0;
1926}
1927
1928int mem_cgroup_newpage_charge(struct page *page,
1929			      struct mm_struct *mm, gfp_t gfp_mask)
1930{
1931	if (mem_cgroup_disabled())
1932		return 0;
1933	if (PageCompound(page))
1934		return 0;
1935	/*
1936	 * If already mapped, we don't have to account.
1937	 * If page cache, page->mapping has an address_space.
1938	 * But page->mapping may hold a stale anon_vma pointer;
1939	 * detect that with a PageAnon() check. A newly-mapped-anon
1940	 * page's page->mapping is NULL.
1941	 */
1942	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
1943		return 0;
1944	if (unlikely(!mm))
1945		mm = &init_mm;
1946	return mem_cgroup_charge_common(page, mm, gfp_mask,
1947				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1948}
1949
1950static void
1951__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1952					enum charge_type ctype);
1953
1954int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
1955				gfp_t gfp_mask)
1956{
1957	struct mem_cgroup *mem = NULL;
1958	int ret;
1959
1960	if (mem_cgroup_disabled())
1961		return 0;
1962	if (PageCompound(page))
1963		return 0;
1964	/*
1965	 * Corner case handling. This is usually called from add_to_page_cache(),
1966	 * but some filesystems (shmem) precharge the page before calling it
1967	 * and then call add_to_page_cache() with GFP_NOWAIT.
1968	 *
1969	 * In the GFP_NOWAIT case the page may already be pre-charged before
1970	 * add_to_page_cache() (see shmem.c); check for that here and avoid
1971	 * charging twice. (It works but pays a slightly larger cost.)
1972	 * And when the page is SwapCache, swap information must be taken
1973	 * into account. This runs under lock_page() now.
1974	 */
1975	if (!(gfp_mask & __GFP_WAIT)) {
1976		struct page_cgroup *pc;
1977
1978
1979		pc = lookup_page_cgroup(page);
1980		if (!pc)
1981			return 0;
1982		lock_page_cgroup(pc);
1983		if (PageCgroupUsed(pc)) {
1984			unlock_page_cgroup(pc);
1985			return 0;
1986		}
1987		unlock_page_cgroup(pc);
1988	}
1989
1990	if (unlikely(!mm && !mem))
1991		mm = &init_mm;
1992
1993	if (page_is_file_cache(page))
1994		return mem_cgroup_charge_common(page, mm, gfp_mask,
1995				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
1996
1997	/* shmem */
1998	if (PageSwapCache(page)) {
1999		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2000		if (!ret)
2001			__mem_cgroup_commit_charge_swapin(page, mem,
2002					MEM_CGROUP_CHARGE_TYPE_SHMEM);
2003	} else
2004		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2005					MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
2006
2007	return ret;
2008}
2009
2010/*
2011 * During swap-in the sequence is try_charge -> commit or cancel, with the
2012 * page locked. When try_charge() returns successfully, one refcnt on the
2013 * memcg is acquired without a struct page_cgroup. This refcnt is consumed
2014 * by "commit()" or released by "cancel()".
2015 */
2016int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2017				 struct page *page,
2018				 gfp_t mask, struct mem_cgroup **ptr)
2019{
2020	struct mem_cgroup *mem;
2021	int ret;
2022
2023	if (mem_cgroup_disabled())
2024		return 0;
2025
2026	if (!do_swap_account)
2027		goto charge_cur_mm;
2028	/*
2029	 * A racing thread's fault, or swapoff, may have already updated
2030	 * the pte, and even removed page from swap cache: in those cases
2031	 * do_swap_page()'s pte_same() test will fail; but there's also a
2032	 * KSM case which does need to charge the page.
2033	 */
2034	if (!PageSwapCache(page))
2035		goto charge_cur_mm;
2036	mem = try_get_mem_cgroup_from_page(page);
2037	if (!mem)
2038		goto charge_cur_mm;
2039	*ptr = mem;
2040	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
2041	/* drop extra refcnt from tryget */
2042	css_put(&mem->css);
2043	return ret;
2044charge_cur_mm:
2045	if (unlikely(!mm))
2046		mm = &init_mm;
2047	return __mem_cgroup_try_charge(mm, mask, ptr, true);
2048}
2049
2050static void
2051__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2052					enum charge_type ctype)
2053{
2054	struct page_cgroup *pc;
2055
2056	if (mem_cgroup_disabled())
2057		return;
2058	if (!ptr)
2059		return;
2060	cgroup_exclude_rmdir(&ptr->css);
2061	pc = lookup_page_cgroup(page);
2062	mem_cgroup_lru_del_before_commit_swapcache(page);
2063	__mem_cgroup_commit_charge(ptr, pc, ctype);
2064	mem_cgroup_lru_add_after_commit_swapcache(page);
2065	/*
2066	 * Now the swapped-in page is in memory. This means the page may be
2067	 * counted both as mem and swap (a double count).
2068	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2069	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
2070	 * may call delete_from_swap_cache() before we reach here.
2071	 */
2072	if (do_swap_account && PageSwapCache(page)) {
2073		swp_entry_t ent = {.val = page_private(page)};
2074		unsigned short id;
2075		struct mem_cgroup *memcg;
2076
2077		id = swap_cgroup_record(ent, 0);
2078		rcu_read_lock();
2079		memcg = mem_cgroup_lookup(id);
2080		if (memcg) {
2081			/*
2082			 * This recorded memcg may be an obsolete one, so
2083			 * avoid calling css_tryget().
2084			 */
2085			if (!mem_cgroup_is_root(memcg))
2086				res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2087			mem_cgroup_swap_statistics(memcg, false);
2088			mem_cgroup_put(memcg);
2089		}
2090		rcu_read_unlock();
2091	}
2092	/*
2093	 * At swapin, we may charge against a cgroup which has no tasks,
2094	 * so rmdir()->pre_destroy() can be called while we do this charge.
2095	 * In that case, we need to call pre_destroy() again; check it here.
2096	 */
2097	cgroup_release_and_wakeup_rmdir(&ptr->css);
2098}
2099
2100void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2101{
2102	__mem_cgroup_commit_charge_swapin(page, ptr,
2103					MEM_CGROUP_CHARGE_TYPE_MAPPED);
2104}
2105
2106void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2107{
2108	if (mem_cgroup_disabled())
2109		return;
2110	if (!mem)
2111		return;
2112	mem_cgroup_cancel_charge(mem);
2113}
2114
2115static void
2116__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
2117{
2118	struct memcg_batch_info *batch = NULL;
2119	bool uncharge_memsw = true;
2120	/* If swapout, usage of swap doesn't decrease */
2121	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2122		uncharge_memsw = false;
2123	/*
2124	 * do_batch > 0 when unmapping pages or during inode invalidate/truncate.
2125	 * In those cases, pages freed in a row can be expected to belong to
2126	 * the same cgroup, giving us a chance to coalesce uncharges.
2127	 * But we uncharge one by one if this task is being killed by OOM
2128	 * (TIF_MEMDIE), because we want to uncharge as soon as possible.
2129	 */
2130	if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
2131		goto direct_uncharge;
2132
2133	batch = &current->memcg_batch;
2134	/*
2135	 * Usually we do css_get() when we remember a memcg pointer.
2136	 * But in this case, we keep res->usage until the end of a series of
2137	 * uncharges, so it's OK to ignore the memcg's refcnt.
2138	 */
2139	if (!batch->memcg)
2140		batch->memcg = mem;
2141	/*
2142	 * In the typical case, batch->memcg == mem. This means we can
2143	 * merge a series of uncharges into a single res_counter uncharge.
2144	 * If not, we uncharge the res_counter one by one.
2145	 */
2146	if (batch->memcg != mem)
2147		goto direct_uncharge;
2148	/* remember freed charge and uncharge it later */
2149	batch->bytes += PAGE_SIZE;
2150	if (uncharge_memsw)
2151		batch->memsw_bytes += PAGE_SIZE;
2152	return;
2153direct_uncharge:
2154	res_counter_uncharge(&mem->res, PAGE_SIZE);
2155	if (uncharge_memsw)
2156		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
2157	return;
2158}
2159
2160/*
2161 * uncharge if !page_mapped(page)
2162 */
2163static struct mem_cgroup *
2164__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2165{
2166	struct page_cgroup *pc;
2167	struct mem_cgroup *mem = NULL;
2168	struct mem_cgroup_per_zone *mz;
2169
2170	if (mem_cgroup_disabled())
2171		return NULL;
2172
2173	if (PageSwapCache(page))
2174		return NULL;
2175
2176	/*
2177	 * Check if our page_cgroup is valid
2178	 */
2179	pc = lookup_page_cgroup(page);
2180	if (unlikely(!pc || !PageCgroupUsed(pc)))
2181		return NULL;
2182
2183	lock_page_cgroup(pc);
2184
2185	mem = pc->mem_cgroup;
2186
2187	if (!PageCgroupUsed(pc))
2188		goto unlock_out;
2189
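	/*
	 * Depending on the charge type, decide whether this page can
	 * really be uncharged now; still-mapped pages keep their charge.
	 */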
2190	switch (ctype) {
2191	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2192	case MEM_CGROUP_CHARGE_TYPE_DROP:
2193		if (page_mapped(page))
2194			goto unlock_out;
2195		break;
2196	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2197		if (!PageAnon(page)) {	/* Shared memory */
2198			if (page->mapping && !page_is_file_cache(page))
2199				goto unlock_out;
2200		} else if (page_mapped(page)) /* Anon */
2201				goto unlock_out;
2202		break;
2203	default:
2204		break;
2205	}
2206
2207	if (!mem_cgroup_is_root(mem))
2208		__do_uncharge(mem, ctype);
2209	if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2210		mem_cgroup_swap_statistics(mem, true);
2211	mem_cgroup_charge_statistics(mem, pc, false);
2212
2213	ClearPageCgroupUsed(pc);
2214	/*
2215	 * pc->mem_cgroup is not cleared here. It will be accessed when the
2216	 * page is freed from the LRU. This is safe because an uncharged page
2217	 * is expected not to be reused (it is freed soon). The exception is
2218	 * SwapCache, which is handled by special functions.
2219	 */
2220
2221	mz = page_cgroup_zoneinfo(pc);
2222	unlock_page_cgroup(pc);
2223
2224	memcg_check_events(mem, page);
2225	/* at swapout, this memcg will be accessed to record to swap */
2226	if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2227		css_put(&mem->css);
2228
2229	return mem;
2230
2231unlock_out:
2232	unlock_page_cgroup(pc);
2233	return NULL;
2234}
2235
2236void mem_cgroup_uncharge_page(struct page *page)
2237{
2238	/* early check. */
2239	if (page_mapped(page))
2240		return;
2241	if (page->mapping && !PageAnon(page))
2242		return;
2243	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
2244}
2245
2246void mem_cgroup_uncharge_cache_page(struct page *page)
2247{
2248	VM_BUG_ON(page_mapped(page));
2249	VM_BUG_ON(page->mapping);
2250	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
2251}
2252
2253/*
2254 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
2255 * In those cases, pages are freed continuously and we can expect them to
2256 * be in the same memcg. Each of these callers itself limits the number of
2257 * pages freed at once, so uncharge_start/end() is called properly.
2258 * This may be called more than once in a given context.
2259 */
2260
2261void mem_cgroup_uncharge_start(void)
2262{
2263	current->memcg_batch.do_batch++;
2264	/* We can do nest. */
2265	if (current->memcg_batch.do_batch == 1) {
2266		current->memcg_batch.memcg = NULL;
2267		current->memcg_batch.bytes = 0;
2268		current->memcg_batch.memsw_bytes = 0;
2269	}
2270}
2271
2272void mem_cgroup_uncharge_end(void)
2273{
2274	struct memcg_batch_info *batch = &current->memcg_batch;
2275
2276	if (!batch->do_batch)
2277		return;
2278
2279	batch->do_batch--;
2280	if (batch->do_batch) /* If stacked, do nothing. */
2281		return;
2282
2283	if (!batch->memcg)
2284		return;
2285	/*
2286	 * This "batch->memcg" is valid without any css_get/put etc.,
2287	 * because we hide charges behind us.
2288	 */
2289	if (batch->bytes)
2290		res_counter_uncharge(&batch->memcg->res, batch->bytes);
2291	if (batch->memsw_bytes)
2292		res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
2293	/* forget this pointer (for sanity check) */
2294	batch->memcg = NULL;
2295}
2296
2297#ifdef CONFIG_SWAP
2298/*
2299 * Called after __delete_from_swap_cache(); drops the "page" account.
2300 * The memcg information is recorded in the swap_cgroup of "ent".
2301 */
2302void
2303mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
2304{
2305	struct mem_cgroup *memcg;
2306	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
2307
2308	if (!swapout) /* this was a swap cache but the swap is unused ! */
2309		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
2310
2311	memcg = __mem_cgroup_uncharge_common(page, ctype);
2312
2313	/* record memcg information */
2314	if (do_swap_account && swapout && memcg) {
2315		swap_cgroup_record(ent, css_id(&memcg->css));
2316		mem_cgroup_get(memcg);
2317	}
2318	if (swapout && memcg)
2319		css_put(&memcg->css);
2320}
2321#endif
2322
2323#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2324/*
2325 * called from swap_entry_free(). remove record in swap_cgroup and
2326 * uncharge "memsw" account.
2327 */
2328void mem_cgroup_uncharge_swap(swp_entry_t ent)
2329{
2330	struct mem_cgroup *memcg;
2331	unsigned short id;
2332
2333	if (!do_swap_account)
2334		return;
2335
2336	id = swap_cgroup_record(ent, 0);
2337	rcu_read_lock();
2338	memcg = mem_cgroup_lookup(id);
2339	if (memcg) {
2340		/*
2341		 * We uncharge this because the swap entry is freed.
2342		 * This memcg may be an obsolete one; we avoid calling css_tryget().
2343		 */
2344		if (!mem_cgroup_is_root(memcg))
2345			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2346		mem_cgroup_swap_statistics(memcg, false);
2347		mem_cgroup_put(memcg);
2348	}
2349	rcu_read_unlock();
2350}
2351
2352/**
2353 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2354 * @entry: swap entry to be moved
2355 * @from:  mem_cgroup which the entry is moved from
2356 * @to:  mem_cgroup which the entry is moved to
2357 * @need_fixup: whether we should fixup res_counters and refcounts.
2358 *
2359 * It succeeds only when the swap_cgroup's record for this entry is the same
2360 * as the mem_cgroup's id of @from.
2361 *
2362 * Returns 0 on success, -EINVAL on failure.
2363 *
2364 * The caller must have charged to @to, IOW, called res_counter_charge() on
2365 * both res and memsw, and called css_get().
2366 */
2367static int mem_cgroup_move_swap_account(swp_entry_t entry,
2368		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2369{
2370	unsigned short old_id, new_id;
2371
2372	old_id = css_id(&from->css);
2373	new_id = css_id(&to->css);
2374
2375	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2376		mem_cgroup_swap_statistics(from, false);
2377		mem_cgroup_swap_statistics(to, true);
2378		/*
2379		 * This function is only called from task migration context now.
2380		 * It postpones res_counter and refcount handling till the end
2381		 * of task migration(mem_cgroup_clear_mc()) for performance
2382		 * of task migration (mem_cgroup_clear_mc()) for performance
2383		 * because if the process that has been moved to @to does
2384		 * swap-in, the refcount of @to might be decreased to 0.
2385		 */
2386		mem_cgroup_get(to);
2387		if (need_fixup) {
2388			if (!mem_cgroup_is_root(from))
2389				res_counter_uncharge(&from->memsw, PAGE_SIZE);
2390			mem_cgroup_put(from);
2391			/*
2392			 * we charged both to->res and to->memsw, so we should
2393			 * uncharge to->res.
2394			 */
2395			if (!mem_cgroup_is_root(to))
2396				res_counter_uncharge(&to->res, PAGE_SIZE);
2397			css_put(&to->css);
2398		}
2399		return 0;
2400	}
2401	return -EINVAL;
2402}
2403#else
2404static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2405		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2406{
2407	return -EINVAL;
2408}
2409#endif
2410
2411/*
2412 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
2413 * page belongs to.
2414 */
2415int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
2416{
2417	struct page_cgroup *pc;
2418	struct mem_cgroup *mem = NULL;
2419	int ret = 0;
2420
2421	if (mem_cgroup_disabled())
2422		return 0;
2423
2424	pc = lookup_page_cgroup(page);
2425	lock_page_cgroup(pc);
2426	if (PageCgroupUsed(pc)) {
2427		mem = pc->mem_cgroup;
2428		css_get(&mem->css);
2429	}
2430	unlock_page_cgroup(pc);
2431
2432	if (mem) {
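	/*
	 * Pre-charge the old page's memcg; the charge taken here is
	 * committed to the surviving page in mem_cgroup_end_migration().
	 */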
2433		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
2434		css_put(&mem->css);
2435	}
2436	*ptr = mem;
2437	return ret;
2438}
2439
2440/* remove redundant charge if migration failed*/
2441void mem_cgroup_end_migration(struct mem_cgroup *mem,
2442		struct page *oldpage, struct page *newpage)
2443{
2444	struct page *target, *unused;
2445	struct page_cgroup *pc;
2446	enum charge_type ctype;
2447
2448	if (!mem)
2449		return;
2450	cgroup_exclude_rmdir(&mem->css);
2451	/* at migration success, oldpage->mapping is NULL. */
2452	if (oldpage->mapping) {
2453		target = oldpage;
2454		unused = NULL;
2455	} else {
2456		target = newpage;
2457		unused = oldpage;
2458	}
2459
2460	if (PageAnon(target))
2461		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2462	else if (page_is_file_cache(target))
2463		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2464	else
2465		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2466
2467	/* unused page is not on radix-tree now. */
2468	if (unused)
2469		__mem_cgroup_uncharge_common(unused, ctype);
2470
2471	pc = lookup_page_cgroup(target);
2472	/*
2473	 * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup,
2474	 * so double-counting is effectively avoided.
2475	 */
2476	__mem_cgroup_commit_charge(mem, pc, ctype);
2477
2478	/*
2479	 * Both oldpage and newpage are still under lock_page(), so we
2480	 * don't have to worry about races in the radix-tree.
2481	 * But we do have to be careful about whether this page is mapped.
2482	 *
2483	 * There is a case for !page_mapped(): at the start of migration,
2484	 * oldpage was mapped, but now it has been zapped.
2485	 * We know the *target* page is not freed/reused under us, and
2486	 * mem_cgroup_uncharge_page() does all the necessary checks.
2487	 */
2488	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
2489		mem_cgroup_uncharge_page(target);
2490	/*
2491	 * At migration, we may charge against a cgroup which has no tasks,
2492	 * so rmdir()->pre_destroy() can be called while we do this charge.
2493	 * In that case, we need to call pre_destroy() again; check it here.
2494	 */
2495	cgroup_release_and_wakeup_rmdir(&mem->css);
2496}
2497
2498/*
2499 * A call to try to shrink memory usage on charge failure at shmem's swapin.
2500 * Calling hierarchical_reclaim is not enough because we should update
2501 * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
2502 * Moreover, considering the hierarchy, we should reclaim from the
2503 * mem_over_limit, not from the memcg which this page would be charged to.
2504 * try_charge_swapin does all of this work properly.
2505 */
2506int mem_cgroup_shmem_charge_fallback(struct page *page,
2507			    struct mm_struct *mm,
2508			    gfp_t gfp_mask)
2509{
2510	struct mem_cgroup *mem = NULL;
2511	int ret;
2512
2513	if (mem_cgroup_disabled())
2514		return 0;
2515
2516	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2517	if (!ret)
2518		mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
2519
2520	return ret;
2521}
2522
2523static DEFINE_MUTEX(set_limit_mutex);
2524
2525static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2526				unsigned long long val)
2527{
2528	int retry_count;
2529	u64 memswlimit;
2530	int ret = 0;
2531	int children = mem_cgroup_count_children(memcg);
2532	u64 curusage, oldusage;
2533
2534	/*
2535	 * To keep hierarchical_reclaim simple, how long we should retry
2536	 * depends on the caller. We set our retry count to be a function
2537	 * of the number of children we should visit in this loop.
2538	 */
2539	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
2540
2541	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2542
2543	while (retry_count) {
2544		if (signal_pending(current)) {
2545			ret = -EINTR;
2546			break;
2547		}
2548		/*
2549		 * Rather than hiding all of this in some function, do it in
2550		 * an open-coded manner so you can see what really happens.
2551		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
2552		 */
2553		mutex_lock(&set_limit_mutex);
2554		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2555		if (memswlimit < val) {
2556			ret = -EINVAL;
2557			mutex_unlock(&set_limit_mutex);
2558			break;
2559		}
2560		ret = res_counter_set_limit(&memcg->res, val);
2561		if (!ret) {
2562			if (memswlimit == val)
2563				memcg->memsw_is_minimum = true;
2564			else
2565				memcg->memsw_is_minimum = false;
2566		}
2567		mutex_unlock(&set_limit_mutex);
2568
2569		if (!ret)
2570			break;
2571
2572		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2573						MEM_CGROUP_RECLAIM_SHRINK);
2574		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2575		/* Usage is reduced ? */
2576		if (curusage >= oldusage)
2577			retry_count--;
2578		else
2579			oldusage = curusage;
2580	}
2581
2582	return ret;
2583}
2584
2585static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2586					unsigned long long val)
2587{
2588	int retry_count;
2589	u64 memlimit, oldusage, curusage;
2590	int children = mem_cgroup_count_children(memcg);
2591	int ret = -EBUSY;
2592
2593	/* see mem_cgroup_resize_limit() */
2594	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
2595	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2596	while (retry_count) {
2597		if (signal_pending(current)) {
2598			ret = -EINTR;
2599			break;
2600		}
2601		/*
2602		 * Rather than hiding all of this in some function, do it in
2603		 * an open-coded manner so you can see what really happens.
2604		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
2605		 */
2606		mutex_lock(&set_limit_mutex);
2607		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2608		if (memlimit > val) {
2609			ret = -EINVAL;
2610			mutex_unlock(&set_limit_mutex);
2611			break;
2612		}
2613		ret = res_counter_set_limit(&memcg->memsw, val);
2614		if (!ret) {
2615			if (memlimit == val)
2616				memcg->memsw_is_minimum = true;
2617			else
2618				memcg->memsw_is_minimum = false;
2619		}
2620		mutex_unlock(&set_limit_mutex);
2621
2622		if (!ret)
2623			break;
2624
2625		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2626						MEM_CGROUP_RECLAIM_NOSWAP |
2627						MEM_CGROUP_RECLAIM_SHRINK);
2628		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2629		/* Usage is reduced ? */
2630		if (curusage >= oldusage)
2631			retry_count--;
2632		else
2633			oldusage = curusage;
2634	}
2635	return ret;
2636}
2637
2638unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2639						gfp_t gfp_mask, int nid,
2640						int zid)
2641{
2642	unsigned long nr_reclaimed = 0;
2643	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2644	unsigned long reclaimed;
2645	int loop = 0;
2646	struct mem_cgroup_tree_per_zone *mctz;
2647	unsigned long long excess;
2648
2649	if (order > 0)
2650		return 0;
2651
2652	mctz = soft_limit_tree_node_zone(nid, zid);
2653	/*
2654	 * This loop can run for a while, especially if mem_cgroups continuously
2655	 * keep exceeding their soft limit and putting the system under
2656	 * pressure.
2657	 */
2658	do {
2659		if (next_mz)
2660			mz = next_mz;
2661		else
2662			mz = mem_cgroup_largest_soft_limit_node(mctz);
2663		if (!mz)
2664			break;
2665
2666		reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
2667						gfp_mask,
2668						MEM_CGROUP_RECLAIM_SOFT);
2669		nr_reclaimed += reclaimed;
2670		spin_lock(&mctz->lock);
2671
2672		/*
2673		 * If we failed to reclaim anything from this memory cgroup,
2674		 * it is time to move on to the next cgroup.
2675		 */
2676		next_mz = NULL;
2677		if (!reclaimed) {
2678			do {
2679				/*
2680				 * Loop until we find yet another one.
2681				 *
2682				 * By the time we get the soft_limit lock
2683				 * again, someone might have added the
2684				 * group back on the RB tree. Iterate to
2685				 * make sure we get a different mem.
2686				 * mem_cgroup_largest_soft_limit_node returns
2687				 * NULL if no other cgroup is present on
2688				 * the tree
2689				 */
2690				next_mz =
2691				__mem_cgroup_largest_soft_limit_node(mctz);
2692				if (next_mz == mz) {
2693					css_put(&next_mz->mem->css);
2694					next_mz = NULL;
2695				} else /* next_mz == NULL or other memcg */
2696					break;
2697			} while (1);
2698		}
2699		__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
2700		excess = res_counter_soft_limit_excess(&mz->mem->res);
2701		/*
2702		 * One school of thought says that we should not add
2703		 * the node back to the tree if reclaim returns 0.
2704		 * But our reclaim could return 0 simply because, due
2705		 * to priority, we are exposing a smaller subset of
2706		 * memory to reclaim from. Consider this a longer
2707		 * term TODO.
2708		 */
2709		/* If excess == 0, no tree ops */
2710		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
2711		spin_unlock(&mctz->lock);
2712		css_put(&mz->mem->css);
2713		loop++;
2714		/*
2715		 * Could not reclaim anything and there are no more
2716		 * mem cgroups to try or we seem to be looping without
2717		 * reclaiming anything.
2718		 */
2719		if (!nr_reclaimed &&
2720			(next_mz == NULL ||
2721			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2722			break;
2723	} while (!nr_reclaimed);
2724	if (next_mz)
2725		css_put(&next_mz->mem->css);
2726	return nr_reclaimed;
2727}
2728
2729/*
2730 * This routine traverses the page_cgroups on the given list and drops them all.
2731 * *And* this routine doesn't reclaim the pages themselves, it just removes the page_cgroups.
2732 */
2733static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
2734				int node, int zid, enum lru_list lru)
2735{
2736	struct zone *zone;
2737	struct mem_cgroup_per_zone *mz;
2738	struct page_cgroup *pc, *busy;
2739	unsigned long flags, loop;
2740	struct list_head *list;
2741	int ret = 0;
2742
2743	zone = &NODE_DATA(node)->node_zones[zid];
2744	mz = mem_cgroup_zoneinfo(mem, node, zid);
2745	list = &mz->lists[lru];
2746
2747	loop = MEM_CGROUP_ZSTAT(mz, lru);
2748	/* give some margin against EBUSY etc...*/
2749	loop += 256;
2750	busy = NULL;
2751	while (loop--) {
2752		ret = 0;
2753		spin_lock_irqsave(&zone->lru_lock, flags);
2754		if (list_empty(list)) {
2755			spin_unlock_irqrestore(&zone->lru_lock, flags);
2756			break;
2757		}
2758		pc = list_entry(list->prev, struct page_cgroup, lru);
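		/*
		 * If we picked the same "busy" pc again, rotate it to the
		 * head of the list so the next pass tries a different one.
		 */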
2759		if (busy == pc) {
2760			list_move(&pc->lru, list);
2761			busy = NULL;
2762			spin_unlock_irqrestore(&zone->lru_lock, flags);
2763			continue;
2764		}
2765		spin_unlock_irqrestore(&zone->lru_lock, flags);
2766
2767		ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
2768		if (ret == -ENOMEM)
2769			break;
2770
2771		if (ret == -EBUSY || ret == -EINVAL) {
2772			/* found lock contention or "pc" is obsolete. */
2773			busy = pc;
2774			cond_resched();
2775		} else
2776			busy = NULL;
2777	}
2778
2779	if (!ret && !list_empty(list))
2780		return -EBUSY;
2781	return ret;
2782}
2783
2784/*
2785 * Make the mem_cgroup's charge 0 if there are no tasks.
2786 * This enables deleting this mem_cgroup.
2787 */
2788static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
2789{
2790	int ret;
2791	int node, zid, shrink;
2792	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2793	struct cgroup *cgrp = mem->css.cgroup;
2794
2795	css_get(&mem->css);
2796
2797	shrink = 0;
2798	/* should free all ? */
2799	if (free_all)
2800		goto try_to_free;
2801move_account:
2802	do {
2803		ret = -EBUSY;
2804		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
2805			goto out;
2806		ret = -EINTR;
2807		if (signal_pending(current))
2808			goto out;
2809		/* This is for making all *used* pages be on the LRU. */
2810		lru_add_drain_all();
2811		drain_all_stock_sync();
2812		ret = 0;
2813		for_each_node_state(node, N_HIGH_MEMORY) {
2814			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
2815				enum lru_list l;
2816				for_each_lru(l) {
2817					ret = mem_cgroup_force_empty_list(mem,
2818							node, zid, l);
2819					if (ret)
2820						break;
2821				}
2822			}
2823			if (ret)
2824				break;
2825		}
2826		/* it seems parent cgroup doesn't have enough mem */
2827		if (ret == -ENOMEM)
2828			goto try_to_free;
2829		cond_resched();
2830	/* "ret" should also be checked to ensure all lists are empty. */
2831	} while (mem->res.usage > 0 || ret);
2832out:
2833	css_put(&mem->css);
2834	return ret;
2835
2836try_to_free:
2837	/* returns EBUSY if there is a task or if we come here twice. */
2838	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
2839		ret = -EBUSY;
2840		goto out;
2841	}
2842	/* we call try-to-free pages to make this cgroup empty */
2843	lru_add_drain_all();
2844	/* try to free all pages in this cgroup */
2845	shrink = 1;
2846	while (nr_retries && mem->res.usage > 0) {
2847		int progress;
2848
2849		if (signal_pending(current)) {
2850			ret = -EINTR;
2851			goto out;
2852		}
2853		progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
2854						false, get_swappiness(mem));
2855		if (!progress) {
2856			nr_retries--;
2857			/* maybe some writeback is necessary */
2858			congestion_wait(BLK_RW_ASYNC, HZ/10);
2859		}
2860
2861	}
2862	lru_add_drain();
2863	/* try move_account...there may be some *locked* pages. */
2864	goto move_account;
2865}
2866
2867int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
2868{
2869	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
2870}
2871
2872
2873static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
2874{
2875	return mem_cgroup_from_cont(cont)->use_hierarchy;
2876}
2877
2878static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
2879					u64 val)
2880{
2881	int retval = 0;
2882	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2883	struct cgroup *parent = cont->parent;
2884	struct mem_cgroup *parent_mem = NULL;
2885
2886	if (parent)
2887		parent_mem = mem_cgroup_from_cont(parent);
2888
2889	cgroup_lock();
2890	/*
2891	 * If parent's use_hierarchy is set, we can't make any modifications
2892	 * in the child subtrees. If it is unset, then the change can
2893	 * occur, provided the current cgroup has no children.
2894	 *
2895	 * For the root cgroup, parent_mem is NULL, so we allow the value to
2896	 * be set if there are no children.
2897	 */
2898	if ((!parent_mem || !parent_mem->use_hierarchy) &&
2899				(val == 1 || val == 0)) {
2900		if (list_empty(&cont->children))
2901			mem->use_hierarchy = val;
2902		else
2903			retval = -EBUSY;
2904	} else
2905		retval = -EINVAL;
2906	cgroup_unlock();
2907
2908	return retval;
2909}
2910
2911struct mem_cgroup_idx_data {
2912	s64 val;
2913	enum mem_cgroup_stat_index idx;
2914};
2915
2916static int
2917mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
2918{
2919	struct mem_cgroup_idx_data *d = data;
2920	d->val += mem_cgroup_read_stat(mem, d->idx);
2921	return 0;
2922}
2923
2924static void
2925mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
2926				enum mem_cgroup_stat_index idx, s64 *val)
2927{
2928	struct mem_cgroup_idx_data d;
2929	d.idx = idx;
2930	d.val = 0;
2931	mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
2932	*val = d.val;
2933}
2934
2935static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
2936{
2937	u64 idx_val, val;
2938
2939	if (!mem_cgroup_is_root(mem)) {
2940		if (!swap)
2941			return res_counter_read_u64(&mem->res, RES_USAGE);
2942		else
2943			return res_counter_read_u64(&mem->memsw, RES_USAGE);
2944	}
2945
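	/*
	 * For the root cgroup the res_counters are not charged (see the
	 * mem_cgroup_is_root() checks in the charge/uncharge paths), so
	 * derive its usage from the hierarchical statistics instead.
	 */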
2946	mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val);
2947	val = idx_val;
2948	mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val);
2949	val += idx_val;
2950
2951	if (swap) {
2952		mem_cgroup_get_recursive_idx_stat(mem,
2953				MEM_CGROUP_STAT_SWAPOUT, &idx_val);
2954		val += idx_val;
2955	}
2956
2957	return val << PAGE_SHIFT;
2958}
2959
2960static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
2961{
2962	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2963	u64 val;
2964	int type, name;
2965
2966	type = MEMFILE_TYPE(cft->private);
2967	name = MEMFILE_ATTR(cft->private);
2968	switch (type) {
2969	case _MEM:
2970		if (name == RES_USAGE)
2971			val = mem_cgroup_usage(mem, false);
2972		else
2973			val = res_counter_read_u64(&mem->res, name);
2974		break;
2975	case _MEMSWAP:
2976		if (name == RES_USAGE)
2977			val = mem_cgroup_usage(mem, true);
2978		else
2979			val = res_counter_read_u64(&mem->memsw, name);
2980		break;
2981	default:
2982		BUG();
2983		break;
2984	}
2985	return val;
2986}
2987/*
2988 * The user of this function is...
2989 * RES_LIMIT.
2990 */
2991static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
2992			    const char *buffer)
2993{
2994	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
2995	int type, name;
2996	unsigned long long val;
2997	int ret;
2998
2999	type = MEMFILE_TYPE(cft->private);
3000	name = MEMFILE_ATTR(cft->private);
3001	switch (name) {
3002	case RES_LIMIT:
3003		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3004			ret = -EINVAL;
3005			break;
3006		}
3007		/* This function does all necessary parse...reuse it */
3008		ret = res_counter_memparse_write_strategy(buffer, &val);
3009		if (ret)
3010			break;
3011		if (type == _MEM)
3012			ret = mem_cgroup_resize_limit(memcg, val);
3013		else
3014			ret = mem_cgroup_resize_memsw_limit(memcg, val);
3015		break;
3016	case RES_SOFT_LIMIT:
3017		ret = res_counter_memparse_write_strategy(buffer, &val);
3018		if (ret)
3019			break;
3020		/*
3021		 * For memsw, soft limits are hard to implement in terms
3022		 * of semantics. For now, we only support soft limits on
3023		 * memory control without swap.
3024		 */
3025		if (type == _MEM)
3026			ret = res_counter_set_soft_limit(&memcg->res, val);
3027		else
3028			ret = -EINVAL;
3029		break;
3030	default:
3031		ret = -EINVAL; /* should be BUG() ? */
3032		break;
3033	}
3034	return ret;
3035}
3036
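/*
 * Walk up the hierarchy and report the smallest memory and memsw limits
 * that apply to @memcg, i.e. the effective hierarchical limits.
 */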
3037static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3038		unsigned long long *mem_limit, unsigned long long *memsw_limit)
3039{
3040	struct cgroup *cgroup;
3041	unsigned long long min_limit, min_memsw_limit, tmp;
3042
3043	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3044	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3045	cgroup = memcg->css.cgroup;
3046	if (!memcg->use_hierarchy)
3047		goto out;
3048
3049	while (cgroup->parent) {
3050		cgroup = cgroup->parent;
3051		memcg = mem_cgroup_from_cont(cgroup);
3052		if (!memcg->use_hierarchy)
3053			break;
3054		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3055		min_limit = min(min_limit, tmp);
3056		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3057		min_memsw_limit = min(min_memsw_limit, tmp);
3058	}
3059out:
3060	*mem_limit = min_limit;
3061	*memsw_limit = min_memsw_limit;
3062	return;
3063}
3064
3065static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3066{
3067	struct mem_cgroup *mem;
3068	int type, name;
3069
3070	mem = mem_cgroup_from_cont(cont);
3071	type = MEMFILE_TYPE(event);
3072	name = MEMFILE_ATTR(event);
3073	switch (name) {
3074	case RES_MAX_USAGE:
3075		if (type == _MEM)
3076			res_counter_reset_max(&mem->res);
3077		else
3078			res_counter_reset_max(&mem->memsw);
3079		break;
3080	case RES_FAILCNT:
3081		if (type == _MEM)
3082			res_counter_reset_failcnt(&mem->res);
3083		else
3084			res_counter_reset_failcnt(&mem->memsw);
3085		break;
3086	}
3087
3088	return 0;
3089}
3090
3091static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3092					struct cftype *cft)
3093{
3094	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3095}
3096
3097#ifdef CONFIG_MMU
3098static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3099					struct cftype *cft, u64 val)
3100{
3101	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3102
3103	if (val >= (1 << NR_MOVE_TYPE))
3104		return -EINVAL;
3105	/*
3106	 * We check this value several times both in can_attach() and
3107	 * attach(), so we need the cgroup lock to prevent this value from
3108	 * becoming inconsistent.
3109	 */
3110	cgroup_lock();
3111	mem->move_charge_at_immigrate = val;
3112	cgroup_unlock();
3113
3114	return 0;
3115}
3116#else
3117static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3118					struct cftype *cft, u64 val)
3119{
3120	return -ENOSYS;
3121}
3122#endif
3123
3124
3125/* For read statistics */
3126enum {
3127	MCS_CACHE,
3128	MCS_RSS,
3129	MCS_FILE_MAPPED,
3130	MCS_PGPGIN,
3131	MCS_PGPGOUT,
3132	MCS_SWAP,
3133	MCS_INACTIVE_ANON,
3134	MCS_ACTIVE_ANON,
3135	MCS_INACTIVE_FILE,
3136	MCS_ACTIVE_FILE,
3137	MCS_UNEVICTABLE,
3138	NR_MCS_STAT,
3139};
3140
3141struct mcs_total_stat {
3142	s64 stat[NR_MCS_STAT];
3143};
3144
3145struct {
3146	char *local_name;
3147	char *total_name;
3148} memcg_stat_strings[NR_MCS_STAT] = {
3149	{"cache", "total_cache"},
3150	{"rss", "total_rss"},
3151	{"mapped_file", "total_mapped_file"},
3152	{"pgpgin", "total_pgpgin"},
3153	{"pgpgout", "total_pgpgout"},
3154	{"swap", "total_swap"},
3155	{"inactive_anon", "total_inactive_anon"},
3156	{"active_anon", "total_active_anon"},
3157	{"inactive_file", "total_inactive_file"},
3158	{"active_file", "total_active_file"},
3159	{"unevictable", "total_unevictable"}
3160};
3161
3162
3163static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
3164{
3165	struct mcs_total_stat *s = data;
3166	s64 val;
3167
3168	/* per cpu stat */
3169	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
3170	s->stat[MCS_CACHE] += val * PAGE_SIZE;
3171	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
3172	s->stat[MCS_RSS] += val * PAGE_SIZE;
3173	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
3174	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
3175	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
3176	s->stat[MCS_PGPGIN] += val;
3177	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
3178	s->stat[MCS_PGPGOUT] += val;
3179	if (do_swap_account) {
3180		val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
3181		s->stat[MCS_SWAP] += val * PAGE_SIZE;
3182	}
3183
3184	/* per zone stat */
3185	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
3186	s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
3187	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
3188	s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
3189	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
3190	s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
3191	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
3192	s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
3193	val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
3194	s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
3195	return 0;
3196}
3197
3198static void
3199mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3200{
3201	mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
3202}
3203
3204static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3205				 struct cgroup_map_cb *cb)
3206{
3207	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
3208	struct mcs_total_stat mystat;
3209	int i;
3210
3211	memset(&mystat, 0, sizeof(mystat));
3212	mem_cgroup_get_local_stat(mem_cont, &mystat);
3213
3214	for (i = 0; i < NR_MCS_STAT; i++) {
3215		if (i == MCS_SWAP && !do_swap_account)
3216			continue;
3217		cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
3218	}
3219
3220	/* Hierarchical information */
3221	{
3222		unsigned long long limit, memsw_limit;
3223		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
3224		cb->fill(cb, "hierarchical_memory_limit", limit);
3225		if (do_swap_account)
3226			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
3227	}
3228
3229	memset(&mystat, 0, sizeof(mystat));
3230	mem_cgroup_get_total_stat(mem_cont, &mystat);
3231	for (i = 0; i < NR_MCS_STAT; i++) {
3232		if (i == MCS_SWAP && !do_swap_account)
3233			continue;
3234		cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
3235	}
3236
3237#ifdef CONFIG_DEBUG_VM
3238	cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
3239
3240	{
3241		int nid, zid;
3242		struct mem_cgroup_per_zone *mz;
3243		unsigned long recent_rotated[2] = {0, 0};
3244		unsigned long recent_scanned[2] = {0, 0};
3245
3246		for_each_online_node(nid)
3247			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3248				mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
3249
3250				recent_rotated[0] +=
3251					mz->reclaim_stat.recent_rotated[0];
3252				recent_rotated[1] +=
3253					mz->reclaim_stat.recent_rotated[1];
3254				recent_scanned[0] +=
3255					mz->reclaim_stat.recent_scanned[0];
3256				recent_scanned[1] +=
3257					mz->reclaim_stat.recent_scanned[1];
3258			}
3259		cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
3260		cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
3261		cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
3262		cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
3263	}
3264#endif
3265
3266	return 0;
3267}
3268
3269static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
3270{
3271	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3272
3273	return get_swappiness(memcg);
3274}
3275
3276static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
3277				       u64 val)
3278{
3279	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3280	struct mem_cgroup *parent;
3281
3282	if (val > 100)
3283		return -EINVAL;
3284
3285	if (cgrp->parent == NULL)
3286		return -EINVAL;
3287
3288	parent = mem_cgroup_from_cont(cgrp->parent);
3289
3290	cgroup_lock();
3291
3292	/* If under hierarchy, only empty-root can set this value */
3293	if ((parent->use_hierarchy) ||
3294	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
3295		cgroup_unlock();
3296		return -EINVAL;
3297	}
3298
3299	spin_lock(&memcg->reclaim_param_lock);
3300	memcg->swappiness = val;
3301	spin_unlock(&memcg->reclaim_param_lock);
3302
3303	cgroup_unlock();
3304
3305	return 0;
3306}
3307
3308static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3309{
3310	struct mem_cgroup_threshold_ary *t;
3311	u64 usage;
3312	int i;
3313
3314	rcu_read_lock();
3315	if (!swap)
3316		t = rcu_dereference(memcg->thresholds);
3317	else
3318		t = rcu_dereference(memcg->memsw_thresholds);
3319
3320	if (!t)
3321		goto unlock;
3322
3323	usage = mem_cgroup_usage(memcg, swap);
3324
3325	/*
3326	 * current_threshold points to the threshold just below usage.
3327	 * If that's not the case, a threshold was crossed after the last
3328	 * call of __mem_cgroup_threshold().
3329	 */
3330	i = atomic_read(&t->current_threshold);
3331
3332	/*
3333	 * Iterate backward over array of thresholds starting from
3334	 * current_threshold and check if a threshold is crossed.
3335	 * If none of thresholds below usage is crossed, we read
3336	 * only one element of the array here.
3337	 */
3338	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3339		eventfd_signal(t->entries[i].eventfd, 1);
3340
3341	/* i = current_threshold + 1 */
3342	i++;
3343
3344	/*
3345	 * Iterate forward over array of thresholds starting from
3346	 * current_threshold+1 and check if a threshold is crossed.
3347	 * If none of thresholds above usage is crossed, we read
3348	 * only one element of the array here.
3349	 */
3350	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3351		eventfd_signal(t->entries[i].eventfd, 1);
3352
3353	/* Update current_threshold */
3354	atomic_set(&t->current_threshold, i - 1);
3355unlock:
3356	rcu_read_unlock();
3357}
3358
3359static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3360{
3361	__mem_cgroup_threshold(memcg, false);
3362	if (do_swap_account)
3363		__mem_cgroup_threshold(memcg, true);
3364}
3365
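/* sort() callback: order thresholds ascending by threshold value */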
3366static int compare_thresholds(const void *a, const void *b)
3367{
3368	const struct mem_cgroup_threshold *_a = a;
3369	const struct mem_cgroup_threshold *_b = b;
3370
3371	return _a->threshold - _b->threshold;
3372}
3373
3374static int mem_cgroup_register_event(struct cgroup *cgrp, struct cftype *cft,
3375		struct eventfd_ctx *eventfd, const char *args)
3376{
3377	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3378	struct mem_cgroup_threshold_ary *thresholds, *thresholds_new;
3379	int type = MEMFILE_TYPE(cft->private);
3380	u64 threshold, usage;
3381	int size;
3382	int i, ret;
3383
3384	ret = res_counter_memparse_write_strategy(args, &threshold);
3385	if (ret)
3386		return ret;
3387
3388	mutex_lock(&memcg->thresholds_lock);
3389	if (type == _MEM)
3390		thresholds = memcg->thresholds;
3391	else if (type == _MEMSWAP)
3392		thresholds = memcg->memsw_thresholds;
3393	else
3394		BUG();
3395
3396	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3397
3398	/* Check if a threshold crossed before adding a new one */
3399	if (thresholds)
3400		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3401
3402	if (thresholds)
3403		size = thresholds->size + 1;
3404	else
3405		size = 1;
3406
3407	/* Allocate memory for new array of thresholds */
3408	thresholds_new = kmalloc(sizeof(*thresholds_new) +
3409			size * sizeof(struct mem_cgroup_threshold),
3410			GFP_KERNEL);
3411	if (!thresholds_new) {
3412		ret = -ENOMEM;
3413		goto unlock;
3414	}
3415	thresholds_new->size = size;
3416
3417	/* Copy thresholds (if any) to new array */
3418	if (thresholds)
3419		memcpy(thresholds_new->entries, thresholds->entries,
3420				thresholds->size *
3421				sizeof(struct mem_cgroup_threshold));
3422	/* Add new threshold */
3423	thresholds_new->entries[size - 1].eventfd = eventfd;
3424	thresholds_new->entries[size - 1].threshold = threshold;
3425
3426	/* Sort thresholds. Registering of new threshold isn't time-critical */
3427	sort(thresholds_new->entries, size,
3428			sizeof(struct mem_cgroup_threshold),
3429			compare_thresholds, NULL);
3430
3431	/* Find current threshold */
3432	atomic_set(&thresholds_new->current_threshold, -1);
3433	for (i = 0; i < size; i++) {
3434		if (thresholds_new->entries[i].threshold < usage) {
3435			/*
3436			 * thresholds_new->current_threshold will not be used
3437			 * until rcu_assign_pointer(), so it's safe to increment
3438			 * it here.
3439			 */
3440			atomic_inc(&thresholds_new->current_threshold);
3441		}
3442	}
3443
3444	if (type == _MEM)
3445		rcu_assign_pointer(memcg->thresholds, thresholds_new);
3446	else
3447		rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
3448
3449	/* To be sure that nobody uses the old thresholds before we free them */
3450	synchronize_rcu();
3451
3452	kfree(thresholds);
3453unlock:
3454	mutex_unlock(&memcg->thresholds_lock);
3455
3456	return ret;
3457}
3458
3459static int mem_cgroup_unregister_event(struct cgroup *cgrp, struct cftype *cft,
3460		struct eventfd_ctx *eventfd)
3461{
3462	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3463	struct mem_cgroup_threshold_ary *thresholds, *thresholds_new;
3464	int type = MEMFILE_TYPE(cft->private);
3465	u64 usage;
3466	int size = 0;
3467	int i, j, ret;
3468
3469	mutex_lock(&memcg->thresholds_lock);
3470	if (type == _MEM)
3471		thresholds = memcg->thresholds;
3472	else if (type == _MEMSWAP)
3473		thresholds = memcg->memsw_thresholds;
3474	else
3475		BUG();
3476
3477	/*
3478	 * Something went wrong if we are trying to unregister a threshold
3479	 * when we don't have any thresholds.
3480	 */
3481	BUG_ON(!thresholds);
3482
3483	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3484
3485	/* Check if a threshold crossed before removing */
3486	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3487
3488	/* Calculate the new number of thresholds */
3489	for (i = 0; i < thresholds->size; i++) {
3490		if (thresholds->entries[i].eventfd != eventfd)
3491			size++;
3492	}
3493
3494	/* Set thresholds array to NULL if we don't have thresholds */
3495	if (!size) {
3496		thresholds_new = NULL;
3497		goto assign;
3498	}
3499
3500	/* Allocate memory for new array of thresholds */
3501	thresholds_new = kmalloc(sizeof(*thresholds_new) +
3502			size * sizeof(struct mem_cgroup_threshold),
3503			GFP_KERNEL);
3504	if (!thresholds_new) {
3505		ret = -ENOMEM;
3506		goto unlock;
3507	}
3508	thresholds_new->size = size;
3509
3510	/* Copy thresholds and find current threshold */
3511	atomic_set(&thresholds_new->current_threshold, -1);
3512	for (i = 0, j = 0; i < thresholds->size; i++) {
3513		if (thresholds->entries[i].eventfd == eventfd)
3514			continue;
3515
3516		thresholds_new->entries[j] = thresholds->entries[i];
3517		if (thresholds_new->entries[j].threshold < usage) {
3518			/*
3519			 * thresholds_new->current_threshold will not be used
3520			 * until rcu_assign_pointer(), so it's safe to increment
3521			 * it here.
3522			 */
3523			atomic_inc(&thresholds_new->current_threshold);
3524		}
3525		j++;
3526	}
3527
3528assign:
3529	if (type == _MEM)
3530		rcu_assign_pointer(memcg->thresholds, thresholds_new);
3531	else
3532		rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
3533
3534	/* To be sure that nobody uses the old thresholds before we free them */
3535	synchronize_rcu();
3536
3537	kfree(thresholds);
3538unlock:
3539	mutex_unlock(&memcg->thresholds_lock);
3540
3541	return ret;
3542}
3543
3544static struct cftype mem_cgroup_files[] = {
3545	{
3546		.name = "usage_in_bytes",
3547		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3548		.read_u64 = mem_cgroup_read,
3549		.register_event = mem_cgroup_register_event,
3550		.unregister_event = mem_cgroup_unregister_event,
3551	},
3552	{
3553		.name = "max_usage_in_bytes",
3554		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3555		.trigger = mem_cgroup_reset,
3556		.read_u64 = mem_cgroup_read,
3557	},
3558	{
3559		.name = "limit_in_bytes",
3560		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3561		.write_string = mem_cgroup_write,
3562		.read_u64 = mem_cgroup_read,
3563	},
3564	{
3565		.name = "soft_limit_in_bytes",
3566		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3567		.write_string = mem_cgroup_write,
3568		.read_u64 = mem_cgroup_read,
3569	},
3570	{
3571		.name = "failcnt",
3572		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3573		.trigger = mem_cgroup_reset,
3574		.read_u64 = mem_cgroup_read,
3575	},
3576	{
3577		.name = "stat",
3578		.read_map = mem_control_stat_show,
3579	},
3580	{
3581		.name = "force_empty",
3582		.trigger = mem_cgroup_force_empty_write,
3583	},
3584	{
3585		.name = "use_hierarchy",
3586		.write_u64 = mem_cgroup_hierarchy_write,
3587		.read_u64 = mem_cgroup_hierarchy_read,
3588	},
3589	{
3590		.name = "swappiness",
3591		.read_u64 = mem_cgroup_swappiness_read,
3592		.write_u64 = mem_cgroup_swappiness_write,
3593	},
3594	{
3595		.name = "move_charge_at_immigrate",
3596		.read_u64 = mem_cgroup_move_charge_read,
3597		.write_u64 = mem_cgroup_move_charge_write,
3598	},
3599};
3600
3601#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3602static struct cftype memsw_cgroup_files[] = {
3603	{
3604		.name = "memsw.usage_in_bytes",
3605		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
3606		.read_u64 = mem_cgroup_read,
3607		.register_event = mem_cgroup_register_event,
3608		.unregister_event = mem_cgroup_unregister_event,
3609	},
3610	{
3611		.name = "memsw.max_usage_in_bytes",
3612		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
3613		.trigger = mem_cgroup_reset,
3614		.read_u64 = mem_cgroup_read,
3615	},
3616	{
3617		.name = "memsw.limit_in_bytes",
3618		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
3619		.write_string = mem_cgroup_write,
3620		.read_u64 = mem_cgroup_read,
3621	},
3622	{
3623		.name = "memsw.failcnt",
3624		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
3625		.trigger = mem_cgroup_reset,
3626		.read_u64 = mem_cgroup_read,
3627	},
3628};
3629
3630static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
3631{
3632	if (!do_swap_account)
3633		return 0;
3634	return cgroup_add_files(cont, ss, memsw_cgroup_files,
3635				ARRAY_SIZE(memsw_cgroup_files));
3636};
3637#else
3638static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
3639{
3640	return 0;
3641}
3642#endif
3643
3644static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
3645{
3646	struct mem_cgroup_per_node *pn;
3647	struct mem_cgroup_per_zone *mz;
3648	enum lru_list l;
3649	int zone, tmp = node;
3650	/*
3651	 * This routine is called against possible nodes.
3652	 * But it's a BUG to call kmalloc() against an offline node.
3653	 *
3654	 * TODO: this routine can waste a lot of memory for nodes which will
3655	 *       never be onlined. It's better to use a memory hotplug callback
3656	 *       function.
3657	 */
3658	if (!node_state(node, N_NORMAL_MEMORY))
3659		tmp = -1;
3660	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
3661	if (!pn)
3662		return 1;
3663
3664	mem->info.nodeinfo[node] = pn;
3665	memset(pn, 0, sizeof(*pn));
3666
3667	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3668		mz = &pn->zoneinfo[zone];
3669		for_each_lru(l)
3670			INIT_LIST_HEAD(&mz->lists[l]);
3671		mz->usage_in_excess = 0;
3672		mz->on_tree = false;
3673		mz->mem = mem;
3674	}
3675	return 0;
3676}
3677
3678static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
3679{
3680	kfree(mem->info.nodeinfo[node]);
3681}
3682
3683static struct mem_cgroup *mem_cgroup_alloc(void)
3684{
3685	struct mem_cgroup *mem;
3686	int size = sizeof(struct mem_cgroup);
3687
3688	/* Can be very big if MAX_NUMNODES is very big */
3689	if (size < PAGE_SIZE)
3690		mem = kmalloc(size, GFP_KERNEL);
3691	else
3692		mem = vmalloc(size);
3693
3694	if (!mem)
3695		return NULL;
3696	memset(mem, 0, size);
3696	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
3697	if (!mem->stat) {
3698		if (size < PAGE_SIZE)
3699			kfree(mem);
3700		else
3701			vfree(mem);
3702		mem = NULL;
3703	}
3704	return mem;
3705}
3706
3707/*
3708 * When destroying a mem_cgroup, references from swap_cgroup can remain.
3709 * (Scanning them all at force_empty is too costly...)
3710 *
3711 * Instead of clearing all references at force_empty, we remember
3712 * the number of references from swap_cgroup and free the mem_cgroup when
3713 * it goes down to 0.
3714 *
3715 * Removal of the cgroup itself succeeds regardless of refs from swap.
3716 */
3717
3718static void __mem_cgroup_free(struct mem_cgroup *mem)
3719{
3720	int node;
3721
3722	mem_cgroup_remove_from_trees(mem);
3723	free_css_id(&mem_cgroup_subsys, &mem->css);
3724
3725	for_each_node_state(node, N_POSSIBLE)
3726		free_mem_cgroup_per_zone_info(mem, node);
3727
3728	free_percpu(mem->stat);
3729	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
3730		kfree(mem);
3731	else
3732		vfree(mem);
3733}
3734
3735static void mem_cgroup_get(struct mem_cgroup *mem)
3736{
3737	atomic_inc(&mem->refcnt);
3738}
3739
3740static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
3741{
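	/*
	 * The final put frees the memcg and also drops the reference on
	 * the parent that was taken in mem_cgroup_create().
	 */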
3742	if (atomic_sub_and_test(count, &mem->refcnt)) {
3743		struct mem_cgroup *parent = parent_mem_cgroup(mem);
3744		__mem_cgroup_free(mem);
3745		if (parent)
3746			mem_cgroup_put(parent);
3747	}
3748}
3749
3750static void mem_cgroup_put(struct mem_cgroup *mem)
3751{
3752	__mem_cgroup_put(mem, 1);
3753}
3754
3755/*
3756 * Returns the parent mem_cgroup in the memcg hierarchy, with hierarchy enabled.
3757 */
3758static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
3759{
3760	if (!mem->res.parent)
3761		return NULL;
3762	return mem_cgroup_from_res_counter(mem->res.parent, res);
3763}
3764
3765#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3766static void __init enable_swap_cgroup(void)
3767{
3768	if (!mem_cgroup_disabled() && really_do_swap_account)
3769		do_swap_account = 1;
3770}
3771#else
3772static void __init enable_swap_cgroup(void)
3773{
3774}
3775#endif
3776
3777static int mem_cgroup_soft_limit_tree_init(void)
3778{
3779	struct mem_cgroup_tree_per_node *rtpn;
3780	struct mem_cgroup_tree_per_zone *rtpz;
3781	int tmp, node, zone;
3782
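	/* Allocate a per-zone RB-tree root for every possible node. */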
3783	for_each_node_state(node, N_POSSIBLE) {
3784		tmp = node;
3785		if (!node_state(node, N_NORMAL_MEMORY))
3786			tmp = -1;
3787		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
3788		if (!rtpn)
3789			return 1;
3790
3791		soft_limit_tree.rb_tree_per_node[node] = rtpn;
3792
3793		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3794			rtpz = &rtpn->rb_tree_per_zone[zone];
3795			rtpz->rb_root = RB_ROOT;
3796			spin_lock_init(&rtpz->lock);
3797		}
3798	}
3799	return 0;
3800}
3801
3802static struct cgroup_subsys_state * __ref
3803mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
3804{
3805	struct mem_cgroup *mem, *parent;
3806	long error = -ENOMEM;
3807	int node;
3808
3809	mem = mem_cgroup_alloc();
3810	if (!mem)
3811		return ERR_PTR(error);
3812
3813	for_each_node_state(node, N_POSSIBLE)
3814		if (alloc_mem_cgroup_per_zone_info(mem, node))
3815			goto free_out;
3816
3817	/* root ? */
3818	if (cont->parent == NULL) {
3819		int cpu;
3820		enable_swap_cgroup();
3821		parent = NULL;
3822		root_mem_cgroup = mem;
3823		if (mem_cgroup_soft_limit_tree_init())
3824			goto free_out;
3825		for_each_possible_cpu(cpu) {
3826			struct memcg_stock_pcp *stock =
3827						&per_cpu(memcg_stock, cpu);
3828			INIT_WORK(&stock->work, drain_local_stock);
3829		}
3830		hotcpu_notifier(memcg_stock_cpu_callback, 0);
3831	} else {
3832		parent = mem_cgroup_from_cont(cont->parent);
3833		mem->use_hierarchy = parent->use_hierarchy;
3834	}
3835
3836	if (parent && parent->use_hierarchy) {
3837		res_counter_init(&mem->res, &parent->res);
3838		res_counter_init(&mem->memsw, &parent->memsw);
3839		/*
3840		 * We increment refcnt of the parent to ensure that we can
3841		 * safely access it on res_counter_charge/uncharge.
3842		 * This refcnt will be decremented when freeing this
3843		 * mem_cgroup(see mem_cgroup_put).
3844		 */
3845		mem_cgroup_get(parent);
3846	} else {
3847		res_counter_init(&mem->res, NULL);
3848		res_counter_init(&mem->memsw, NULL);
3849	}
3850	mem->last_scanned_child = 0;
3851	spin_lock_init(&mem->reclaim_param_lock);
3852
3853	if (parent)
3854		mem->swappiness = get_swappiness(parent);
3855	atomic_set(&mem->refcnt, 1);
3856	mem->move_charge_at_immigrate = 0;
3857	mutex_init(&mem->thresholds_lock);
3858	return &mem->css;
3859free_out:
3860	__mem_cgroup_free(mem);
3861	root_mem_cgroup = NULL;
3862	return ERR_PTR(error);
3863}
3864
3865static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
3866					struct cgroup *cont)
3867{
3868	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3869
3870	return mem_cgroup_force_empty(mem, false);
3871}
3872
3873static void mem_cgroup_destroy(struct cgroup_subsys *ss,
3874				struct cgroup *cont)
3875{
3876	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3877
3878	mem_cgroup_put(mem);
3879}
3880
3881static int mem_cgroup_populate(struct cgroup_subsys *ss,
3882				struct cgroup *cont)
3883{
3884	int ret;
3885
3886	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
3887				ARRAY_SIZE(mem_cgroup_files));
3888
3889	if (!ret)
3890		ret = register_memsw_files(cont, ss);
3891	return ret;
3892}
3893
3894#ifdef CONFIG_MMU
3895/* Handlers for move charge at task migration. */
3896#define PRECHARGE_COUNT_AT_ONCE	256
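/*
 * Precharge @count pages against mc.to.  Root needs no res_counter charge.
 * Otherwise try to charge everything with a single res_counter_charge()
 * (plus memsw when swap accounting is on) and, if that fails, fall back to
 * charging page by page via __mem_cgroup_try_charge(), rescheduling every
 * PRECHARGE_COUNT_AT_ONCE iterations and bailing out on a pending signal.
 */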
3897static int mem_cgroup_do_precharge(unsigned long count)
3898{
3899	int ret = 0;
3900	int batch_count = PRECHARGE_COUNT_AT_ONCE;
3901	struct mem_cgroup *mem = mc.to;
3902
3903	if (mem_cgroup_is_root(mem)) {
3904		mc.precharge += count;
3905		/* we don't need css_get for root */
3906		return ret;
3907	}
3908	/* try to charge at once */
3909	if (count > 1) {
3910		struct res_counter *dummy;
3911		/*
3912		 * "mem" cannot be under rmdir() because we've already checked
3913		 * by cgroup_lock_live_cgroup() that it is not removed and we
3914		 * are still under the same cgroup_mutex. So we can postpone
3915		 * css_get().
3916		 */
3917		if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
3918			goto one_by_one;
3919		if (do_swap_account && res_counter_charge(&mem->memsw,
3920						PAGE_SIZE * count, &dummy)) {
3921			res_counter_uncharge(&mem->res, PAGE_SIZE * count);
3922			goto one_by_one;
3923		}
3924		mc.precharge += count;
3925		VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
3926		WARN_ON_ONCE(count > INT_MAX);
3927		__css_get(&mem->css, (int)count);
3928		return ret;
3929	}
3930one_by_one:
3931	/* fall back to one by one charge */
3932	while (count--) {
3933		if (signal_pending(current)) {
3934			ret = -EINTR;
3935			break;
3936		}
3937		if (!batch_count--) {
3938			batch_count = PRECHARGE_COUNT_AT_ONCE;
3939			cond_resched();
3940		}
3941		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
3942		if (ret || !mem)
3943			/* mem_cgroup_clear_mc() will uncharge it later */
3944			return -ENOMEM;
3945		mc.precharge++;
3946	}
3947	return ret;
3948}
3949
3950/**
3951 * is_target_pte_for_mc - check whether a pte is a valid target for move charge
3952 * @vma: the vma the pte to be checked belongs to
3953 * @addr: the address corresponding to the pte to be checked
3954 * @ptent: the pte to be checked
3955 * @target: where the target page or swap entry will be stored (can be NULL)
3956 *
3957 * Returns
3958 *   0 (MC_TARGET_NONE): the pte is not a target for move charge.
3959 *   1 (MC_TARGET_PAGE): the page corresponding to this pte is a target for
3960 *     move charge.  If @target is not NULL, the page is stored in
3961 *     target->page with an extra reference held (callers must drop it).
3962 *   2 (MC_TARGET_SWAP): the swap entry corresponding to this pte is a
3963 *     target for charge migration.  If @target is not NULL, the entry is
3964 *     stored in target->ent.
3965 *
3966 * Called with pte lock held.
3967 */
3968union mc_target {
3969	struct page	*page;
3970	swp_entry_t	ent;
3971};
3972
3973enum mc_target_type {
3974	MC_TARGET_NONE,	/* not used */
3975	MC_TARGET_PAGE,
3976	MC_TARGET_SWAP,
3977};
3978
3979static int is_target_pte_for_mc(struct vm_area_struct *vma,
3980		unsigned long addr, pte_t ptent, union mc_target *target)
3981{
3982	struct page *page = NULL;
3983	struct page_cgroup *pc;
3984	int ret = 0;
3985	swp_entry_t ent = { .val = 0 };
3986	int usage_count = 0;
3987	bool move_anon = test_bit(MOVE_CHARGE_TYPE_ANON,
3988					&mc.to->move_charge_at_immigrate);
3989
3990	if (!pte_present(ptent)) {
3991		/* TODO: handle swap of shmem/tmpfs */
3992		if (pte_none(ptent) || pte_file(ptent))
3993			return 0;
3994		else if (is_swap_pte(ptent)) {
3995			ent = pte_to_swp_entry(ptent);
3996			if (!move_anon || non_swap_entry(ent))
3997				return 0;
3998			usage_count = mem_cgroup_count_swap_user(ent, &page);
3999		}
4000	} else {
4001		page = vm_normal_page(vma, addr, ptent);
4002		if (!page || !page_mapped(page))
4003			return 0;
4004		/*
4005		 * TODO: We don't move charges of file (including shmem/tmpfs)
4006		 * pages for now.
4007		 */
4008		if (!move_anon || !PageAnon(page))
4009			return 0;
4010		if (!get_page_unless_zero(page))
4011			return 0;
4012		usage_count = page_mapcount(page);
4013	}
4014	if (usage_count > 1) {
4015		/*
4016		 * TODO: We don't move charges of shared (used by multiple
4017		 * processes) pages for now.
4018		 */
4019		if (page)
4020			put_page(page);
4021		return 0;
4022	}
4023	if (page) {
4024		pc = lookup_page_cgroup(page);
4025		/*
4026		 * Do only a loose check here, without the page_cgroup lock;
4027		 * mem_cgroup_move_account() checks whether the pc is valid
4028		 * under the lock.
4029		 */
4030		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
4031			ret = MC_TARGET_PAGE;
4032			if (target)
4033				target->page = page;
4034		}
4035		if (!ret || !target)
4036			put_page(page);
4037	}
4038	/* fall through to the swap entry check */
4039	if (ent.val && do_swap_account && !ret &&
4040			css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
4041		ret = MC_TARGET_SWAP;
4042		if (target)
4043			target->ent = ent;
4044	}
4045	return ret;
4046}
4047
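/*
 * Page-walk callback for the precharge pass: count every pte in
 * [addr, end) that is_target_pte_for_mc() accepts into mc.precharge.
 */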
4048static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4049					unsigned long addr, unsigned long end,
4050					struct mm_walk *walk)
4051{
4052	struct vm_area_struct *vma = walk->private;
4053	pte_t *pte;
4054	spinlock_t *ptl;
4055
4056	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4057	for (; addr != end; pte++, addr += PAGE_SIZE)
4058		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
4059			mc.precharge++;	/* increment precharge temporarily */
4060	pte_unmap_unlock(pte - 1, ptl);
4061	cond_resched();
4062
4063	return 0;
4064}
4065
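/*
 * Walk every VMA of @mm (skipping hugetlb and shared mappings) to count
 * the move-charge candidates, then return the total and reset mc.precharge.
 */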
4066static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4067{
4068	unsigned long precharge;
4069	struct vm_area_struct *vma;
4070
4071	down_read(&mm->mmap_sem);
4072	for (vma = mm->mmap; vma; vma = vma->vm_next) {
4073		struct mm_walk mem_cgroup_count_precharge_walk = {
4074			.pmd_entry = mem_cgroup_count_precharge_pte_range,
4075			.mm = mm,
4076			.private = vma,
4077		};
4078		if (is_vm_hugetlb_page(vma))
4079			continue;
4080		/* TODO: We don't move charges of shmem/tmpfs pages for now. */
4081		if (vma->vm_flags & VM_SHARED)
4082			continue;
4083		walk_page_range(vma->vm_start, vma->vm_end,
4084					&mem_cgroup_count_precharge_walk);
4085	}
4086	up_read(&mm->mmap_sem);
4087
4088	precharge = mc.precharge;
4089	mc.precharge = 0;
4090
4091	return precharge;
4092}
4093
4094static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4095{
4096	return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
4097}
4098
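/*
 * Undo or settle the move-charge state: cancel leftover precharges on
 * mc.to, uncharge what was actually moved from mc.from, fix up memsw
 * charges and css/memcg references for moved swap entries, then reset the
 * mc fields and wake up anyone waiting for the move to finish.
 */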
4099static void mem_cgroup_clear_mc(void)
4100{
4101	/* we must uncharge all the leftover precharges from mc.to */
4102	if (mc.precharge) {
4103		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
4104		mc.precharge = 0;
4105	}
4106	/*
4107	 * We didn't uncharge from mc.from in mem_cgroup_move_account(), so
4108	 * we must uncharge it here.
4109	 */
4110	if (mc.moved_charge) {
4111		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
4112		mc.moved_charge = 0;
4113	}
4114	/* we must fixup refcnts and charges */
4115	if (mc.moved_swap) {
4116		WARN_ON_ONCE(mc.moved_swap > INT_MAX);
4117		/* uncharge swap account from the old cgroup */
4118		if (!mem_cgroup_is_root(mc.from))
4119			res_counter_uncharge(&mc.from->memsw,
4120						PAGE_SIZE * mc.moved_swap);
4121		__mem_cgroup_put(mc.from, mc.moved_swap);
4122
4123		if (!mem_cgroup_is_root(mc.to)) {
4124			/*
4125			 * we charged both to->res and to->memsw, so we should
4126			 * uncharge to->res.
4127			 */
4128			res_counter_uncharge(&mc.to->res,
4129						PAGE_SIZE * mc.moved_swap);
4130			VM_BUG_ON(test_bit(CSS_ROOT, &mc.to->css.flags));
4131			__css_put(&mc.to->css, mc.moved_swap);
4132		}
4133		/* we've already done mem_cgroup_get(mc.to) */
4134
4135		mc.moved_swap = 0;
4136	}
4137	mc.from = NULL;
4138	mc.to = NULL;
4139	mc.moving_task = NULL;
4140	wake_up_all(&mc.waitq);
4141}
4142
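/*
 * cgroup can_attach handler: when the destination memcg has
 * move_charge_at_immigrate set and @p owns its mm, record mc.from/mc.to
 * and precharge every candidate pte up front.  If precharging fails, the
 * partially built state is torn down with mem_cgroup_clear_mc().
 */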
4143static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4144				struct cgroup *cgroup,
4145				struct task_struct *p,
4146				bool threadgroup)
4147{
4148	int ret = 0;
4149	struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
4150
4151	if (mem->move_charge_at_immigrate) {
4152		struct mm_struct *mm;
4153		struct mem_cgroup *from = mem_cgroup_from_task(p);
4154
4155		VM_BUG_ON(from == mem);
4156
4157		mm = get_task_mm(p);
4158		if (!mm)
4159			return 0;
4160		/* We move charges only when we move the owner of the mm */
4161		if (mm->owner == p) {
4162			VM_BUG_ON(mc.from);
4163			VM_BUG_ON(mc.to);
4164			VM_BUG_ON(mc.precharge);
4165			VM_BUG_ON(mc.moved_charge);
4166			VM_BUG_ON(mc.moved_swap);
4167			VM_BUG_ON(mc.moving_task);
4168			mc.from = from;
4169			mc.to = mem;
4170			mc.precharge = 0;
4171			mc.moved_charge = 0;
4172			mc.moved_swap = 0;
4173			mc.moving_task = current;
4174
4175			ret = mem_cgroup_precharge_mc(mm);
4176			if (ret)
4177				mem_cgroup_clear_mc();
4178		}
4179		mmput(mm);
4180	}
4181	return ret;
4182}
4183
4184static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4185				struct cgroup *cgroup,
4186				struct task_struct *p,
4187				bool threadgroup)
4188{
4189	mem_cgroup_clear_mc();
4190}
4191
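/*
 * Page-walk callback for the move pass: each moved pte consumes one
 * precharge.  MC_TARGET_PAGE entries are isolated from the LRU and moved
 * with mem_cgroup_move_account(); MC_TARGET_SWAP entries are moved with
 * mem_cgroup_move_swap_account().  When the precharges run out, drop the
 * pte lock, try to precharge one more page and retry from where we stopped.
 */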
4192static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4193				unsigned long addr, unsigned long end,
4194				struct mm_walk *walk)
4195{
4196	int ret = 0;
4197	struct vm_area_struct *vma = walk->private;
4198	pte_t *pte;
4199	spinlock_t *ptl;
4200
4201retry:
4202	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4203	for (; addr != end; addr += PAGE_SIZE) {
4204		pte_t ptent = *(pte++);
4205		union mc_target target;
4206		int type;
4207		struct page *page;
4208		struct page_cgroup *pc;
4209		swp_entry_t ent;
4210
4211		if (!mc.precharge)
4212			break;
4213
4214		type = is_target_pte_for_mc(vma, addr, ptent, &target);
4215		switch (type) {
4216		case MC_TARGET_PAGE:
4217			page = target.page;
4218			if (isolate_lru_page(page))
4219				goto put;
4220			pc = lookup_page_cgroup(page);
4221			if (!mem_cgroup_move_account(pc,
4222						mc.from, mc.to, false)) {
4223				mc.precharge--;
4224				/* we uncharge from mc.from later. */
4225				mc.moved_charge++;
4226			}
4227			putback_lru_page(page);
4228put:			/* is_target_pte_for_mc() gets the page */
4229			put_page(page);
4230			break;
4231		case MC_TARGET_SWAP:
4232			ent = target.ent;
4233			if (!mem_cgroup_move_swap_account(ent,
4234						mc.from, mc.to, false)) {
4235				mc.precharge--;
4236				/* we fixup refcnts and charges later. */
4237				mc.moved_swap++;
4238			}
4239			break;
4240		default:
4241			break;
4242		}
4243	}
4244	pte_unmap_unlock(pte - 1, ptl);
4245	cond_resched();
4246
4247	if (addr != end) {
4248		/*
4249		 * We have consumed all precharges we got in can_attach().
4250		 * We try to charge one by one, but stop making additional
4251		 * charges to mc.to once a charge has failed during the
4252		 * attach() phase.
4253		 */
4254		ret = mem_cgroup_do_precharge(1);
4255		if (!ret)
4256			goto retry;
4257	}
4258
4259	return ret;
4260}
4261
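/*
 * Drain the per-cpu LRU pagevecs so pages can be isolated, then walk every
 * VMA of @mm (again skipping hugetlb and shared mappings) moving charges.
 * The walk is abandoned as soon as an additional one-by-one precharge fails.
 */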
4262static void mem_cgroup_move_charge(struct mm_struct *mm)
4263{
4264	struct vm_area_struct *vma;
4265
4266	lru_add_drain_all();
4267	down_read(&mm->mmap_sem);
4268	for (vma = mm->mmap; vma; vma = vma->vm_next) {
4269		int ret;
4270		struct mm_walk mem_cgroup_move_charge_walk = {
4271			.pmd_entry = mem_cgroup_move_charge_pte_range,
4272			.mm = mm,
4273			.private = vma,
4274		};
4275		if (is_vm_hugetlb_page(vma))
4276			continue;
4277		/* TODO: We don't move charges of shmem/tmpfs pages for now. */
4278		if (vma->vm_flags & VM_SHARED)
4279			continue;
4280		ret = walk_page_range(vma->vm_start, vma->vm_end,
4281						&mem_cgroup_move_charge_walk);
4282		if (ret)
4283			/*
4284			 * This means we have consumed all precharges and failed
4285			 * to do any additional charging.  Just abandon here.
4286			 */
4287			break;
4288	}
4289	up_read(&mm->mmap_sem);
4290}
4291
4292static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4293				struct cgroup *cont,
4294				struct cgroup *old_cont,
4295				struct task_struct *p,
4296				bool threadgroup)
4297{
4298	struct mm_struct *mm;
4299
4300	if (!mc.to)
4301		/* no need to move charge */
4302		return;
4303
4304	mm = get_task_mm(p);
4305	if (mm) {
4306		mem_cgroup_move_charge(mm);
4307		mmput(mm);
4308	}
4309	mem_cgroup_clear_mc();
4310}
4311#else	/* !CONFIG_MMU */
4312static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4313				struct cgroup *cgroup,
4314				struct task_struct *p,
4315				bool threadgroup)
4316{
4317	return 0;
4318}
4319static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4320				struct cgroup *cgroup,
4321				struct task_struct *p,
4322				bool threadgroup)
4323{
4324}
4325static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4326				struct cgroup *cont,
4327				struct cgroup *old_cont,
4328				struct task_struct *p,
4329				bool threadgroup)
4330{
4331}
4332#endif
4333
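/*
 * Memory controller callbacks wired into the cgroup core: create/destroy
 * manage the memcg lifetime, populate adds the control files, and the
 * can_attach/cancel_attach/attach trio implements charge moving at task
 * migration (cancel_attach undoes the precharge when an attach is aborted).
 */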
4334struct cgroup_subsys mem_cgroup_subsys = {
4335	.name = "memory",
4336	.subsys_id = mem_cgroup_subsys_id,
4337	.create = mem_cgroup_create,
4338	.pre_destroy = mem_cgroup_pre_destroy,
4339	.destroy = mem_cgroup_destroy,
4340	.populate = mem_cgroup_populate,
4341	.can_attach = mem_cgroup_can_attach,
4342	.cancel_attach = mem_cgroup_cancel_attach,
4343	.attach = mem_cgroup_move_task,
4344	.early_init = 0,
4345	.use_id = 1,
4346};
4347
4348#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4349
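/*
 * Booting with "noswapaccount" on the kernel command line clears
 * really_do_swap_account, so enable_swap_cgroup() leaves do_swap_account
 * off even when swap accounting support is built in.
 */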
4350static int __init disable_swap_account(char *s)
4351{
4352	really_do_swap_account = 0;
4353	return 1;
4354}
4355__setup("noswapaccount", disable_swap_account);
4356#endif
4357