memcontrol.c revision c137b5ece4b111e46981aae7da77315b9909809f
1/* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 * GNU General Public License for more details.
18 */
19
20#include <linux/res_counter.h>
21#include <linux/memcontrol.h>
22#include <linux/cgroup.h>
23#include <linux/mm.h>
24#include <linux/pagemap.h>
25#include <linux/smp.h>
26#include <linux/page-flags.h>
27#include <linux/backing-dev.h>
28#include <linux/bit_spinlock.h>
29#include <linux/rcupdate.h>
30#include <linux/limits.h>
31#include <linux/mutex.h>
32#include <linux/slab.h>
33#include <linux/swap.h>
34#include <linux/spinlock.h>
35#include <linux/fs.h>
36#include <linux/seq_file.h>
37#include <linux/vmalloc.h>
38#include <linux/mm_inline.h>
39#include <linux/page_cgroup.h>
40#include "internal.h"
41
42#include <asm/uaccess.h>
43
44struct cgroup_subsys mem_cgroup_subsys __read_mostly;
45#define MEM_CGROUP_RECLAIM_RETRIES	5
46
47#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
48/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
49int do_swap_account __read_mostly;
50static int really_do_swap_account __initdata = 1; /* remembers the boot option */
51#else
52#define do_swap_account		(0)
53#endif
54
55static DEFINE_MUTEX(memcg_tasklist);	/* can be held under cgroup_mutex */
56
57/*
58 * Statistics for memory cgroup.
59 */
60enum mem_cgroup_stat_index {
61	/*
62	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
63	 */
64	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
65	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as rss */
66	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
67	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
68
69	MEM_CGROUP_STAT_NSTATS,
70};
71
72struct mem_cgroup_stat_cpu {
73	s64 count[MEM_CGROUP_STAT_NSTATS];
74} ____cacheline_aligned_in_smp;
75
76struct mem_cgroup_stat {
77	struct mem_cgroup_stat_cpu cpustat[0];
78};
79
80/*
81 * For accounting with irqs disabled, there is no need to bump the preempt count.
82 */
83static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
84		enum mem_cgroup_stat_index idx, int val)
85{
86	stat->count[idx] += val;
87}
88
89static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
90		enum mem_cgroup_stat_index idx)
91{
92	int cpu;
93	s64 ret = 0;
94	for_each_possible_cpu(cpu)
95		ret += stat->cpustat[cpu].count[idx];
96	return ret;
97}
98
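/*
 * Local usage of this memcg only (children are not included): the CACHE and
 * RSS counters above, summed over all possible CPUs.
 */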
99static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
100{
101	s64 ret;
102
103	ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
104	ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
105	return ret;
106}
107
108/*
109 * per-zone information in memory controller.
110 */
111struct mem_cgroup_per_zone {
112	/*
113	 * spin_lock to protect the per cgroup LRU
114	 */
115	struct list_head	lists[NR_LRU_LISTS];
116	unsigned long		count[NR_LRU_LISTS];
117
118	struct zone_reclaim_stat reclaim_stat;
119};
120/* Macro for accessing counter */
121#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
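/*
 * For example, MEM_CGROUP_ZSTAT(mz, LRU_ACTIVE_ANON) is the number of pages on
 * this group's per-zone active anon LRU; the LRU add/del helpers below keep
 * count[] and lists[] in sync.
 */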
122
123struct mem_cgroup_per_node {
124	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
125};
126
127struct mem_cgroup_lru_info {
128	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
129};
130
131/*
132 * The memory controller data structure. The memory controller controls both
133 * page cache and RSS per cgroup. We would eventually like to provide
134 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
135 * to help the administrator determine what knobs to tune.
136 *
137 * TODO: Add a water mark for the memory controller. Reclaim will begin when
138 * we hit the water mark. Maybe even add a low water mark, such that
139 * no reclaim occurs from a cgroup at its low water mark; this is
140 * a feature that will be implemented much later in the future.
141 */
142struct mem_cgroup {
143	struct cgroup_subsys_state css;
144	/*
145	 * the counter to account for memory usage
146	 */
147	struct res_counter res;
148	/*
149	 * the counter to account for mem+swap usage.
150	 */
151	struct res_counter memsw;
152	/*
153	 * Per cgroup active and inactive list, similar to the
154	 * per zone LRU lists.
155	 */
156	struct mem_cgroup_lru_info info;
157
158	/*
159	 * protects reclaim-related members
160	 */
161	spinlock_t reclaim_param_lock;
162
163	int	prev_priority;	/* for recording reclaim priority */
164
165	/*
166	 * While reclaiming in a hierarchy, we cache the last child we
167	 * reclaimed from.
168	 */
169	int last_scanned_child;
170	/*
171	 * Should the accounting and control be hierarchical, per subtree?
172	 */
173	bool use_hierarchy;
174	unsigned long	last_oom_jiffies;
175	atomic_t	refcnt;
176
177	unsigned int	swappiness;
178
179	/*
180	 * statistics. This must be placed at the end of memcg.
181	 */
182	struct mem_cgroup_stat stat;
183};
184
185enum charge_type {
186	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
187	MEM_CGROUP_CHARGE_TYPE_MAPPED,
188	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
189	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
190	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
191	NR_CHARGE_TYPE,
192};
193
194/* only for here (for easy reading.) */
195#define PCGF_CACHE	(1UL << PCG_CACHE)
196#define PCGF_USED	(1UL << PCG_USED)
197#define PCGF_LOCK	(1UL << PCG_LOCK)
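/*
 * Initial page_cgroup flags per charge type; for example, an anon page charged
 * as MEM_CGROUP_CHARGE_TYPE_MAPPED starts with USED and LOCK set but not CACHE.
 * __mem_cgroup_commit_charge() assigns these wholesale to pc->flags.
 */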
198static const unsigned long
199pcg_default_flags[NR_CHARGE_TYPE] = {
200	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
201	PCGF_USED | PCGF_LOCK, /* Anon */
202	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
203	0, /* FORCE */
204};
205
206/* for encoding cft->private value on file */
207#define _MEM			(0)
208#define _MEMSWAP		(1)
209#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
210#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
211#define MEMFILE_ATTR(val)	((val) & 0xffff)
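/*
 * Example: MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) packs the counter type into the
 * upper 16 bits and the res_counter member into the lower 16 bits;
 * mem_cgroup_read()/mem_cgroup_write()/mem_cgroup_reset() unpack it again with
 * MEMFILE_TYPE() and MEMFILE_ATTR().
 */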
212
213static void mem_cgroup_get(struct mem_cgroup *mem);
214static void mem_cgroup_put(struct mem_cgroup *mem);
215static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
216
217static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
218					 struct page_cgroup *pc,
219					 bool charge)
220{
221	int val = (charge)? 1 : -1;
222	struct mem_cgroup_stat *stat = &mem->stat;
223	struct mem_cgroup_stat_cpu *cpustat;
224	int cpu = get_cpu();
225
226	cpustat = &stat->cpustat[cpu];
227	if (PageCgroupCache(pc))
228		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
229	else
230		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
231
232	if (charge)
233		__mem_cgroup_stat_add_safe(cpustat,
234				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
235	else
236		__mem_cgroup_stat_add_safe(cpustat,
237				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
238	put_cpu();
239}
240
241static struct mem_cgroup_per_zone *
242mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
243{
244	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
245}
246
247static struct mem_cgroup_per_zone *
248page_cgroup_zoneinfo(struct page_cgroup *pc)
249{
250	struct mem_cgroup *mem = pc->mem_cgroup;
251	int nid = page_cgroup_nid(pc);
252	int zid = page_cgroup_zid(pc);
253
254	if (!mem)
255		return NULL;
256
257	return mem_cgroup_zoneinfo(mem, nid, zid);
258}
259
260static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
261					enum lru_list idx)
262{
263	int nid, zid;
264	struct mem_cgroup_per_zone *mz;
265	u64 total = 0;
266
267	for_each_online_node(nid)
268		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
269			mz = mem_cgroup_zoneinfo(mem, nid, zid);
270			total += MEM_CGROUP_ZSTAT(mz, idx);
271		}
272	return total;
273}
274
275static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
276{
277	return container_of(cgroup_subsys_state(cont,
278				mem_cgroup_subsys_id), struct mem_cgroup,
279				css);
280}
281
282struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
283{
284	/*
285	 * mm_update_next_owner() may clear mm->owner to NULL
286	 * if it races with swapoff, page migration, etc.
287	 * So this can be called with p == NULL.
288	 */
289	if (unlikely(!p))
290		return NULL;
291
292	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
293				struct mem_cgroup, css);
294}
295
296static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
297{
298	struct mem_cgroup *mem = NULL;
299
300	if (!mm)
301		return NULL;
302	/*
303	 * Because we have no locks, mm->owner may be being moved to another
304	 * cgroup. We use css_tryget() here even if this looks
305	 * pessimistic (rather than adding locks here).
306	 */
307	rcu_read_lock();
308	do {
309		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
310		if (unlikely(!mem))
311			break;
312	} while (!css_tryget(&mem->css));
313	rcu_read_unlock();
314	return mem;
315}
316
317static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
318{
319	if (!mem)
320		return true;
321	return css_is_removed(&mem->css);
322}
323
324
325/*
326 * Call the callback function for every cgroup under the hierarchy tree.
327 */
328static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
329			  int (*func)(struct mem_cgroup *, void *))
330{
331	int found, ret, nextid;
332	struct cgroup_subsys_state *css;
333	struct mem_cgroup *mem;
334
335	if (!root->use_hierarchy)
336		return (*func)(root, data);
337
338	nextid = 1;
339	do {
340		ret = 0;
341		mem = NULL;
342
343		rcu_read_lock();
344		css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
345				   &found);
346		if (css && css_tryget(css))
347			mem = container_of(css, struct mem_cgroup, css);
348		rcu_read_unlock();
349
350		if (mem) {
351			ret = (*func)(mem, data);
352			css_put(&mem->css);
353		}
354		nextid = found + 1;
355	} while (!ret && css);
356
357	return ret;
358}
359
360/*
361 * Following LRU functions are allowed to be used without PCG_LOCK.
362 * Operations are called by the global LRU routines independently of memcg.
363 * What we have to take care of here is the validity of pc->mem_cgroup.
364 *
365 * Changes to pc->mem_cgroup happen when
366 * 1. charge
367 * 2. moving account
368 * In the typical case, "charge" is done before add-to-lru. The exception is SwapCache.
369 * It is added to LRU before charge.
370 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
371 * When moving account, the page is not on LRU. It's isolated.
372 */
373
374void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
375{
376	struct page_cgroup *pc;
377	struct mem_cgroup *mem;
378	struct mem_cgroup_per_zone *mz;
379
380	if (mem_cgroup_disabled())
381		return;
382	pc = lookup_page_cgroup(page);
383	/* can happen while we handle swapcache. */
384	if (list_empty(&pc->lru) || !pc->mem_cgroup)
385		return;
386	/*
387	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
388	 * removed from global LRU.
389	 */
390	mz = page_cgroup_zoneinfo(pc);
391	mem = pc->mem_cgroup;
392	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
393	list_del_init(&pc->lru);
394	return;
395}
396
397void mem_cgroup_del_lru(struct page *page)
398{
399	mem_cgroup_del_lru_list(page, page_lru(page));
400}
401
402void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
403{
404	struct mem_cgroup_per_zone *mz;
405	struct page_cgroup *pc;
406
407	if (mem_cgroup_disabled())
408		return;
409
410	pc = lookup_page_cgroup(page);
411	/*
412	 * The Used bit is set without atomic ops, but only after smp_wmb().
413	 * Insert smp_rmb() here to make pc->mem_cgroup visible.
414	 */
415	smp_rmb();
416	/* unused page is not rotated. */
417	if (!PageCgroupUsed(pc))
418		return;
419	mz = page_cgroup_zoneinfo(pc);
420	list_move(&pc->lru, &mz->lists[lru]);
421}
422
423void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
424{
425	struct page_cgroup *pc;
426	struct mem_cgroup_per_zone *mz;
427
428	if (mem_cgroup_disabled())
429		return;
430	pc = lookup_page_cgroup(page);
431	/*
432	 * The Used bit is set without atomic ops, but only after smp_wmb().
433	 * Insert smp_rmb() here to make pc->mem_cgroup visible.
434	 */
435	smp_rmb();
436	if (!PageCgroupUsed(pc))
437		return;
438
439	mz = page_cgroup_zoneinfo(pc);
440	MEM_CGROUP_ZSTAT(mz, lru) += 1;
441	list_add(&pc->lru, &mz->lists[lru]);
442}
443
444/*
445 * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to
446 * the LRU because the page may be reused after it's fully uncharged (because of
447 * SwapCache behavior). To handle that, unlink page_cgroup from the LRU when we
448 * charge it again. This function is only used to charge SwapCache. It's done
449 * under lock_page() and expects that zone->lru_lock is never held.
450 */
451static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
452{
453	unsigned long flags;
454	struct zone *zone = page_zone(page);
455	struct page_cgroup *pc = lookup_page_cgroup(page);
456
457	spin_lock_irqsave(&zone->lru_lock, flags);
458	/*
459	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
460	 * is guarded by lock_page() because the page is SwapCache.
461	 */
462	if (!PageCgroupUsed(pc))
463		mem_cgroup_del_lru_list(page, page_lru(page));
464	spin_unlock_irqrestore(&zone->lru_lock, flags);
465}
466
467static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
468{
469	unsigned long flags;
470	struct zone *zone = page_zone(page);
471	struct page_cgroup *pc = lookup_page_cgroup(page);
472
473	spin_lock_irqsave(&zone->lru_lock, flags);
474	/* link when the page is linked to LRU but page_cgroup isn't */
475	if (PageLRU(page) && list_empty(&pc->lru))
476		mem_cgroup_add_lru_list(page, page_lru(page));
477	spin_unlock_irqrestore(&zone->lru_lock, flags);
478}
479
480
481void mem_cgroup_move_lists(struct page *page,
482			   enum lru_list from, enum lru_list to)
483{
484	if (mem_cgroup_disabled())
485		return;
486	mem_cgroup_del_lru_list(page, from);
487	mem_cgroup_add_lru_list(page, to);
488}
489
490int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
491{
492	int ret;
493	struct mem_cgroup *curr = NULL;
494
495	task_lock(task);
496	rcu_read_lock();
497	curr = try_get_mem_cgroup_from_mm(task->mm);
498	rcu_read_unlock();
499	task_unlock(task);
500	if (!curr)
501		return 0;
502	if (curr->use_hierarchy)
503		ret = css_is_ancestor(&curr->css, &mem->css);
504	else
505		ret = (curr == mem);
506	css_put(&curr->css);
507	return ret;
508}
509
510/*
511 * prev_priority control...this will be used in memory reclaim path.
512 */
513int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
514{
515	int prev_priority;
516
517	spin_lock(&mem->reclaim_param_lock);
518	prev_priority = mem->prev_priority;
519	spin_unlock(&mem->reclaim_param_lock);
520
521	return prev_priority;
522}
523
524void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
525{
526	spin_lock(&mem->reclaim_param_lock);
527	if (priority < mem->prev_priority)
528		mem->prev_priority = priority;
529	spin_unlock(&mem->reclaim_param_lock);
530}
531
532void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
533{
534	spin_lock(&mem->reclaim_param_lock);
535	mem->prev_priority = priority;
536	spin_unlock(&mem->reclaim_param_lock);
537}
538
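/*
 * The inactive ratio scales with the square root of the anon LRU size in GB:
 * with 4KB pages, roughly 4GB of anon memory gives gb = 4 and
 * inactive_ratio = int_sqrt(40) = 6, so mem_cgroup_inactive_anon_is_low()
 * below reports "low" once inactive * 6 < active.
 */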
539static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
540{
541	unsigned long active;
542	unsigned long inactive;
543	unsigned long gb;
544	unsigned long inactive_ratio;
545
546	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
547	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
548
549	gb = (inactive + active) >> (30 - PAGE_SHIFT);
550	if (gb)
551		inactive_ratio = int_sqrt(10 * gb);
552	else
553		inactive_ratio = 1;
554
555	if (present_pages) {
556		present_pages[0] = inactive;
557		present_pages[1] = active;
558	}
559
560	return inactive_ratio;
561}
562
563int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
564{
565	unsigned long active;
566	unsigned long inactive;
567	unsigned long present_pages[2];
568	unsigned long inactive_ratio;
569
570	inactive_ratio = calc_inactive_ratio(memcg, present_pages);
571
572	inactive = present_pages[0];
573	active = present_pages[1];
574
575	if (inactive * inactive_ratio < active)
576		return 1;
577
578	return 0;
579}
580
581unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
582				       struct zone *zone,
583				       enum lru_list lru)
584{
585	int nid = zone->zone_pgdat->node_id;
586	int zid = zone_idx(zone);
587	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
588
589	return MEM_CGROUP_ZSTAT(mz, lru);
590}
591
592struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
593						      struct zone *zone)
594{
595	int nid = zone->zone_pgdat->node_id;
596	int zid = zone_idx(zone);
597	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
598
599	return &mz->reclaim_stat;
600}
601
602struct zone_reclaim_stat *
603mem_cgroup_get_reclaim_stat_from_page(struct page *page)
604{
605	struct page_cgroup *pc;
606	struct mem_cgroup_per_zone *mz;
607
608	if (mem_cgroup_disabled())
609		return NULL;
610
611	pc = lookup_page_cgroup(page);
612	/*
613	 * The Used bit is set without atomic ops, but only after smp_wmb().
614	 * Insert smp_rmb() here to make pc->mem_cgroup visible.
615	 */
616	smp_rmb();
617	if (!PageCgroupUsed(pc))
618		return NULL;
619
620	mz = page_cgroup_zoneinfo(pc);
621	if (!mz)
622		return NULL;
623
624	return &mz->reclaim_stat;
625}
626
627unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
628					struct list_head *dst,
629					unsigned long *scanned, int order,
630					int mode, struct zone *z,
631					struct mem_cgroup *mem_cont,
632					int active, int file)
633{
634	unsigned long nr_taken = 0;
635	struct page *page;
636	unsigned long scan;
637	LIST_HEAD(pc_list);
638	struct list_head *src;
639	struct page_cgroup *pc, *tmp;
640	int nid = z->zone_pgdat->node_id;
641	int zid = zone_idx(z);
642	struct mem_cgroup_per_zone *mz;
643	int lru = LRU_FILE * !!file + !!active;
644
645	BUG_ON(!mem_cont);
646	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
647	src = &mz->lists[lru];
648
649	scan = 0;
650	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
651		if (scan >= nr_to_scan)
652			break;
653
654		page = pc->page;
655		if (unlikely(!PageCgroupUsed(pc)))
656			continue;
657		if (unlikely(!PageLRU(page)))
658			continue;
659
660		scan++;
661		if (__isolate_lru_page(page, mode, file) == 0) {
662			list_move(&page->lru, dst);
663			nr_taken++;
664		}
665	}
666
667	*scanned = scan;
668	return nr_taken;
669}
670
671#define mem_cgroup_from_res_counter(counter, member)	\
672	container_of(counter, struct mem_cgroup, member)
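/*
 * e.g. mem_cgroup_from_res_counter(fail_res, memsw) recovers the memcg whose
 * mem+swap counter failed to charge in __mem_cgroup_try_charge() below.
 */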
673
674static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
675{
676	if (do_swap_account) {
677		if (res_counter_check_under_limit(&mem->res) &&
678			res_counter_check_under_limit(&mem->memsw))
679			return true;
680	} else
681		if (res_counter_check_under_limit(&mem->res))
682			return true;
683	return false;
684}
685
686static unsigned int get_swappiness(struct mem_cgroup *memcg)
687{
688	struct cgroup *cgrp = memcg->css.cgroup;
689	unsigned int swappiness;
690
691	/* root ? */
692	if (cgrp->parent == NULL)
693		return vm_swappiness;
694
695	spin_lock(&memcg->reclaim_param_lock);
696	swappiness = memcg->swappiness;
697	spin_unlock(&memcg->reclaim_param_lock);
698
699	return swappiness;
700}
701
702static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
703{
704	int *val = data;
705	(*val)++;
706	return 0;
707}
708
709/**
710 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
711 * @memcg: The memory cgroup that went over limit
712 * @p: Task that is going to be killed
713 *
714 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
715 * enabled
716 */
717void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
718{
719	struct cgroup *task_cgrp;
720	struct cgroup *mem_cgrp;
721	/*
722	 * Need a buffer in BSS, can't rely on allocations. The code relies
723	 * on the assumption that OOM is serialized for memory controller.
724	 * If this assumption is broken, revisit this code.
725	 */
726	static char memcg_name[PATH_MAX];
727	int ret;
728
729	if (!memcg)
730		return;
731
732
733	rcu_read_lock();
734
735	mem_cgrp = memcg->css.cgroup;
736	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
737
738	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
739	if (ret < 0) {
740		/*
741		 * Unfortunately, we are unable to convert to a useful name,
742		 * but we'll still print out the usage information.
743		 */
744		rcu_read_unlock();
745		goto done;
746	}
747	rcu_read_unlock();
748
749	printk(KERN_INFO "Task in %s killed", memcg_name);
750
751	rcu_read_lock();
752	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
753	if (ret < 0) {
754		rcu_read_unlock();
755		goto done;
756	}
757	rcu_read_unlock();
758
759	/*
760	 * Continues from above, so we don't need a KERN_ level
761	 */
762	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
763done:
764
765	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
766		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
767		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
768		res_counter_read_u64(&memcg->res, RES_FAILCNT));
769	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
770		"failcnt %llu\n",
771		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
772		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
773		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
774}
775
776/*
777 * This function returns the number of memcgs under the hierarchy tree.
778 * Returns 1 (just the count of self) if there are no children.
779 */
780static int mem_cgroup_count_children(struct mem_cgroup *mem)
781{
782	int num = 0;
783	mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
784	return num;
785}
786
787/*
788 * Visit the first child (need not be the first child as per the ordering
789 * of the cgroup list, since we track last_scanned_child) of @mem and use
790 * that to reclaim free pages from.
791 */
792static struct mem_cgroup *
793mem_cgroup_select_victim(struct mem_cgroup *root_mem)
794{
795	struct mem_cgroup *ret = NULL;
796	struct cgroup_subsys_state *css;
797	int nextid, found;
798
799	if (!root_mem->use_hierarchy) {
800		css_get(&root_mem->css);
801		ret = root_mem;
802	}
803
804	while (!ret) {
805		rcu_read_lock();
806		nextid = root_mem->last_scanned_child + 1;
807		css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
808				   &found);
809		if (css && css_tryget(css))
810			ret = container_of(css, struct mem_cgroup, css);
811
812		rcu_read_unlock();
813		/* Updates scanning parameter */
814		spin_lock(&root_mem->reclaim_param_lock);
815		if (!css) {
816			/* this means start scan from ID:1 */
817			root_mem->last_scanned_child = 0;
818		} else
819			root_mem->last_scanned_child = found;
820		spin_unlock(&root_mem->reclaim_param_lock);
821	}
822
823	return ret;
824}
825
826/*
827 * Scan the hierarchy if needed to reclaim memory. We remember the last child
828 * we reclaimed from, so that we don't end up penalizing one child extensively
829 * based on its position in the children list.
830 *
831 * root_mem is the original ancestor that we've been reclaiming from.
832 *
833 * We give up and return to the caller when we visit root_mem twice.
834 * (other groups can be removed while we're walking....)
835 *
836 * If shrink==true, this returns immediately to avoid freeing too much.
837 */
838static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
839				   gfp_t gfp_mask, bool noswap, bool shrink)
840{
841	struct mem_cgroup *victim;
842	int ret, total = 0;
843	int loop = 0;
844
845	while (loop < 2) {
846		victim = mem_cgroup_select_victim(root_mem);
847		if (victim == root_mem)
848			loop++;
849		if (!mem_cgroup_local_usage(&victim->stat)) {
850			/* this cgroup's local usage == 0 */
851			css_put(&victim->css);
852			continue;
853		}
854		/* we use swappiness of local cgroup */
855		ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
856						   get_swappiness(victim));
857		css_put(&victim->css);
858		/*
859		 * When shrinking usage, we can't tell here whether we should stop
860		 * or reclaim more; that depends on the caller. last_scanned_child
861		 * is enough to keep things fair across the tree.
862		 */
863		if (shrink)
864			return ret;
865		total += ret;
866		if (mem_cgroup_check_under_limit(root_mem))
867			return 1 + total;
868	}
869	return total;
870}
871
872bool mem_cgroup_oom_called(struct task_struct *task)
873{
874	bool ret = false;
875	struct mem_cgroup *mem;
876	struct mm_struct *mm;
877
878	rcu_read_lock();
879	mm = task->mm;
880	if (!mm)
881		mm = &init_mm;
882	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
883	if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
884		ret = true;
885	rcu_read_unlock();
886	return ret;
887}
888
889static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
890{
891	mem->last_oom_jiffies = jiffies;
892	return 0;
893}
894
895static void record_last_oom(struct mem_cgroup *mem)
896{
897	mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
898}
899
900
901/*
902 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
903 * the oom-killer can be invoked.
904 */
905static int __mem_cgroup_try_charge(struct mm_struct *mm,
906			gfp_t gfp_mask, struct mem_cgroup **memcg,
907			bool oom)
908{
909	struct mem_cgroup *mem, *mem_over_limit;
910	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
911	struct res_counter *fail_res;
912
913	if (unlikely(test_thread_flag(TIF_MEMDIE))) {
914		/* Don't account this! */
915		*memcg = NULL;
916		return 0;
917	}
918
919	/*
920	 * We always charge the cgroup the mm_struct belongs to.
921	 * The mm_struct's mem_cgroup changes on task migration if the
922	 * thread group leader migrates. It's possible that mm is not
923	 * set, if so charge the init_mm (happens for pagecache usage).
924	 */
925	mem = *memcg;
926	if (likely(!mem)) {
927		mem = try_get_mem_cgroup_from_mm(mm);
928		*memcg = mem;
929	} else {
930		css_get(&mem->css);
931	}
932	if (unlikely(!mem))
933		return 0;
934
935	VM_BUG_ON(mem_cgroup_is_obsolete(mem));
936
937	while (1) {
938		int ret;
939		bool noswap = false;
940
941		ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
942		if (likely(!ret)) {
943			if (!do_swap_account)
944				break;
945			ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
946							&fail_res);
947			if (likely(!ret))
948				break;
949			/* mem+swap counter fails */
950			res_counter_uncharge(&mem->res, PAGE_SIZE);
951			noswap = true;
952			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
953									memsw);
954		} else
955			/* mem counter fails */
956			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
957									res);
958
959		if (!(gfp_mask & __GFP_WAIT))
960			goto nomem;
961
962		ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
963							noswap, false);
964		if (ret)
965			continue;
966
967		/*
968		 * try_to_free_mem_cgroup_pages() might not give us a full
969		 * picture of reclaim. Some pages are reclaimed and might be
970		 * moved to swap cache or just unmapped from the cgroup.
971		 * Check the limit again to see if the reclaim reduced the
972		 * current usage of the cgroup before giving up
973		 *
974		 */
975		if (mem_cgroup_check_under_limit(mem_over_limit))
976			continue;
977
978		if (!nr_retries--) {
979			if (oom) {
980				mutex_lock(&memcg_tasklist);
981				mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
982				mutex_unlock(&memcg_tasklist);
983				record_last_oom(mem_over_limit);
984			}
985			goto nomem;
986		}
987	}
988	return 0;
989nomem:
990	css_put(&mem->css);
991	return -ENOMEM;
992}
993
994static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
995{
996	struct mem_cgroup *mem;
997	swp_entry_t ent;
998
999	if (!PageSwapCache(page))
1000		return NULL;
1001
1002	ent.val = page_private(page);
1003	mem = lookup_swap_cgroup(ent);
1004	if (!mem)
1005		return NULL;
1006	if (!css_tryget(&mem->css))
1007		return NULL;
1008	return mem;
1009}
1010
1011/*
1012 * Commit a charge obtained by __mem_cgroup_try_charge() and set page_cgroup to
1013 * the USED state. If it is already USED, uncharge and return.
1014 */
1015
1016static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1017				     struct page_cgroup *pc,
1018				     enum charge_type ctype)
1019{
1020	/* try_charge() can store NULL in *memcg; handle that case here. */
1021	if (!mem)
1022		return;
1023
1024	lock_page_cgroup(pc);
1025	if (unlikely(PageCgroupUsed(pc))) {
1026		unlock_page_cgroup(pc);
1027		res_counter_uncharge(&mem->res, PAGE_SIZE);
1028		if (do_swap_account)
1029			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1030		css_put(&mem->css);
1031		return;
1032	}
1033	pc->mem_cgroup = mem;
1034	smp_wmb();
1035	pc->flags = pcg_default_flags[ctype];
1036
1037	mem_cgroup_charge_statistics(mem, pc, true);
1038
1039	unlock_page_cgroup(pc);
1040}
1041
1042/**
1043 * mem_cgroup_move_account - move account of the page
1044 * @pc:	page_cgroup of the page.
1045 * @from: mem_cgroup which the page is moved from.
1046 * @to:	mem_cgroup which the page is moved to. @from != @to.
1047 *
1048 * The caller must confirm following.
1049 * - page is not on LRU (isolate_page() is useful.)
1050 *
1051 * returns 0 at success,
1052 * returns -EBUSY when lock is busy or "pc" is unstable.
1053 *
1054 * This function does "uncharge" from old cgroup but doesn't do "charge" to
1055 * new cgroup. It should be done by a caller.
1056 */
1057
1058static int mem_cgroup_move_account(struct page_cgroup *pc,
1059	struct mem_cgroup *from, struct mem_cgroup *to)
1060{
1061	struct mem_cgroup_per_zone *from_mz, *to_mz;
1062	int nid, zid;
1063	int ret = -EBUSY;
1064
1065	VM_BUG_ON(from == to);
1066	VM_BUG_ON(PageLRU(pc->page));
1067
1068	nid = page_cgroup_nid(pc);
1069	zid = page_cgroup_zid(pc);
1070	from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
1071	to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
1072
1073	if (!trylock_page_cgroup(pc))
1074		return ret;
1075
1076	if (!PageCgroupUsed(pc))
1077		goto out;
1078
1079	if (pc->mem_cgroup != from)
1080		goto out;
1081
1082	res_counter_uncharge(&from->res, PAGE_SIZE);
1083	mem_cgroup_charge_statistics(from, pc, false);
1084	if (do_swap_account)
1085		res_counter_uncharge(&from->memsw, PAGE_SIZE);
1086	css_put(&from->css);
1087
1088	css_get(&to->css);
1089	pc->mem_cgroup = to;
1090	mem_cgroup_charge_statistics(to, pc, true);
1091	ret = 0;
1092out:
1093	unlock_page_cgroup(pc);
1094	return ret;
1095}
1096
1097/*
1098 * move charges to its parent.
1099 */
1100
1101static int mem_cgroup_move_parent(struct page_cgroup *pc,
1102				  struct mem_cgroup *child,
1103				  gfp_t gfp_mask)
1104{
1105	struct page *page = pc->page;
1106	struct cgroup *cg = child->css.cgroup;
1107	struct cgroup *pcg = cg->parent;
1108	struct mem_cgroup *parent;
1109	int ret;
1110
1111	/* Is ROOT ? */
1112	if (!pcg)
1113		return -EINVAL;
1114
1115
1116	parent = mem_cgroup_from_cont(pcg);
1117
1118
1119	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
1120	if (ret || !parent)
1121		return ret;
1122
1123	if (!get_page_unless_zero(page)) {
1124		ret = -EBUSY;
1125		goto uncharge;
1126	}
1127
1128	ret = isolate_lru_page(page);
1129
1130	if (ret)
1131		goto cancel;
1132
1133	ret = mem_cgroup_move_account(pc, child, parent);
1134
1135	putback_lru_page(page);
1136	if (!ret) {
1137		put_page(page);
1138		/* drop extra refcnt by try_charge() */
1139		css_put(&parent->css);
1140		return 0;
1141	}
1142
1143cancel:
1144	put_page(page);
1145uncharge:
1146	/* drop extra refcnt by try_charge() */
1147	css_put(&parent->css);
1148	/* uncharge if move fails */
1149	res_counter_uncharge(&parent->res, PAGE_SIZE);
1150	if (do_swap_account)
1151		res_counter_uncharge(&parent->memsw, PAGE_SIZE);
1152	return ret;
1153}
1154
1155/*
1156 * Charge the memory controller for page usage.
1157 * Return
1158 * 0 if the charge was successful
1159 * < 0 if the cgroup is over its limit
1160 */
1161static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1162				gfp_t gfp_mask, enum charge_type ctype,
1163				struct mem_cgroup *memcg)
1164{
1165	struct mem_cgroup *mem;
1166	struct page_cgroup *pc;
1167	int ret;
1168
1169	pc = lookup_page_cgroup(page);
1170	/* can happen at boot */
1171	if (unlikely(!pc))
1172		return 0;
1173	prefetchw(pc);
1174
1175	mem = memcg;
1176	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
1177	if (ret || !mem)
1178		return ret;
1179
1180	__mem_cgroup_commit_charge(mem, pc, ctype);
1181	return 0;
1182}
1183
1184int mem_cgroup_newpage_charge(struct page *page,
1185			      struct mm_struct *mm, gfp_t gfp_mask)
1186{
1187	if (mem_cgroup_disabled())
1188		return 0;
1189	if (PageCompound(page))
1190		return 0;
1191	/*
1192	 * If already mapped, we don't have to account.
1193	 * If page cache, page->mapping has address_space.
1194	 * But page->mapping may have an out-of-use anon_vma pointer;
1195	 * detect that with a PageAnon() check. A newly-mapped-anon page's
1196	 * page->mapping is NULL.
1197	 */
1198	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
1199		return 0;
1200	if (unlikely(!mm))
1201		mm = &init_mm;
1202	return mem_cgroup_charge_common(page, mm, gfp_mask,
1203				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1204}
1205
1206int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
1207				gfp_t gfp_mask)
1208{
1209	struct mem_cgroup *mem = NULL;
1210	int ret;
1211
1212	if (mem_cgroup_disabled())
1213		return 0;
1214	if (PageCompound(page))
1215		return 0;
1216	/*
1217	 * Corner case handling. This is usually called from add_to_page_cache().
1218	 * But some filesystems (shmem) precharge this page before calling it
1219	 * and call add_to_page_cache() with GFP_NOWAIT.
1220	 *
1221	 * In the GFP_NOWAIT case, the page may be pre-charged before calling
1222	 * add_to_page_cache() (see shmem.c); check that here and avoid charging
1223	 * twice. (It works, but at a somewhat larger cost.)
1224	 * And when the page is SwapCache, swap information should be taken
1225	 * into account. This is under lock_page() now.
1226	 */
1227	if (!(gfp_mask & __GFP_WAIT)) {
1228		struct page_cgroup *pc;
1229
1230
1231		pc = lookup_page_cgroup(page);
1232		if (!pc)
1233			return 0;
1234		lock_page_cgroup(pc);
1235		if (PageCgroupUsed(pc)) {
1236			unlock_page_cgroup(pc);
1237			return 0;
1238		}
1239		unlock_page_cgroup(pc);
1240	}
1241
1242	if (do_swap_account && PageSwapCache(page)) {
1243		mem = try_get_mem_cgroup_from_swapcache(page);
1244		if (mem)
1245			mm = NULL;
1246		else
1247			mem = NULL;
1248		/* SwapCache may be still linked to LRU now. */
1249		mem_cgroup_lru_del_before_commit_swapcache(page);
1250	}
1251
1252	if (unlikely(!mm && !mem))
1253		mm = &init_mm;
1254
1255	if (page_is_file_cache(page))
1256		return mem_cgroup_charge_common(page, mm, gfp_mask,
1257				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
1258
1259	ret = mem_cgroup_charge_common(page, mm, gfp_mask,
1260				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1261	if (mem)
1262		css_put(&mem->css);
1263	if (PageSwapCache(page))
1264		mem_cgroup_lru_add_after_commit_swapcache(page);
1265
1266	if (do_swap_account && !ret && PageSwapCache(page)) {
1267		swp_entry_t ent = {.val = page_private(page)};
1268		/* avoid double counting */
1269		mem = swap_cgroup_record(ent, NULL);
1270		if (mem) {
1271			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1272			mem_cgroup_put(mem);
1273		}
1274	}
1275	return ret;
1276}
1277
1278/*
1279 * During swap-in (try_charge -> commit or cancel), the page is locked.
1280 * And when try_charge() successfully returns, one refcnt to the memcg, without
1281 * a struct page_cgroup, is acquired. This refcnt will be consumed by
1282 * "commit()" or released by "cancel()".
1283 */
1284int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
1285				 struct page *page,
1286				 gfp_t mask, struct mem_cgroup **ptr)
1287{
1288	struct mem_cgroup *mem;
1289	int ret;
1290
1291	if (mem_cgroup_disabled())
1292		return 0;
1293
1294	if (!do_swap_account)
1295		goto charge_cur_mm;
1296	/*
1297	 * A racing thread's fault, or swapoff, may have already updated
1298	 * the pte, and even removed page from swap cache: return success
1299	 * to go on to do_swap_page()'s pte_same() test, which should fail.
1300	 */
1301	if (!PageSwapCache(page))
1302		return 0;
1303	mem = try_get_mem_cgroup_from_swapcache(page);
1304	if (!mem)
1305		goto charge_cur_mm;
1306	*ptr = mem;
1307	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
1308	/* drop extra refcnt from tryget */
1309	css_put(&mem->css);
1310	return ret;
1311charge_cur_mm:
1312	if (unlikely(!mm))
1313		mm = &init_mm;
1314	return __mem_cgroup_try_charge(mm, mask, ptr, true);
1315}
1316
1317void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1318{
1319	struct page_cgroup *pc;
1320
1321	if (mem_cgroup_disabled())
1322		return;
1323	if (!ptr)
1324		return;
1325	pc = lookup_page_cgroup(page);
1326	mem_cgroup_lru_del_before_commit_swapcache(page);
1327	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1328	mem_cgroup_lru_add_after_commit_swapcache(page);
1329	/*
1330	 * Now the swapped-in page is in memory. This means this page may be
1331	 * counted both as mem and swap.... a double count.
1332	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
1333	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
1334	 * may call delete_from_swap_cache() before we reach here.
1335	 */
1336	if (do_swap_account && PageSwapCache(page)) {
1337		swp_entry_t ent = {.val = page_private(page)};
1338		struct mem_cgroup *memcg;
1339		memcg = swap_cgroup_record(ent, NULL);
1340		if (memcg) {
1341			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1342			mem_cgroup_put(memcg);
1343		}
1344
1345	}
1346	/* add this page(page_cgroup) to the LRU we want. */
1347
1348}
1349
1350void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1351{
1352	if (mem_cgroup_disabled())
1353		return;
1354	if (!mem)
1355		return;
1356	res_counter_uncharge(&mem->res, PAGE_SIZE);
1357	if (do_swap_account)
1358		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1359	css_put(&mem->css);
1360}
1361
1362
1363/*
1364 * uncharge if !page_mapped(page)
1365 */
1366static struct mem_cgroup *
1367__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1368{
1369	struct page_cgroup *pc;
1370	struct mem_cgroup *mem = NULL;
1371	struct mem_cgroup_per_zone *mz;
1372
1373	if (mem_cgroup_disabled())
1374		return NULL;
1375
1376	if (PageSwapCache(page))
1377		return NULL;
1378
1379	/*
1380	 * Check if our page_cgroup is valid
1381	 */
1382	pc = lookup_page_cgroup(page);
1383	if (unlikely(!pc || !PageCgroupUsed(pc)))
1384		return NULL;
1385
1386	lock_page_cgroup(pc);
1387
1388	mem = pc->mem_cgroup;
1389
1390	if (!PageCgroupUsed(pc))
1391		goto unlock_out;
1392
1393	switch (ctype) {
1394	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1395		if (page_mapped(page))
1396			goto unlock_out;
1397		break;
1398	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1399		if (!PageAnon(page)) {	/* Shared memory */
1400			if (page->mapping && !page_is_file_cache(page))
1401				goto unlock_out;
1402		} else if (page_mapped(page)) /* Anon */
1403				goto unlock_out;
1404		break;
1405	default:
1406		break;
1407	}
1408
1409	res_counter_uncharge(&mem->res, PAGE_SIZE);
1410	if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1411		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1412	mem_cgroup_charge_statistics(mem, pc, false);
1413
1414	ClearPageCgroupUsed(pc);
1415	/*
1416	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
1417	 * freed from the LRU. This is safe because an uncharged page is expected
1418	 * not to be reused (it is freed soon). The exception is SwapCache, which
1419	 * is handled by special functions.
1420	 */
1421
1422	mz = page_cgroup_zoneinfo(pc);
1423	unlock_page_cgroup(pc);
1424
1425	/* at swapout, this memcg will be accessed to record to swap */
1426	if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1427		css_put(&mem->css);
1428
1429	return mem;
1430
1431unlock_out:
1432	unlock_page_cgroup(pc);
1433	return NULL;
1434}
1435
1436void mem_cgroup_uncharge_page(struct page *page)
1437{
1438	/* early check. */
1439	if (page_mapped(page))
1440		return;
1441	if (page->mapping && !PageAnon(page))
1442		return;
1443	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1444}
1445
1446void mem_cgroup_uncharge_cache_page(struct page *page)
1447{
1448	VM_BUG_ON(page_mapped(page));
1449	VM_BUG_ON(page->mapping);
1450	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1451}
1452
1453/*
1454 * Called from __delete_from_swap_cache(); drops the "page" account.
1455 * memcg information is recorded in the swap_cgroup of "ent".
1456 */
1457void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
1458{
1459	struct mem_cgroup *memcg;
1460
1461	memcg = __mem_cgroup_uncharge_common(page,
1462					MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
1463	/* record memcg information */
1464	if (do_swap_account && memcg) {
1465		swap_cgroup_record(ent, memcg);
1466		mem_cgroup_get(memcg);
1467	}
1468	if (memcg)
1469		css_put(&memcg->css);
1470}
1471
1472#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1473/*
1474 * Called from swap_entry_free(). Removes the record in swap_cgroup and
1475 * uncharges the "memsw" account.
1476 */
1477void mem_cgroup_uncharge_swap(swp_entry_t ent)
1478{
1479	struct mem_cgroup *memcg;
1480
1481	if (!do_swap_account)
1482		return;
1483
1484	memcg = swap_cgroup_record(ent, NULL);
1485	if (memcg) {
1486		res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1487		mem_cgroup_put(memcg);
1488	}
1489}
1490#endif
1491
1492/*
1493 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
1494 * page belongs to.
1495 */
1496int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1497{
1498	struct page_cgroup *pc;
1499	struct mem_cgroup *mem = NULL;
1500	int ret = 0;
1501
1502	if (mem_cgroup_disabled())
1503		return 0;
1504
1505	pc = lookup_page_cgroup(page);
1506	lock_page_cgroup(pc);
1507	if (PageCgroupUsed(pc)) {
1508		mem = pc->mem_cgroup;
1509		css_get(&mem->css);
1510	}
1511	unlock_page_cgroup(pc);
1512
1513	if (mem) {
1514		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
1515		css_put(&mem->css);
1516	}
1517	*ptr = mem;
1518	return ret;
1519}
1520
1521/* remove redundant charge if migration failed*/
1522void mem_cgroup_end_migration(struct mem_cgroup *mem,
1523		struct page *oldpage, struct page *newpage)
1524{
1525	struct page *target, *unused;
1526	struct page_cgroup *pc;
1527	enum charge_type ctype;
1528
1529	if (!mem)
1530		return;
1531
1532	/* at migration success, oldpage->mapping is NULL. */
1533	if (oldpage->mapping) {
1534		target = oldpage;
1535		unused = NULL;
1536	} else {
1537		target = newpage;
1538		unused = oldpage;
1539	}
1540
1541	if (PageAnon(target))
1542		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1543	else if (page_is_file_cache(target))
1544		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1545	else
1546		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1547
1548	/* unused page is not on radix-tree now. */
1549	if (unused)
1550		__mem_cgroup_uncharge_common(unused, ctype);
1551
1552	pc = lookup_page_cgroup(target);
1553	/*
1554	 * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup.
1555	 * So, double-counting is effectively avoided.
1556	 */
1557	__mem_cgroup_commit_charge(mem, pc, ctype);
1558
1559	/*
1560	 * Both of oldpage and newpage are still under lock_page().
1561	 * Then, we don't have to care about race in radix-tree.
1562	 * But we have to be careful that this page is unmapped or not.
1563	 *
1564	 * There is a case for !page_mapped(). At the start of
1565	 * migration, oldpage was mapped. But now, it's zapped.
1566	 * But we know *target* page is not freed/reused under us.
1567	 * mem_cgroup_uncharge_page() does all necessary checks.
1568	 */
1569	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1570		mem_cgroup_uncharge_page(target);
1571}
1572
1573/*
1574 * A call to try to shrink memory usage under the specified resource controller.
1575 * This is typically used for page reclaim of shmem, to reduce the side
1576 * effects of page allocation from shmem, which is used by some mem_cgroups.
1577 */
1578int mem_cgroup_shrink_usage(struct page *page,
1579			    struct mm_struct *mm,
1580			    gfp_t gfp_mask)
1581{
1582	struct mem_cgroup *mem = NULL;
1583	int progress = 0;
1584	int retry = MEM_CGROUP_RECLAIM_RETRIES;
1585
1586	if (mem_cgroup_disabled())
1587		return 0;
1588	if (page)
1589		mem = try_get_mem_cgroup_from_swapcache(page);
1590	if (!mem && mm)
1591		mem = try_get_mem_cgroup_from_mm(mm);
1592	if (unlikely(!mem))
1593		return 0;
1594
1595	do {
1596		progress = mem_cgroup_hierarchical_reclaim(mem,
1597					gfp_mask, true, false);
1598		progress += mem_cgroup_check_under_limit(mem);
1599	} while (!progress && --retry);
1600
1601	css_put(&mem->css);
1602	if (!retry)
1603		return -ENOMEM;
1604	return 0;
1605}
1606
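/*
 * set_limit_mutex serializes the two limit-resize paths below so the
 * "mem->res.limit < mem->memsw.limit" checks cannot race with a concurrent
 * update of the other counter's limit.
 */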
1607static DEFINE_MUTEX(set_limit_mutex);
1608
1609static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1610				unsigned long long val)
1611{
1612	int retry_count;
1613	int progress;
1614	u64 memswlimit;
1615	int ret = 0;
1616	int children = mem_cgroup_count_children(memcg);
1617	u64 curusage, oldusage;
1618
1619	/*
1620	 * To keep hierarchical_reclaim simple, how long we should retry
1621	 * depends on the caller. We set our retry count to be a function
1622	 * of the number of children we should visit in this loop.
1623	 */
1624	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
1625
1626	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1627
1628	while (retry_count) {
1629		if (signal_pending(current)) {
1630			ret = -EINTR;
1631			break;
1632		}
1633		/*
1634		 * Rather than hiding all of this in some function, do it in an
1635		 * open-coded manner so you can see what this really does.
1636		 * We have to guarantee mem->res.limit < mem->memsw.limit.
1637		 */
1638		mutex_lock(&set_limit_mutex);
1639		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1640		if (memswlimit < val) {
1641			ret = -EINVAL;
1642			mutex_unlock(&set_limit_mutex);
1643			break;
1644		}
1645		ret = res_counter_set_limit(&memcg->res, val);
1646		mutex_unlock(&set_limit_mutex);
1647
1648		if (!ret)
1649			break;
1650
1651		progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
1652						   false, true);
1653		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1654		/* Usage is reduced ? */
1655		if (curusage >= oldusage)
1656			retry_count--;
1657		else
1658			oldusage = curusage;
1659	}
1660
1661	return ret;
1662}
1663
1664int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1665				unsigned long long val)
1666{
1667	int retry_count;
1668	u64 memlimit, oldusage, curusage;
1669	int children = mem_cgroup_count_children(memcg);
1670	int ret = -EBUSY;
1671
1672	if (!do_swap_account)
1673		return -EINVAL;
1674	/* see mem_cgroup_resize_res_limit */
1675	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
1676	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1677	while (retry_count) {
1678		if (signal_pending(current)) {
1679			ret = -EINTR;
1680			break;
1681		}
1682		/*
1683		 * Rather than hiding all of this in some function, do it in an
1684		 * open-coded manner so you can see what this really does.
1685		 * We have to guarantee mem->res.limit < mem->memsw.limit.
1686		 */
1687		mutex_lock(&set_limit_mutex);
1688		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1689		if (memlimit > val) {
1690			ret = -EINVAL;
1691			mutex_unlock(&set_limit_mutex);
1692			break;
1693		}
1694		ret = res_counter_set_limit(&memcg->memsw, val);
1695		mutex_unlock(&set_limit_mutex);
1696
1697		if (!ret)
1698			break;
1699
1700		mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
1701		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1702		/* Usage is reduced ? */
1703		if (curusage >= oldusage)
1704			retry_count--;
1705		else
1706			oldusage = curusage;
1707	}
1708	return ret;
1709}
1710
1711/*
1712 * This routine traverses the page_cgroups on the given list and drops them all.
1713 * *And* this routine doesn't reclaim the pages themselves, it just removes the page_cgroups.
1714 */
1715static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1716				int node, int zid, enum lru_list lru)
1717{
1718	struct zone *zone;
1719	struct mem_cgroup_per_zone *mz;
1720	struct page_cgroup *pc, *busy;
1721	unsigned long flags, loop;
1722	struct list_head *list;
1723	int ret = 0;
1724
1725	zone = &NODE_DATA(node)->node_zones[zid];
1726	mz = mem_cgroup_zoneinfo(mem, node, zid);
1727	list = &mz->lists[lru];
1728
1729	loop = MEM_CGROUP_ZSTAT(mz, lru);
1730	/* give some margin against EBUSY etc...*/
1731	loop += 256;
1732	busy = NULL;
1733	while (loop--) {
1734		ret = 0;
1735		spin_lock_irqsave(&zone->lru_lock, flags);
1736		if (list_empty(list)) {
1737			spin_unlock_irqrestore(&zone->lru_lock, flags);
1738			break;
1739		}
1740		pc = list_entry(list->prev, struct page_cgroup, lru);
1741		if (busy == pc) {
1742			list_move(&pc->lru, list);
1743			busy = NULL;
1744			spin_unlock_irqrestore(&zone->lru_lock, flags);
1745			continue;
1746		}
1747		spin_unlock_irqrestore(&zone->lru_lock, flags);
1748
1749		ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1750		if (ret == -ENOMEM)
1751			break;
1752
1753		if (ret == -EBUSY || ret == -EINVAL) {
1754			/* found lock contention or "pc" is obsolete. */
1755			busy = pc;
1756			cond_resched();
1757		} else
1758			busy = NULL;
1759	}
1760
1761	if (!ret && !list_empty(list))
1762		return -EBUSY;
1763	return ret;
1764}
1765
1766/*
1767 * Make the mem_cgroup's charge 0 if there is no task.
1768 * This enables deleting this mem_cgroup.
1769 */
1770static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1771{
1772	int ret;
1773	int node, zid, shrink;
1774	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1775	struct cgroup *cgrp = mem->css.cgroup;
1776
1777	css_get(&mem->css);
1778
1779	shrink = 0;
1780	/* should free all ? */
1781	if (free_all)
1782		goto try_to_free;
1783move_account:
1784	while (mem->res.usage > 0) {
1785		ret = -EBUSY;
1786		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1787			goto out;
1788		ret = -EINTR;
1789		if (signal_pending(current))
1790			goto out;
1791		/* This is for making all *used* pages to be on LRU. */
1792		lru_add_drain_all();
1793		ret = 0;
1794		for_each_node_state(node, N_HIGH_MEMORY) {
1795			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1796				enum lru_list l;
1797				for_each_lru(l) {
1798					ret = mem_cgroup_force_empty_list(mem,
1799							node, zid, l);
1800					if (ret)
1801						break;
1802				}
1803			}
1804			if (ret)
1805				break;
1806		}
1807		/* it seems parent cgroup doesn't have enough mem */
1808		if (ret == -ENOMEM)
1809			goto try_to_free;
1810		cond_resched();
1811	}
1812	ret = 0;
1813out:
1814	css_put(&mem->css);
1815	return ret;
1816
1817try_to_free:
1818	/* returns EBUSY if there is a task or if we come here twice. */
1819	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1820		ret = -EBUSY;
1821		goto out;
1822	}
1823	/* we call try-to-free pages to make this cgroup empty */
1824	lru_add_drain_all();
1825	/* try to free all pages in this cgroup */
1826	shrink = 1;
1827	while (nr_retries && mem->res.usage > 0) {
1828		int progress;
1829
1830		if (signal_pending(current)) {
1831			ret = -EINTR;
1832			goto out;
1833		}
1834		progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
1835						false, get_swappiness(mem));
1836		if (!progress) {
1837			nr_retries--;
1838			/* maybe some writeback is necessary */
1839			congestion_wait(WRITE, HZ/10);
1840		}
1841
1842	}
1843	lru_add_drain();
1844	/* try move_account...there may be some *locked* pages. */
1845	if (mem->res.usage)
1846		goto move_account;
1847	ret = 0;
1848	goto out;
1849}
1850
1851int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1852{
1853	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1854}
1855
1856
1857static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1858{
1859	return mem_cgroup_from_cont(cont)->use_hierarchy;
1860}
1861
1862static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1863					u64 val)
1864{
1865	int retval = 0;
1866	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1867	struct cgroup *parent = cont->parent;
1868	struct mem_cgroup *parent_mem = NULL;
1869
1870	if (parent)
1871		parent_mem = mem_cgroup_from_cont(parent);
1872
1873	cgroup_lock();
1874	/*
1875	 * If the parent's use_hierarchy is set, we can't make any modifications
1876	 * in the child subtrees. If it is unset, then the change can
1877	 * occur, provided the current cgroup has no children.
1878	 *
1879	 * For the root cgroup, parent_mem is NULL, we allow value to be
1880	 * set if there are no children.
1881	 */
1882	if ((!parent_mem || !parent_mem->use_hierarchy) &&
1883				(val == 1 || val == 0)) {
1884		if (list_empty(&cont->children))
1885			mem->use_hierarchy = val;
1886		else
1887			retval = -EBUSY;
1888	} else
1889		retval = -EINVAL;
1890	cgroup_unlock();
1891
1892	return retval;
1893}
1894
1895static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
1896{
1897	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1898	u64 val = 0;
1899	int type, name;
1900
1901	type = MEMFILE_TYPE(cft->private);
1902	name = MEMFILE_ATTR(cft->private);
1903	switch (type) {
1904	case _MEM:
1905		val = res_counter_read_u64(&mem->res, name);
1906		break;
1907	case _MEMSWAP:
1908		if (do_swap_account)
1909			val = res_counter_read_u64(&mem->memsw, name);
1910		break;
1911	default:
1912		BUG();
1913		break;
1914	}
1915	return val;
1916}
1917/*
1918 * The user of this function is...
1919 * RES_LIMIT.
1920 */
1921static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
1922			    const char *buffer)
1923{
1924	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
1925	int type, name;
1926	unsigned long long val;
1927	int ret;
1928
1929	type = MEMFILE_TYPE(cft->private);
1930	name = MEMFILE_ATTR(cft->private);
1931	switch (name) {
1932	case RES_LIMIT:
1933		/* This function does all the necessary parsing... reuse it */
1934		ret = res_counter_memparse_write_strategy(buffer, &val);
1935		if (ret)
1936			break;
1937		if (type == _MEM)
1938			ret = mem_cgroup_resize_limit(memcg, val);
1939		else
1940			ret = mem_cgroup_resize_memsw_limit(memcg, val);
1941		break;
1942	default:
1943		ret = -EINVAL; /* should be BUG() ? */
1944		break;
1945	}
1946	return ret;
1947}
1948
1949static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
1950		unsigned long long *mem_limit, unsigned long long *memsw_limit)
1951{
1952	struct cgroup *cgroup;
1953	unsigned long long min_limit, min_memsw_limit, tmp;
1954
1955	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1956	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1957	cgroup = memcg->css.cgroup;
1958	if (!memcg->use_hierarchy)
1959		goto out;
1960
1961	while (cgroup->parent) {
1962		cgroup = cgroup->parent;
1963		memcg = mem_cgroup_from_cont(cgroup);
1964		if (!memcg->use_hierarchy)
1965			break;
1966		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
1967		min_limit = min(min_limit, tmp);
1968		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1969		min_memsw_limit = min(min_memsw_limit, tmp);
1970	}
1971out:
1972	*mem_limit = min_limit;
1973	*memsw_limit = min_memsw_limit;
1974	return;
1975}
1976
1977static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
1978{
1979	struct mem_cgroup *mem;
1980	int type, name;
1981
1982	mem = mem_cgroup_from_cont(cont);
1983	type = MEMFILE_TYPE(event);
1984	name = MEMFILE_ATTR(event);
1985	switch (name) {
1986	case RES_MAX_USAGE:
1987		if (type == _MEM)
1988			res_counter_reset_max(&mem->res);
1989		else
1990			res_counter_reset_max(&mem->memsw);
1991		break;
1992	case RES_FAILCNT:
1993		if (type == _MEM)
1994			res_counter_reset_failcnt(&mem->res);
1995		else
1996			res_counter_reset_failcnt(&mem->memsw);
1997		break;
1998	}
1999	return 0;
2000}
2001
2002
2003/* For read statistics */
2004enum {
2005	MCS_CACHE,
2006	MCS_RSS,
2007	MCS_PGPGIN,
2008	MCS_PGPGOUT,
2009	MCS_INACTIVE_ANON,
2010	MCS_ACTIVE_ANON,
2011	MCS_INACTIVE_FILE,
2012	MCS_ACTIVE_FILE,
2013	MCS_UNEVICTABLE,
2014	NR_MCS_STAT,
2015};
2016
2017struct mcs_total_stat {
2018	s64 stat[NR_MCS_STAT];
2019};
2020
2021static struct {
2022	char *local_name;
2023	char *total_name;
2024} memcg_stat_strings[NR_MCS_STAT] = {
2025	{"cache", "total_cache"},
2026	{"rss", "total_rss"},
2027	{"pgpgin", "total_pgpgin"},
2028	{"pgpgout", "total_pgpgout"},
2029	{"inactive_anon", "total_inactive_anon"},
2030	{"active_anon", "total_active_anon"},
2031	{"inactive_file", "total_inactive_file"},
2032	{"active_file", "total_active_file"},
2033	{"unevictable", "total_unevictable"}
2034};
2035
2036
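/*
 * Accumulate one cgroup's local counters into *data (a struct
 * mcs_total_stat).  Used both directly for the "local" lines of memory.stat
 * and as the mem_cgroup_walk_tree() callback when building the hierarchical
 * "total_*" lines.
 */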
2037static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
2038{
2039	struct mcs_total_stat *s = data;
2040	s64 val;
2041
2042	/* per cpu stat */
2043	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
2044	s->stat[MCS_CACHE] += val * PAGE_SIZE;
2045	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
2046	s->stat[MCS_RSS] += val * PAGE_SIZE;
2047	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
2048	s->stat[MCS_PGPGIN] += val;
2049	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
2050	s->stat[MCS_PGPGOUT] += val;
2051
2052	/* per zone stat */
2053	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
2054	s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
2055	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
2056	s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
2057	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
2058	s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
2059	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
2060	s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
2061	val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
2062	s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
2063	return 0;
2064}
2065
2066static void
2067mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
2068{
2069	mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
2070}
2071
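/*
 * Produce the memory.stat file: local counters first, then the effective
 * hierarchical limits, then the same counters summed over the whole subtree,
 * and finally some reclaim statistics when CONFIG_DEBUG_VM is set.
 */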
2072static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
2073				 struct cgroup_map_cb *cb)
2074{
2075	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
2076	struct mcs_total_stat mystat;
2077	int i;
2078
2079	memset(&mystat, 0, sizeof(mystat));
2080	mem_cgroup_get_local_stat(mem_cont, &mystat);
2081
2082	for (i = 0; i < NR_MCS_STAT; i++)
2083		cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
2084
2085	/* Hierarchical information */
2086	{
2087		unsigned long long limit, memsw_limit;
2088		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
2089		cb->fill(cb, "hierarchical_memory_limit", limit);
2090		if (do_swap_account)
2091			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
2092	}
2093
2094	memset(&mystat, 0, sizeof(mystat));
2095	mem_cgroup_get_total_stat(mem_cont, &mystat);
2096	for (i = 0; i < NR_MCS_STAT; i++)
2097		cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
2098
2099
2100#ifdef CONFIG_DEBUG_VM
2101	cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
2102
2103	{
2104		int nid, zid;
2105		struct mem_cgroup_per_zone *mz;
2106		unsigned long recent_rotated[2] = {0, 0};
2107		unsigned long recent_scanned[2] = {0, 0};
2108
2109		for_each_online_node(nid)
2110			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2111				mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
2112
2113				recent_rotated[0] +=
2114					mz->reclaim_stat.recent_rotated[0];
2115				recent_rotated[1] +=
2116					mz->reclaim_stat.recent_rotated[1];
2117				recent_scanned[0] +=
2118					mz->reclaim_stat.recent_scanned[0];
2119				recent_scanned[1] +=
2120					mz->reclaim_stat.recent_scanned[1];
2121			}
2122		cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
2123		cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
2124		cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
2125		cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
2126	}
2127#endif
2128
2129	return 0;
2130}
2131
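/*
 * memory.swappiness lets reclaim inside this cgroup use its own swappiness
 * value instead of the global one.  Valid values are 0..100.  The root
 * cgroup cannot be changed through this file, and under use_hierarchy only
 * the top of a hierarchy with no children may change it (see the checks in
 * the write handler below).
 */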
2132static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
2133{
2134	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2135
2136	return get_swappiness(memcg);
2137}
2138
2139static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
2140				       u64 val)
2141{
2142	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2143	struct mem_cgroup *parent;
2144
2145	if (val > 100)
2146		return -EINVAL;
2147
2148	if (cgrp->parent == NULL)
2149		return -EINVAL;
2150
2151	parent = mem_cgroup_from_cont(cgrp->parent);
2152
2153	cgroup_lock();
2154
2155	/* If under hierarchy, only empty-root can set this value */
2156	if ((parent->use_hierarchy) ||
2157	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
2158		cgroup_unlock();
2159		return -EINVAL;
2160	}
2161
2162	spin_lock(&memcg->reclaim_param_lock);
2163	memcg->swappiness = val;
2164	spin_unlock(&memcg->reclaim_param_lock);
2165
2166	cgroup_unlock();
2167
2168	return 0;
2169}
2170
2171
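/*
 * Control files created in every memory cgroup directory.  Each entry shows
 * up as a "memory."-prefixed file once the controller is mounted, e.g.:
 *
 *	# mount -t cgroup -o memory none /cgroups
 *	# mkdir /cgroups/grp
 *	# echo 64M > /cgroups/grp/memory.limit_in_bytes
 *	# cat /cgroups/grp/memory.usage_in_bytes
 *
 * (The "memory." prefix comes from the subsystem name registered at the
 * bottom of this file; the mount point above is just an example.)
 */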
2172static struct cftype mem_cgroup_files[] = {
2173	{
2174		.name = "usage_in_bytes",
2175		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
2176		.read_u64 = mem_cgroup_read,
2177	},
2178	{
2179		.name = "max_usage_in_bytes",
2180		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
2181		.trigger = mem_cgroup_reset,
2182		.read_u64 = mem_cgroup_read,
2183	},
2184	{
2185		.name = "limit_in_bytes",
2186		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
2187		.write_string = mem_cgroup_write,
2188		.read_u64 = mem_cgroup_read,
2189	},
2190	{
2191		.name = "failcnt",
2192		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
2193		.trigger = mem_cgroup_reset,
2194		.read_u64 = mem_cgroup_read,
2195	},
2196	{
2197		.name = "stat",
2198		.read_map = mem_control_stat_show,
2199	},
2200	{
2201		.name = "force_empty",
2202		.trigger = mem_cgroup_force_empty_write,
2203	},
2204	{
2205		.name = "use_hierarchy",
2206		.write_u64 = mem_cgroup_hierarchy_write,
2207		.read_u64 = mem_cgroup_hierarchy_read,
2208	},
2209	{
2210		.name = "swappiness",
2211		.read_u64 = mem_cgroup_swappiness_read,
2212		.write_u64 = mem_cgroup_swappiness_write,
2213	},
2214};
2215
2216#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
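/*
 * The memory.memsw.* files expose the combined memory+swap res_counter.
 * They are only registered when swap accounting is active, see
 * register_memsw_files() and do_swap_account.
 */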
2217static struct cftype memsw_cgroup_files[] = {
2218	{
2219		.name = "memsw.usage_in_bytes",
2220		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
2221		.read_u64 = mem_cgroup_read,
2222	},
2223	{
2224		.name = "memsw.max_usage_in_bytes",
2225		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
2226		.trigger = mem_cgroup_reset,
2227		.read_u64 = mem_cgroup_read,
2228	},
2229	{
2230		.name = "memsw.limit_in_bytes",
2231		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
2232		.write_string = mem_cgroup_write,
2233		.read_u64 = mem_cgroup_read,
2234	},
2235	{
2236		.name = "memsw.failcnt",
2237		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
2238		.trigger = mem_cgroup_reset,
2239		.read_u64 = mem_cgroup_read,
2240	},
2241};
2242
2243static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2244{
2245	if (!do_swap_account)
2246		return 0;
2247	return cgroup_add_files(cont, ss, memsw_cgroup_files,
2248				ARRAY_SIZE(memsw_cgroup_files));
2249}
2250#else
2251static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2252{
2253	return 0;
2254}
2255#endif
2256
2257static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2258{
2259	struct mem_cgroup_per_node *pn;
2260	struct mem_cgroup_per_zone *mz;
2261	enum lru_list l;
2262	int zone, tmp = node;
2263	/*
2264	 * This routine is called for every possible node; it is a BUG to call
2265	 * kmalloc() against an offline node, hence the tmp = -1 fallback below.
2266	 *
2267	 * TODO: this routine can waste a lot of memory for nodes which will
2268	 *       never be onlined.  It would be better to use a memory hotplug
2269	 *       callback function.
2270	 */
2271	if (!node_state(node, N_NORMAL_MEMORY))
2272		tmp = -1;
2273	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
2274	if (!pn)
2275		return 1;
2276
2277	mem->info.nodeinfo[node] = pn;
2278	memset(pn, 0, sizeof(*pn));
2279
2280	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
2281		mz = &pn->zoneinfo[zone];
2282		for_each_lru(l)
2283			INIT_LIST_HEAD(&mz->lists[l]);
2284	}
2285	return 0;
2286}
2287
2288static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2289{
2290	kfree(mem->info.nodeinfo[node]);
2291}
2292
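/*
 * A mem_cgroup is allocated together with its per-cpu statistics array
 * (nr_cpu_ids entries of struct mem_cgroup_stat_cpu) appended after the
 * structure itself; mem_cgroup_size() is the combined size.
 */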
2293static int mem_cgroup_size(void)
2294{
2295	int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
2296	return sizeof(struct mem_cgroup) + cpustat_size;
2297}
2298
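/*
 * The allocation is done with kmalloc() while the combined size fits within
 * a page and with vmalloc() otherwise (the per-cpu statistics can make it
 * large on big-NR_CPUS configurations).  __mem_cgroup_free() makes the
 * matching kfree()/vfree() choice.
 */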
2299static struct mem_cgroup *mem_cgroup_alloc(void)
2300{
2301	struct mem_cgroup *mem;
2302	int size = mem_cgroup_size();
2303
2304	if (size < PAGE_SIZE)
2305		mem = kmalloc(size, GFP_KERNEL);
2306	else
2307		mem = vmalloc(size);
2308
2309	if (mem)
2310		memset(mem, 0, size);
2311	return mem;
2312}
2313
2314/*
2315 * When a mem_cgroup is destroyed, references from swap_cgroup can still
2316 * remain (scanning them all at force_empty would be too costly...).
2317 *
2318 * Instead of clearing all references at force_empty, we remember the
2319 * number of references from swap_cgroup and free the mem_cgroup only
2320 * when that count drops to 0.
2321 *
2322 * Removal of the cgroup itself succeeds regardless of refs from swap.
2323 */
2324
2325static void __mem_cgroup_free(struct mem_cgroup *mem)
2326{
2327	int node;
2328
2329	free_css_id(&mem_cgroup_subsys, &mem->css);
2330
2331	for_each_node_state(node, N_POSSIBLE)
2332		free_mem_cgroup_per_zone_info(mem, node);
2333
2334	if (mem_cgroup_size() < PAGE_SIZE)
2335		kfree(mem);
2336	else
2337		vfree(mem);
2338}
2339
2340static void mem_cgroup_get(struct mem_cgroup *mem)
2341{
2342	atomic_inc(&mem->refcnt);
2343}
2344
2345static void mem_cgroup_put(struct mem_cgroup *mem)
2346{
2347	if (atomic_dec_and_test(&mem->refcnt)) {
2348		struct mem_cgroup *parent = parent_mem_cgroup(mem);
2349		__mem_cgroup_free(mem);
2350		if (parent)
2351			mem_cgroup_put(parent);
2352	}
2353}
2354
2355/*
2356 * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
2357 */
2358static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
2359{
2360	if (!mem->res.parent)
2361		return NULL;
2362	return mem_cgroup_from_res_counter(mem->res.parent, res);
2363}
2364
2365#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2366static void __init enable_swap_cgroup(void)
2367{
2368	if (!mem_cgroup_disabled() && really_do_swap_account)
2369		do_swap_account = 1;
2370}
2371#else
2372static void __init enable_swap_cgroup(void)
2373{
2374}
2375#endif
2376
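/*
 * cgroup "create" callback: allocate the mem_cgroup and its per-node/zone
 * LRU bookkeeping.  A child inherits use_hierarchy and swappiness from its
 * parent; when the parent uses hierarchy the child's res_counters are
 * chained to the parent's so that charges propagate upwards, and a
 * reference on the parent is taken (dropped again via mem_cgroup_put()
 * when this mem_cgroup is freed).
 */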
2377static struct cgroup_subsys_state * __ref
2378mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
2379{
2380	struct mem_cgroup *mem, *parent;
2381	long error = -ENOMEM;
2382	int node;
2383
2384	mem = mem_cgroup_alloc();
2385	if (!mem)
2386		return ERR_PTR(error);
2387
2388	for_each_node_state(node, N_POSSIBLE)
2389		if (alloc_mem_cgroup_per_zone_info(mem, node))
2390			goto free_out;
2391	/* root ? */
2392	if (cont->parent == NULL) {
2393		enable_swap_cgroup();
2394		parent = NULL;
2395	} else {
2396		parent = mem_cgroup_from_cont(cont->parent);
2397		mem->use_hierarchy = parent->use_hierarchy;
2398	}
2399
2400	if (parent && parent->use_hierarchy) {
2401		res_counter_init(&mem->res, &parent->res);
2402		res_counter_init(&mem->memsw, &parent->memsw);
2403		/*
2404		 * We increment refcnt of the parent to ensure that we can
2405		 * safely access it on res_counter_charge/uncharge.
2406		 * This refcnt will be decremented when freeing this
2407		 * mem_cgroup (see mem_cgroup_put()).
2408		 */
2409		mem_cgroup_get(parent);
2410	} else {
2411		res_counter_init(&mem->res, NULL);
2412		res_counter_init(&mem->memsw, NULL);
2413	}
2414	mem->last_scanned_child = 0;
2415	spin_lock_init(&mem->reclaim_param_lock);
2416
2417	if (parent)
2418		mem->swappiness = get_swappiness(parent);
2419	atomic_set(&mem->refcnt, 1);
2420	return &mem->css;
2421free_out:
2422	__mem_cgroup_free(mem);
2423	return ERR_PTR(error);
2424}
2425
2426static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
2427					struct cgroup *cont)
2428{
2429	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2430
2431	return mem_cgroup_force_empty(mem, false);
2432}
2433
2434static void mem_cgroup_destroy(struct cgroup_subsys *ss,
2435				struct cgroup *cont)
2436{
2437	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2438
2439	mem_cgroup_put(mem);
2440}
2441
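/*
 * cgroup "populate" callback: create the base memory.* files and, when swap
 * accounting is enabled, the memory.memsw.* files as well.
 */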
2442static int mem_cgroup_populate(struct cgroup_subsys *ss,
2443				struct cgroup *cont)
2444{
2445	int ret;
2446
2447	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2448				ARRAY_SIZE(mem_cgroup_files));
2449
2450	if (!ret)
2451		ret = register_memsw_files(cont, ss);
2452	return ret;
2453}
2454
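/*
 * cgroup "attach" callback.  Charges are not migrated to the new cgroup yet
 * (see the FIXME below); for now the handler only takes and releases
 * memcg_tasklist so that task moves are serialized against the other users
 * of that mutex.
 */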
2455static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2456				struct cgroup *cont,
2457				struct cgroup *old_cont,
2458				struct task_struct *p)
2459{
2460	mutex_lock(&memcg_tasklist);
2461	/*
2462	 * FIXME: it would be better to move this task's charges from the old
2463	 * memcg to the new one, but that is still only on the TODO list.
2464	 */
2465	mutex_unlock(&memcg_tasklist);
2466}
2467
2468struct cgroup_subsys mem_cgroup_subsys = {
2469	.name = "memory",
2470	.subsys_id = mem_cgroup_subsys_id,
2471	.create = mem_cgroup_create,
2472	.pre_destroy = mem_cgroup_pre_destroy,
2473	.destroy = mem_cgroup_destroy,
2474	.populate = mem_cgroup_populate,
2475	.attach = mem_cgroup_move_task,
2476	.early_init = 0,
2477	.use_id = 1,
2478};
2479
2480#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2481
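/*
 * Boot-time opt-out: booting with "noswapaccount" on the kernel command
 * line keeps do_swap_account disabled even though the kernel was built
 * with CONFIG_CGROUP_MEM_RES_CTLR_SWAP (see enable_swap_cgroup() above).
 */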
2482static int __init disable_swap_account(char *s)
2483{
2484	really_do_swap_account = 0;
2485	return 1;
2486}
2487__setup("noswapaccount", disable_swap_account);
2488#endif
2489