memcontrol.c revision 8289546e573d5ff681cdf0fc7a1184cca66fdb55
1/* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 * GNU General Public License for more details.
18 */
19
20#include <linux/res_counter.h>
21#include <linux/memcontrol.h>
22#include <linux/cgroup.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/page-flags.h>
26#include <linux/backing-dev.h>
27#include <linux/bit_spinlock.h>
28#include <linux/rcupdate.h>
29#include <linux/swap.h>
30#include <linux/spinlock.h>
31#include <linux/fs.h>
32#include <linux/seq_file.h>
33
34#include <asm/uaccess.h>
35
36struct cgroup_subsys mem_cgroup_subsys;
37static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
38
39/*
40 * Statistics for memory cgroup.
41 */
42enum mem_cgroup_stat_index {
43	/*
44	 * For MEM_CGROUP_TYPE_ALL, usage = pagecache + rss.
45	 */
46	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
47	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as rss */
48
49	MEM_CGROUP_STAT_NSTATS,
50};
51
52struct mem_cgroup_stat_cpu {
53	s64 count[MEM_CGROUP_STAT_NSTATS];
54} ____cacheline_aligned_in_smp;
55
56struct mem_cgroup_stat {
57	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
58};
59
60/*
61 * Used for accounting with irqs disabled, so there is no need to bump the preempt count.
62 */
63static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
64		enum mem_cgroup_stat_index idx, int val)
65{
66	int cpu = smp_processor_id();
67	stat->cpustat[cpu].count[idx] += val;
68}
69
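/*
 * Sum one per-cpu statistics counter over all possible cpus. No lock is
 * taken here, so the result is only an approximation.
 */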
70static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
71		enum mem_cgroup_stat_index idx)
72{
73	int cpu;
74	s64 ret = 0;
75	for_each_possible_cpu(cpu)
76		ret += stat->cpustat[cpu].count[idx];
77	return ret;
78}
79
80/*
81 * per-zone information in memory controller.
82 */
83
84enum mem_cgroup_zstat_index {
85	MEM_CGROUP_ZSTAT_ACTIVE,
86	MEM_CGROUP_ZSTAT_INACTIVE,
87
88	NR_MEM_CGROUP_ZSTAT,
89};
90
91struct mem_cgroup_per_zone {
92	/*
93	 * spin_lock to protect the per cgroup LRU
94	 */
95	spinlock_t		lru_lock;
96	struct list_head	active_list;
97	struct list_head	inactive_list;
98	unsigned long count[NR_MEM_CGROUP_ZSTAT];
99};
100/* Macro for accessing counter */
101#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
102
103struct mem_cgroup_per_node {
104	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
105};
106
107struct mem_cgroup_lru_info {
108	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
109};
110
111/*
112 * The memory controller data structure. The memory controller controls both
113 * page cache and RSS per cgroup. We would eventually like to provide
114 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
115 * to help the administrator determine what knobs to tune.
116 *
117 * TODO: Add a water mark for the memory controller. Reclaim will begin when
118 * we hit the water mark. Maybe even add a low water mark, such that
119 * no reclaim occurs from a cgroup at its low water mark; this is
120 * a feature that will be implemented much later.
121 */
122struct mem_cgroup {
123	struct cgroup_subsys_state css;
124	/*
125	 * the counter to account for memory usage
126	 */
127	struct res_counter res;
128	/*
129	 * Per cgroup active and inactive list, similar to the
130	 * per zone LRU lists.
131	 */
132	struct mem_cgroup_lru_info info;
133
134	int	prev_priority;	/* for recording reclaim priority */
135	/*
136	 * statistics.
137	 */
138	struct mem_cgroup_stat stat;
139};
140
141/*
142 * We use the lower bit of the page->page_cgroup pointer as a bit spin
143 * lock.  We need to ensure that page->page_cgroup is at least two
144 * byte aligned (based on comments from Nick Piggin).  But since
145 * bit_spin_lock doesn't actually set that lock bit in a non-debug
146 * uniprocessor kernel, we should avoid setting it here too.
147 */
148#define PAGE_CGROUP_LOCK_BIT 	0x0
149#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
150#define PAGE_CGROUP_LOCK 	(1 << PAGE_CGROUP_LOCK_BIT)
151#else
152#define PAGE_CGROUP_LOCK	0x0
153#endif
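/*
 * page->page_cgroup thus stores both the page_cgroup pointer and the lock
 * bit: page_assign_page_cgroup() ORs PAGE_CGROUP_LOCK into the stored value
 * (a no-op where the macro is 0) and page_get_page_cgroup() masks it back
 * out.
 */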
154
155/*
156 * A page_cgroup is associated with every page descriptor. The
157 * page_cgroup helps us identify information about the cgroup.
158 */
159struct page_cgroup {
160	struct list_head lru;		/* per cgroup LRU list */
161	struct page *page;
162	struct mem_cgroup *mem_cgroup;
163	atomic_t ref_cnt;		/* Helpful when pages move b/w  */
164					/* mapped and cached states     */
165	int	 flags;
166};
167#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
168#define PAGE_CGROUP_FLAG_ACTIVE (0x2)	/* page is active in this cgroup */
169
170static inline int page_cgroup_nid(struct page_cgroup *pc)
171{
172	return page_to_nid(pc->page);
173}
174
175static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
176{
177	return page_zonenum(pc->page);
178}
179
180enum {
181	MEM_CGROUP_TYPE_UNSPEC = 0,
182	MEM_CGROUP_TYPE_MAPPED,
183	MEM_CGROUP_TYPE_CACHED,
184	MEM_CGROUP_TYPE_ALL,
185	MEM_CGROUP_TYPE_MAX,
186};
187
188enum charge_type {
189	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
190	MEM_CGROUP_CHARGE_TYPE_MAPPED,
191};
192
193
194/*
195 * Always modified under the lru lock, so preempt_disable() is not necessary.
196 */
197static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
198					bool charge)
199{
200	int val = (charge)? 1 : -1;
201	struct mem_cgroup_stat *stat = &mem->stat;
202	VM_BUG_ON(!irqs_disabled());
203
204	if (flags & PAGE_CGROUP_FLAG_CACHE)
205		__mem_cgroup_stat_add_safe(stat,
206					MEM_CGROUP_STAT_CACHE, val);
207	else
208		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
209}
210
211static inline struct mem_cgroup_per_zone *
212mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
213{
214	BUG_ON(!mem->info.nodeinfo[nid]);
215	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
216}
217
218static inline struct mem_cgroup_per_zone *
219page_cgroup_zoneinfo(struct page_cgroup *pc)
220{
221	struct mem_cgroup *mem = pc->mem_cgroup;
222	int nid = page_cgroup_nid(pc);
223	int zid = page_cgroup_zid(pc);
224
225	return mem_cgroup_zoneinfo(mem, nid, zid);
226}
227
228static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
229					enum mem_cgroup_zstat_index idx)
230{
231	int nid, zid;
232	struct mem_cgroup_per_zone *mz;
233	u64 total = 0;
234
235	for_each_online_node(nid)
236		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
237			mz = mem_cgroup_zoneinfo(mem, nid, zid);
238			total += MEM_CGROUP_ZSTAT(mz, idx);
239		}
240	return total;
241}
242
243static struct mem_cgroup init_mem_cgroup;
244
245static inline
246struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
247{
248	return container_of(cgroup_subsys_state(cont,
249				mem_cgroup_subsys_id), struct mem_cgroup,
250				css);
251}
252
253static inline
254struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
255{
256	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
257				struct mem_cgroup, css);
258}
259
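/*
 * Called when an mm_struct is set up: point it at the cgroup of the owning
 * task and take a reference on that cgroup's css, which is dropped again in
 * mm_free_cgroup().
 */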
260void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
261{
262	struct mem_cgroup *mem;
263
264	mem = mem_cgroup_from_task(p);
265	css_get(&mem->css);
266	mm->mem_cgroup = mem;
267}
268
269void mm_free_cgroup(struct mm_struct *mm)
270{
271	css_put(&mm->mem_cgroup->css);
272}
273
274static inline int page_cgroup_locked(struct page *page)
275{
276	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
277					&page->page_cgroup);
278}
279
280static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
281{
282	VM_BUG_ON(!page_cgroup_locked(page));
283	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
284}
285
286struct page_cgroup *page_get_page_cgroup(struct page *page)
287{
288	return (struct page_cgroup *)
289		(page->page_cgroup & ~PAGE_CGROUP_LOCK);
290}
291
292static void __always_inline lock_page_cgroup(struct page *page)
293{
294	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
295	VM_BUG_ON(!page_cgroup_locked(page));
296}
297
298static void __always_inline unlock_page_cgroup(struct page *page)
299{
300	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
301}
302
303/*
304 * Clear page->page_cgroup member under lock_page_cgroup().
305 * If the given "pc" value is different from the current page->page_cgroup,
306 * page->page_cgroup is not cleared.
307 * Returns the value of page->page_cgroup at the time the lock was taken.
308 * A caller can detect a failed clear by checking
309 *  clear_page_cgroup(page, pc) == pc
310 */
311
312static struct page_cgroup *clear_page_cgroup(struct page *page,
313						struct page_cgroup *pc)
314{
315	struct page_cgroup *ret;
316	/* lock and clear */
317	lock_page_cgroup(page);
318	ret = page_get_page_cgroup(page);
319	if (likely(ret == pc))
320		page_assign_page_cgroup(page, NULL);
321	unlock_page_cgroup(page);
322	return ret;
323}
324
325static void __mem_cgroup_remove_list(struct page_cgroup *pc)
326{
327	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
328	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
329
330	if (from)
331		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
332	else
333		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
334
335	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
336	list_del_init(&pc->lru);
337}
338
339static void __mem_cgroup_add_list(struct page_cgroup *pc)
340{
341	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
342	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
343
344	if (!to) {
345		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
346		list_add(&pc->lru, &mz->inactive_list);
347	} else {
348		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
349		list_add(&pc->lru, &mz->active_list);
350	}
351	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
352}
353
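/*
 * Move pc between this zone's active and inactive lists and keep the
 * MEM_CGROUP_ZSTAT counters in sync. The caller must hold mz->lru_lock.
 */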
354static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
355{
356	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
357	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
358
359	if (from)
360		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
361	else
362		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
363
364	if (active) {
365		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
366		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
367		list_move(&pc->lru, &mz->active_list);
368	} else {
369		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
370		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
371		list_move(&pc->lru, &mz->inactive_list);
372	}
373}
374
375int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
376{
377	int ret;
378
379	task_lock(task);
380	ret = task->mm && mm_match_cgroup(task->mm, mem);
381	task_unlock(task);
382	return ret;
383}
384
385/*
386 * This routine assumes that the appropriate zone's lru lock is already held
387 */
388void mem_cgroup_move_lists(struct page *page, bool active)
389{
390	struct page_cgroup *pc;
391	struct mem_cgroup_per_zone *mz;
392	unsigned long flags;
393
394	pc = page_get_page_cgroup(page);
395	if (!pc)
396		return;
397
398	mz = page_cgroup_zoneinfo(pc);
399	spin_lock_irqsave(&mz->lru_lock, flags);
400	__mem_cgroup_move_lists(pc, active);
401	spin_unlock_irqrestore(&mz->lru_lock, flags);
402}
403
404/*
405 * Calculate the mapped_ratio under the memory controller. This will be used
406 * in vmscan.c to determine whether we have to reclaim mapped pages.
407 */
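/*
 * A worked example with hypothetical numbers: a cgroup using 400MB
 * (102400 4KB pages, so total = 102401) with an RSS count of 51200 pages
 * yields 51200 * 100 / 102401 = 49, i.e. roughly half of the charged pages
 * are mapped.
 */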
408int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
409{
410	long total, rss;
411
412	/*
413	 * usage is recorded in bytes. But, here, we assume the number of
414	 * physical pages can be represented by "long" on any arch.
415	 */
416	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
417	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
418	return (int)((rss * 100L) / total);
419}
420/*
421 * This function is called from vmscan.c. In the page reclaim loop, the
422 * balance between the active and inactive lists is calculated. For memory
423 * controller page reclaim, we should use the mem_cgroup's imbalance rather
424 * than the zone's global lru imbalance.
425 */
426long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
427{
428	unsigned long active, inactive;
429	/* active and inactive are the number of pages. 'long' is ok.*/
430	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
431	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
432	return (long) (active / (inactive + 1));
433}
434
435/*
436 * prev_priority control...this will be used in memory reclaim path.
437 */
438int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
439{
440	return mem->prev_priority;
441}
442
443void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
444{
445	if (priority < mem->prev_priority)
446		mem->prev_priority = priority;
447}
448
449void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
450{
451	mem->prev_priority = priority;
452}
453
454/*
455 * Calculate # of pages to be scanned in this priority/zone.
456 * See also vmscan.c
457 *
458 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
459 * (see include/linux/mmzone.h)
460 */
461
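/*
 * Example with hypothetical numbers: with 4096 pages on a per-zone active
 * list, priority 10 scans 4096 >> 10 = 4 pages, while priority 0 scans all
 * 4096.
 */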
462long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
463				   struct zone *zone, int priority)
464{
465	long nr_active;
466	int nid = zone->zone_pgdat->node_id;
467	int zid = zone_idx(zone);
468	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
469
470	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
471	return (nr_active >> priority);
472}
473
474long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
475					struct zone *zone, int priority)
476{
477	long nr_inactive;
478	int nid = zone->zone_pgdat->node_id;
479	int zid = zone_idx(zone);
480	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
481
482	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
483
484	return (nr_inactive >> priority);
485}
486
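/*
 * Isolate up to nr_to_scan pages from this cgroup's per-zone active or
 * inactive list (scanning from the tail), moving pages that pass
 * __isolate_lru_page() onto *dst. Pages found on the "wrong" LRU are moved
 * to the matching list instead of being isolated. Returns the number of
 * pages taken and reports the scan count via *scanned.
 */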
487unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
488					struct list_head *dst,
489					unsigned long *scanned, int order,
490					int mode, struct zone *z,
491					struct mem_cgroup *mem_cont,
492					int active)
493{
494	unsigned long nr_taken = 0;
495	struct page *page;
496	unsigned long scan;
497	LIST_HEAD(pc_list);
498	struct list_head *src;
499	struct page_cgroup *pc, *tmp;
500	int nid = z->zone_pgdat->node_id;
501	int zid = zone_idx(z);
502	struct mem_cgroup_per_zone *mz;
503
504	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
505	if (active)
506		src = &mz->active_list;
507	else
508		src = &mz->inactive_list;
509
510
511	spin_lock(&mz->lru_lock);
512	scan = 0;
513	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
514		if (scan >= nr_to_scan)
515			break;
516		page = pc->page;
517
518		if (unlikely(!PageLRU(page)))
519			continue;
520
521		if (PageActive(page) && !active) {
522			__mem_cgroup_move_lists(pc, true);
523			continue;
524		}
525		if (!PageActive(page) && active) {
526			__mem_cgroup_move_lists(pc, false);
527			continue;
528		}
529
530		scan++;
531		list_move(&pc->lru, &pc_list);
532
533		if (__isolate_lru_page(page, mode) == 0) {
534			list_move(&page->lru, dst);
535			nr_taken++;
536		}
537	}
538
539	list_splice(&pc_list, src);
540	spin_unlock(&mz->lru_lock);
541
542	*scanned = scan;
543	return nr_taken;
544}
545
546/*
547 * Charge the memory controller for page usage.
548 * Return
549 * 0 if the charge was successful
550 * < 0 if the cgroup is over its limit
551 */
552static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
553				gfp_t gfp_mask, enum charge_type ctype)
554{
555	struct mem_cgroup *mem;
556	struct page_cgroup *pc;
557	unsigned long flags;
558	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
559	struct mem_cgroup_per_zone *mz;
560
561	/*
562	 * Should page_cgroups go to their own slab?
563	 * One could optimize the performance of the charging routine
564	 * by saving a bit in the page_flags and using it as a lock
565	 * to see if the cgroup page already has a page_cgroup associated
566	 * with it
567	 */
568retry:
569	lock_page_cgroup(page);
570	pc = page_get_page_cgroup(page);
571	/*
572	 * The page_cgroup exists and
573	 * the page has already been accounted.
574	 */
575	if (pc) {
576		if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
577			/* is this page being uncharged? */
578			unlock_page_cgroup(page);
579			cpu_relax();
580			goto retry;
581		} else {
582			unlock_page_cgroup(page);
583			goto done;
584		}
585	}
586	unlock_page_cgroup(page);
587
588	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
589	if (pc == NULL)
590		goto err;
591
592	/*
593	 * We always charge the cgroup the mm_struct belongs to.
594	 * The mm_struct's mem_cgroup changes on task migration if the
595	 * thread group leader migrates. It's possible that mm is not
596	 * set, if so charge the init_mm (happens for pagecache usage).
597	 */
598	if (!mm)
599		mm = &init_mm;
600
601	rcu_read_lock();
602	mem = rcu_dereference(mm->mem_cgroup);
603	/*
604	 * For every charge from the cgroup, increment reference
605	 * count
606	 */
607	css_get(&mem->css);
608	rcu_read_unlock();
609
610	/*
611	 * If we created the page_cgroup, we should free it on exceeding
612	 * the cgroup limit.
613	 */
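	/*
	 * Try to charge one page; on failure reclaim from this cgroup and
	 * retry, invoking the cgroup OOM killer once MEM_CGROUP_RECLAIM_RETRIES
	 * reclaim passes have failed to bring usage back under the limit.
	 */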
614	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
615		if (!(gfp_mask & __GFP_WAIT))
616			goto out;
617
618		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
619			continue;
620
621		/*
622 		 * try_to_free_mem_cgroup_pages() might not give us a full
623 		 * picture of reclaim. Some pages are reclaimed and might be
624 		 * moved to swap cache or just unmapped from the cgroup.
625 		 * Check the limit again to see if the reclaim reduced the
626 		 * current usage of the cgroup before giving up
627 		 */
628		if (res_counter_check_under_limit(&mem->res))
629			continue;
630
631		if (!nr_retries--) {
632			mem_cgroup_out_of_memory(mem, gfp_mask);
633			goto out;
634		}
635		congestion_wait(WRITE, HZ/10);
636	}
637
638	atomic_set(&pc->ref_cnt, 1);
639	pc->mem_cgroup = mem;
640	pc->page = page;
641	pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
642	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
643		pc->flags |= PAGE_CGROUP_FLAG_CACHE;
644
645	lock_page_cgroup(page);
646	if (page_get_page_cgroup(page)) {
647		unlock_page_cgroup(page);
648		/*
649		 * Another charge has been added to this page already.
650		 * We take lock_page_cgroup(page) again and read
651		 * page->page_cgroup and increment the refcnt, so just retrying is OK.
652		 */
653		res_counter_uncharge(&mem->res, PAGE_SIZE);
654		css_put(&mem->css);
655		kfree(pc);
656		goto retry;
657	}
658	page_assign_page_cgroup(page, pc);
659	unlock_page_cgroup(page);
660
661	mz = page_cgroup_zoneinfo(pc);
662	spin_lock_irqsave(&mz->lru_lock, flags);
663	/* Update statistics vector */
664	__mem_cgroup_add_list(pc);
665	spin_unlock_irqrestore(&mz->lru_lock, flags);
666
667done:
668	return 0;
669out:
670	css_put(&mem->css);
671	kfree(pc);
672err:
673	return -ENOMEM;
674}
675
676int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
677			gfp_t gfp_mask)
678{
679	return mem_cgroup_charge_common(page, mm, gfp_mask,
680			MEM_CGROUP_CHARGE_TYPE_MAPPED);
681}
682
683/*
684 * See if the cached page should be charged at all.
685 */
686int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
687				gfp_t gfp_mask)
688{
689	int ret = 0;
690	if (!mm)
691		mm = &init_mm;
692
693	ret = mem_cgroup_charge_common(page, mm, gfp_mask,
694				MEM_CGROUP_CHARGE_TYPE_CACHE);
695	return ret;
696}
697
698/*
699 * Uncharging is always a welcome operation; we never complain, we simply
700 * uncharge.
701 */
702void mem_cgroup_uncharge_page(struct page *page)
703{
704	struct page_cgroup *pc;
705	struct mem_cgroup *mem;
706	struct mem_cgroup_per_zone *mz;
707	unsigned long flags;
708
709	/*
710	 * Check if our page_cgroup is valid
711	 */
712	lock_page_cgroup(page);
713	pc = page_get_page_cgroup(page);
714	if (!pc)
715		goto unlock;
716
717	if (atomic_dec_and_test(&pc->ref_cnt)) {
718		page = pc->page;
719		mz = page_cgroup_zoneinfo(pc);
720		/*
721		 * get page->page_cgroup and clear it under lock.
722		 * force_empty can drop page->page_cgroup without checking the refcnt.
723		 */
724		unlock_page_cgroup(page);
725		if (clear_page_cgroup(page, pc) == pc) {
726			mem = pc->mem_cgroup;
727			css_put(&mem->css);
728			res_counter_uncharge(&mem->res, PAGE_SIZE);
729			spin_lock_irqsave(&mz->lru_lock, flags);
730			__mem_cgroup_remove_list(pc);
731			spin_unlock_irqrestore(&mz->lru_lock, flags);
732			kfree(pc);
733		}
734		lock_page_cgroup(page);
735	}
736
737unlock:
738	unlock_page_cgroup(page);
739}
740
741/*
742 * Returns non-zero if a page (under migration) has a valid page_cgroup member.
743 * The refcnt of the page_cgroup is incremented.
744 */
745
746int mem_cgroup_prepare_migration(struct page *page)
747{
748	struct page_cgroup *pc;
749	int ret = 0;
750	lock_page_cgroup(page);
751	pc = page_get_page_cgroup(page);
752	if (pc && atomic_inc_not_zero(&pc->ref_cnt))
753		ret = 1;
754	unlock_page_cgroup(page);
755	return ret;
756}
757
758void mem_cgroup_end_migration(struct page *page)
759{
760	mem_cgroup_uncharge_page(page);
761}
762/*
763 * We know both *page* and *newpage* are now not on the LRU and PG_locked.
764 * There is no race with the uncharge() routines because the page_cgroup for
765 * *page* holds an extra reference taken by mem_cgroup_prepare_migration().
766 */
767
768void mem_cgroup_page_migration(struct page *page, struct page *newpage)
769{
770	struct page_cgroup *pc;
771	struct mem_cgroup *mem;
772	unsigned long flags;
773	struct mem_cgroup_per_zone *mz;
774retry:
775	pc = page_get_page_cgroup(page);
776	if (!pc)
777		return;
778	mem = pc->mem_cgroup;
779	mz = page_cgroup_zoneinfo(pc);
780	if (clear_page_cgroup(page, pc) != pc)
781		goto retry;
782	spin_lock_irqsave(&mz->lru_lock, flags);
783
784	__mem_cgroup_remove_list(pc);
785	spin_unlock_irqrestore(&mz->lru_lock, flags);
786
787	pc->page = newpage;
788	lock_page_cgroup(newpage);
789	page_assign_page_cgroup(newpage, pc);
790	unlock_page_cgroup(newpage);
791
792	mz = page_cgroup_zoneinfo(pc);
793	spin_lock_irqsave(&mz->lru_lock, flags);
794	__mem_cgroup_add_list(pc);
795	spin_unlock_irqrestore(&mz->lru_lock, flags);
796	return;
797}
798
799/*
800 * This routine traverses the page_cgroups on the given list and drops them all.
801 * It ignores page_cgroup->ref_cnt.
802 * *And* it doesn't reclaim the pages themselves; it just removes the page_cgroups.
803 */
804#define FORCE_UNCHARGE_BATCH	(128)
805static void
806mem_cgroup_force_empty_list(struct mem_cgroup *mem,
807			    struct mem_cgroup_per_zone *mz,
808			    int active)
809{
810	struct page_cgroup *pc;
811	struct page *page;
812	int count;
813	unsigned long flags;
814	struct list_head *list;
815
816	if (active)
817		list = &mz->active_list;
818	else
819		list = &mz->inactive_list;
820
821	if (list_empty(list))
822		return;
823retry:
824	count = FORCE_UNCHARGE_BATCH;
825	spin_lock_irqsave(&mz->lru_lock, flags);
826
827	while (--count && !list_empty(list)) {
828		pc = list_entry(list->prev, struct page_cgroup, lru);
829		page = pc->page;
830		/* Avoid race with charge */
831		atomic_set(&pc->ref_cnt, 0);
832		if (clear_page_cgroup(page, pc) == pc) {
833			css_put(&mem->css);
834			res_counter_uncharge(&mem->res, PAGE_SIZE);
835			__mem_cgroup_remove_list(pc);
836			kfree(pc);
837		} else	/* being uncharged? ...relax and retry */
838			break;
839	}
840	spin_unlock_irqrestore(&mz->lru_lock, flags);
841	if (!list_empty(list)) {
842		cond_resched();
843		goto retry;
844	}
845	return;
846}
847
848/*
849 * Make the mem_cgroup's charge 0 if there are no tasks.
850 * This enables deleting this mem_cgroup.
851 */
852
853int mem_cgroup_force_empty(struct mem_cgroup *mem)
854{
855	int ret = -EBUSY;
856	int node, zid;
857	css_get(&mem->css);
858	/*
859	 * page reclaim code (kswapd etc..) will move pages between
860	 * active_list <-> inactive_list while we don't take a lock.
861	 * So, we have to loop here until all lists are empty.
862	 */
863	while (mem->res.usage > 0) {
864		if (atomic_read(&mem->css.cgroup->count) > 0)
865			goto out;
866		for_each_node_state(node, N_POSSIBLE)
867			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
868				struct mem_cgroup_per_zone *mz;
869				mz = mem_cgroup_zoneinfo(mem, node, zid);
870				/* drop all page_cgroup in active_list */
871				mem_cgroup_force_empty_list(mem, mz, 1);
872				/* drop all page_cgroup in inactive_list */
873				mem_cgroup_force_empty_list(mem, mz, 0);
874			}
875	}
876	ret = 0;
877out:
878	css_put(&mem->css);
879	return ret;
880}
881
882
883
884int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
885{
886	*tmp = memparse(buf, &buf);
887	if (*buf != '\0')
888		return -EINVAL;
889
890	/*
891	 * Round the value up to the nearest page boundary
892	 */
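	/*
	 * Example (hypothetical input): "4k" parses to 4096 and is already page
	 * aligned, while "4097" is rounded up to 8192 with 4KB pages.
	 */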
893	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
894	return 0;
895}
896
897static ssize_t mem_cgroup_read(struct cgroup *cont,
898			struct cftype *cft, struct file *file,
899			char __user *userbuf, size_t nbytes, loff_t *ppos)
900{
901	return res_counter_read(&mem_cgroup_from_cont(cont)->res,
902				cft->private, userbuf, nbytes, ppos,
903				NULL);
904}
905
906static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
907				struct file *file, const char __user *userbuf,
908				size_t nbytes, loff_t *ppos)
909{
910	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
911				cft->private, userbuf, nbytes, ppos,
912				mem_cgroup_write_strategy);
913}
914
915static ssize_t mem_force_empty_write(struct cgroup *cont,
916				struct cftype *cft, struct file *file,
917				const char __user *userbuf,
918				size_t nbytes, loff_t *ppos)
919{
920	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
921	int ret;
922	ret = mem_cgroup_force_empty(mem);
923	if (!ret)
924		ret = nbytes;
925	return ret;
926}
927
928/*
929 * Note: this should be removed once cgroup supports write-only files.
930 */
931
932static ssize_t mem_force_empty_read(struct cgroup *cont,
933				struct cftype *cft,
934				struct file *file, char __user *userbuf,
935				size_t nbytes, loff_t *ppos)
936{
937	return -EINVAL;
938}
939
940
941static const struct mem_cgroup_stat_desc {
942	const char *msg;
943	u64 unit;
944} mem_cgroup_stat_desc[] = {
945	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
946	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
947};
948
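/*
 * Print each accumulated counter converted to bytes (count * PAGE_SIZE),
 * followed by the active and inactive page totals, also scaled to bytes.
 */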
949static int mem_control_stat_show(struct seq_file *m, void *arg)
950{
951	struct cgroup *cont = m->private;
952	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
953	struct mem_cgroup_stat *stat = &mem_cont->stat;
954	int i;
955
956	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
957		s64 val;
958
959		val = mem_cgroup_read_stat(stat, i);
960		val *= mem_cgroup_stat_desc[i].unit;
961		seq_printf(m, "%s %lld\n", mem_cgroup_stat_desc[i].msg,
962				(long long)val);
963	}
964	/* showing # of active pages */
965	{
966		unsigned long active, inactive;
967
968		inactive = mem_cgroup_get_all_zonestat(mem_cont,
969						MEM_CGROUP_ZSTAT_INACTIVE);
970		active = mem_cgroup_get_all_zonestat(mem_cont,
971						MEM_CGROUP_ZSTAT_ACTIVE);
972		seq_printf(m, "active %ld\n", (active) * PAGE_SIZE);
973		seq_printf(m, "inactive %ld\n", (inactive) * PAGE_SIZE);
974	}
975	return 0;
976}
977
978static const struct file_operations mem_control_stat_file_operations = {
979	.read = seq_read,
980	.llseek = seq_lseek,
981	.release = single_release,
982};
983
984static int mem_control_stat_open(struct inode *unused, struct file *file)
985{
986	/* XXX __d_cont */
987	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
988
989	file->f_op = &mem_control_stat_file_operations;
990	return single_open(file, mem_control_stat_show, cont);
991}
992
993
994
995static struct cftype mem_cgroup_files[] = {
996	{
997		.name = "usage_in_bytes",
998		.private = RES_USAGE,
999		.read = mem_cgroup_read,
1000	},
1001	{
1002		.name = "limit_in_bytes",
1003		.private = RES_LIMIT,
1004		.write = mem_cgroup_write,
1005		.read = mem_cgroup_read,
1006	},
1007	{
1008		.name = "failcnt",
1009		.private = RES_FAILCNT,
1010		.read = mem_cgroup_read,
1011	},
1012	{
1013		.name = "force_empty",
1014		.write = mem_force_empty_write,
1015		.read = mem_force_empty_read,
1016	},
1017	{
1018		.name = "stat",
1019		.open = mem_control_stat_open,
1020	},
1021};
1022
1023static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1024{
1025	struct mem_cgroup_per_node *pn;
1026	struct mem_cgroup_per_zone *mz;
1027	int zone;
1028	/*
1029	 * This routine is called against possible nodes.
1030	 * But it's a BUG to call kmalloc() against an offline node.
1031	 *
1032	 * TODO: this routine can waste a lot of memory for nodes which will
1033	 *       never be onlined. It would be better to use a memory hotplug
1034	 *       callback function.
1035	 */
1036	if (node_state(node, N_HIGH_MEMORY))
1037		pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
1038	else
1039		pn = kmalloc(sizeof(*pn), GFP_KERNEL);
1040	if (!pn)
1041		return 1;
1042
1043	mem->info.nodeinfo[node] = pn;
1044	memset(pn, 0, sizeof(*pn));
1045
1046	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
1047		mz = &pn->zoneinfo[zone];
1048		INIT_LIST_HEAD(&mz->active_list);
1049		INIT_LIST_HEAD(&mz->inactive_list);
1050		spin_lock_init(&mz->lru_lock);
1051	}
1052	return 0;
1053}
1054
1055static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1056{
1057	kfree(mem->info.nodeinfo[node]);
1058}
1059
1062
1063static struct cgroup_subsys_state *
1064mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
1065{
1066	struct mem_cgroup *mem;
1067	int node;
1068
1069	if (unlikely((cont->parent) == NULL)) {
1070		mem = &init_mem_cgroup;
1071		init_mm.mem_cgroup = mem;
1072	} else
1073		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
1074
1075	if (mem == NULL)
1076		return ERR_PTR(-ENOMEM);
1077
1078	res_counter_init(&mem->res);
1079
1080	memset(&mem->info, 0, sizeof(mem->info));
1081
1082	for_each_node_state(node, N_POSSIBLE)
1083		if (alloc_mem_cgroup_per_zone_info(mem, node))
1084			goto free_out;
1085
1086	return &mem->css;
1087free_out:
1088	for_each_node_state(node, N_POSSIBLE)
1089		free_mem_cgroup_per_zone_info(mem, node);
1090	if (cont->parent != NULL)
1091		kfree(mem);
1092	return ERR_PTR(-ENOMEM);
1093}
1094
1095static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
1096					struct cgroup *cont)
1097{
1098	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1099	mem_cgroup_force_empty(mem);
1100}
1101
1102static void mem_cgroup_destroy(struct cgroup_subsys *ss,
1103				struct cgroup *cont)
1104{
1105	int node;
1106	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1107
1108	for_each_node_state(node, N_POSSIBLE)
1109		free_mem_cgroup_per_zone_info(mem, node);
1110
1111	kfree(mem_cgroup_from_cont(cont));
1112}
1113
1114static int mem_cgroup_populate(struct cgroup_subsys *ss,
1115				struct cgroup *cont)
1116{
1117	return cgroup_add_files(cont, ss, mem_cgroup_files,
1118					ARRAY_SIZE(mem_cgroup_files));
1119}
1120
1121static void mem_cgroup_move_task(struct cgroup_subsys *ss,
1122				struct cgroup *cont,
1123				struct cgroup *old_cont,
1124				struct task_struct *p)
1125{
1126	struct mm_struct *mm;
1127	struct mem_cgroup *mem, *old_mem;
1128
1129	mm = get_task_mm(p);
1130	if (mm == NULL)
1131		return;
1132
1133	mem = mem_cgroup_from_cont(cont);
1134	old_mem = mem_cgroup_from_cont(old_cont);
1135
1136	if (mem == old_mem)
1137		goto out;
1138
1139	/*
1140	 * Only thread group leaders are allowed to migrate; the mm_struct is
1141	 * in effect owned by the leader.
1142	 */
1143	if (p->tgid != p->pid)
1144		goto out;
1145
1146	css_get(&mem->css);
1147	rcu_assign_pointer(mm->mem_cgroup, mem);
1148	css_put(&old_mem->css);
1149
1150out:
1151	mmput(mm);
1152	return;
1153}
1154
1155struct cgroup_subsys mem_cgroup_subsys = {
1156	.name = "memory",
1157	.subsys_id = mem_cgroup_subsys_id,
1158	.create = mem_cgroup_create,
1159	.pre_destroy = mem_cgroup_pre_destroy,
1160	.destroy = mem_cgroup_destroy,
1161	.populate = mem_cgroup_populate,
1162	.attach = mem_cgroup_move_task,
1163	.early_init = 0,
1164};
1165