vmstat.c revision 0d6617c7732c083659566117ca620eda6f1a87af
1/*
2 *  linux/mm/vmstat.c
3 *
4 *  Manages VM statistics
5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6 *
7 *  zoned VM statistics
8 *  Copyright (C) 2006 Silicon Graphics, Inc.,
9 *		Christoph Lameter <christoph@lameter.com>
10 */
11#include <linux/fs.h>
12#include <linux/mm.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/cpu.h>
17#include <linux/vmstat.h>
18#include <linux/sched.h>
19#include <linux/math64.h>
20#include <linux/writeback.h>
21#include <linux/compaction.h>
22
23#ifdef CONFIG_VM_EVENT_COUNTERS
24DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
25EXPORT_PER_CPU_SYMBOL(vm_event_states);
26
27static void sum_vm_events(unsigned long *ret)
28{
29	int cpu;
30	int i;
31
32	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
33
34	for_each_online_cpu(cpu) {
35		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
36
37		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
38			ret[i] += this->event[i];
39	}
40}
41
42/*
43 * Accumulate the vm event counters across all CPUs.
44 * The result is unavoidably approximate - it can change
45 * during and after execution of this function.
46*/
47void all_vm_events(unsigned long *ret)
48{
49	get_online_cpus();
50	sum_vm_events(ret);
51	put_online_cpus();
52}
53EXPORT_SYMBOL_GPL(all_vm_events);
54
55#ifdef CONFIG_HOTPLUG
56/*
57 * Fold the foreign cpu events into our own.
58 *
59 * This is adding to the events on one processor
60 * but keeps the global counts constant.
61 */
62void vm_events_fold_cpu(int cpu)
63{
64	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
65	int i;
66
67	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
68		count_vm_events(i, fold_state->event[i]);
69		fold_state->event[i] = 0;
70	}
71}
72#endif /* CONFIG_HOTPLUG */
73
74#endif /* CONFIG_VM_EVENT_COUNTERS */
75
76/*
77 * Manage combined zone based / global counters
78 *
79 * vm_stat contains the global counters
80 */
81atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
82EXPORT_SYMBOL(vm_stat);
83
84#ifdef CONFIG_SMP
85
86int calculate_pressure_threshold(struct zone *zone)
87{
88	int threshold;
89	int watermark_distance;
90
91	/*
92	 * As vmstats are not up to date, there is drift between the estimated
93	 * and real values. For high thresholds and a high number of CPUs, it
94	 * is possible for the min watermark to be breached while the estimated
95	 * value looks fine. The pressure threshold is a reduced value such
96	 * that even the maximum amount of drift will not accidentally breach
97	 * the min watermark
98	 */
99	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
100	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
101
102	/*
103	 * Maximum threshold is 125
104	 */
105	threshold = min(125, threshold);
106
107	return threshold;
108}
109
110int calculate_normal_threshold(struct zone *zone)
111{
112	int threshold;
113	int mem;	/* memory in 128 MB units */
114
115	/*
116	 * The threshold scales with the number of processors and the amount
117	 * of memory per zone. More memory means that we can defer updates for
118	 * longer, more processors could lead to more contention.
119 	 * fls() is used to have a cheap way of logarithmic scaling.
120	 *
121	 * Some sample thresholds:
122	 *
123	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
124	 * ------------------------------------------------------------------
125	 * 8		1		1	0.9-1 GB	4
126	 * 16		2		2	0.9-1 GB	4
127	 * 20 		2		2	1-2 GB		5
128	 * 24		2		2	2-4 GB		6
129	 * 28		2		2	4-8 GB		7
130	 * 32		2		2	8-16 GB		8
131	 * 4		2		2	<128M		1
132	 * 30		4		3	2-4 GB		5
133	 * 48		4		3	8-16 GB		8
134	 * 32		8		4	1-2 GB		4
135	 * 32		8		4	0.9-1GB		4
136	 * 10		16		5	<128M		1
137	 * 40		16		5	900M		4
138	 * 70		64		7	2-4 GB		5
139	 * 84		64		7	4-8 GB		6
140	 * 108		512		9	4-8 GB		6
141	 * 125		1024		10	8-16 GB		8
142	 * 125		1024		10	16-32 GB	9
143	 */
144
145	mem = zone->present_pages >> (27 - PAGE_SHIFT);
146
147	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
148
149	/*
150	 * Maximum threshold is 125
151	 */
152	threshold = min(125, threshold);
153
154	return threshold;
155}
156
157/*
158 * Refresh the thresholds for each zone.
159 */
160void refresh_zone_stat_thresholds(void)
161{
162	struct zone *zone;
163	int cpu;
164	int threshold;
165
166	for_each_populated_zone(zone) {
167		unsigned long max_drift, tolerate_drift;
168
169		threshold = calculate_normal_threshold(zone);
170
171		for_each_online_cpu(cpu)
172			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
173							= threshold;
174
175		/*
176		 * Only set percpu_drift_mark if there is a danger that
177		 * NR_FREE_PAGES reports the low watermark is ok when in fact
178		 * the min watermark could be breached by an allocation
179		 */
180		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
181		max_drift = num_online_cpus() * threshold;
182		if (max_drift > tolerate_drift)
183			zone->percpu_drift_mark = high_wmark_pages(zone) +
184					max_drift;
185	}
186}
187
188void set_pgdat_percpu_threshold(pg_data_t *pgdat,
189				int (*calculate_pressure)(struct zone *))
190{
191	struct zone *zone;
192	int cpu;
193	int threshold;
194	int i;
195
196	for (i = 0; i < pgdat->nr_zones; i++) {
197		zone = &pgdat->node_zones[i];
198		if (!zone->percpu_drift_mark)
199			continue;
200
201		threshold = (*calculate_pressure)(zone);
202		for_each_possible_cpu(cpu)
203			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
204							= threshold;
205	}
206}
207
208/*
209 * For use when we know that interrupts are disabled.
210 */
211void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
212				int delta)
213{
214	struct per_cpu_pageset __percpu *pcp = zone->pageset;
215	s8 __percpu *p = pcp->vm_stat_diff + item;
216	long x;
217	long t;
218
219	x = delta + __this_cpu_read(*p);
220
221	t = __this_cpu_read(pcp->stat_threshold);
222
223	if (unlikely(x > t || x < -t)) {
224		zone_page_state_add(x, zone, item);
225		x = 0;
226	}
227	__this_cpu_write(*p, x);
228}
229EXPORT_SYMBOL(__mod_zone_page_state);
230
231/*
232 * Optimized increment and decrement functions.
233 *
234 * These are only for a single page and therefore can take a struct page *
235 * argument instead of struct zone *. This allows the inclusion of the code
236 * generated for page_zone(page) into the optimized functions.
237 *
238 * No overflow check is necessary and therefore the differential can be
239 * incremented or decremented in place which may allow the compilers to
240 * generate better code.
241 * The increment or decrement is known and therefore one boundary check can
242 * be omitted.
243 *
244 * NOTE: These functions are very performance sensitive. Change only
245 * with care.
246 *
247 * Some processors have inc/dec instructions that are atomic vs an interrupt.
248 * However, the code must first determine the differential location in a zone
249 * based on the processor number and then inc/dec the counter. There is no
250 * guarantee without disabling preemption that the processor will not change
251 * in between and therefore the atomicity vs. interrupt cannot be exploited
252 * in a useful way here.
253 */
254void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
255{
256	struct per_cpu_pageset __percpu *pcp = zone->pageset;
257	s8 __percpu *p = pcp->vm_stat_diff + item;
258	s8 v, t;
259
260	v = __this_cpu_inc_return(*p);
261	t = __this_cpu_read(pcp->stat_threshold);
262	if (unlikely(v > t)) {
263		s8 overstep = t >> 1;
264
265		zone_page_state_add(v + overstep, zone, item);
266		__this_cpu_write(*p, -overstep);
267	}
268}
269
270void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
271{
272	__inc_zone_state(page_zone(page), item);
273}
274EXPORT_SYMBOL(__inc_zone_page_state);
275
276void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
277{
278	struct per_cpu_pageset __percpu *pcp = zone->pageset;
279	s8 __percpu *p = pcp->vm_stat_diff + item;
280	s8 v, t;
281
282	v = __this_cpu_dec_return(*p);
283	t = __this_cpu_read(pcp->stat_threshold);
284	if (unlikely(v < - t)) {
285		s8 overstep = t >> 1;
286
287		zone_page_state_add(v - overstep, zone, item);
288		__this_cpu_write(*p, overstep);
289	}
290}
291
292void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
293{
294	__dec_zone_state(page_zone(page), item);
295}
296EXPORT_SYMBOL(__dec_zone_page_state);
297
298#ifdef CONFIG_CMPXCHG_LOCAL
299/*
300 * If we have cmpxchg_local support then we do not need to incur the overhead
301 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
302 *
303 * mod_state() modifies the zone counter state through atomic per cpu
304 * operations.
305 *
306 * Overstep mode specifies how overstep should handled:
307 *     0       No overstepping
308 *     1       Overstepping half of threshold
309 *     -1      Overstepping minus half of threshold
310*/
311static inline void mod_state(struct zone *zone,
312       enum zone_stat_item item, int delta, int overstep_mode)
313{
314	struct per_cpu_pageset __percpu *pcp = zone->pageset;
315	s8 __percpu *p = pcp->vm_stat_diff + item;
316	long o, n, t, z;
317
318	do {
319		z = 0;  /* overflow to zone counters */
320
321		/*
322		 * The fetching of the stat_threshold is racy. We may apply
323		 * a counter threshold to the wrong the cpu if we get
324		 * rescheduled while executing here. However, the next
325		 * counter update will apply the threshold again and
326		 * therefore bring the counter under the threshold again.
327		 *
328		 * Most of the time the thresholds are the same anyways
329		 * for all cpus in a zone.
330		 */
331		t = this_cpu_read(pcp->stat_threshold);
332
333		o = this_cpu_read(*p);
334		n = delta + o;
335
336		if (n > t || n < -t) {
337			int os = overstep_mode * (t >> 1) ;
338
339			/* Overflow must be added to zone counters */
340			z = n + os;
341			n = -os;
342		}
343	} while (this_cpu_cmpxchg(*p, o, n) != o);
344
345	if (z)
346		zone_page_state_add(z, zone, item);
347}
348
349void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
350					int delta)
351{
352	mod_state(zone, item, delta, 0);
353}
354EXPORT_SYMBOL(mod_zone_page_state);
355
356void inc_zone_state(struct zone *zone, enum zone_stat_item item)
357{
358	mod_state(zone, item, 1, 1);
359}
360
361void inc_zone_page_state(struct page *page, enum zone_stat_item item)
362{
363	mod_state(page_zone(page), item, 1, 1);
364}
365EXPORT_SYMBOL(inc_zone_page_state);
366
367void dec_zone_page_state(struct page *page, enum zone_stat_item item)
368{
369	mod_state(page_zone(page), item, -1, -1);
370}
371EXPORT_SYMBOL(dec_zone_page_state);
372#else
373/*
374 * Use interrupt disable to serialize counter updates
375 */
376void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
377					int delta)
378{
379	unsigned long flags;
380
381	local_irq_save(flags);
382	__mod_zone_page_state(zone, item, delta);
383	local_irq_restore(flags);
384}
385EXPORT_SYMBOL(mod_zone_page_state);
386
387void inc_zone_state(struct zone *zone, enum zone_stat_item item)
388{
389	unsigned long flags;
390
391	local_irq_save(flags);
392	__inc_zone_state(zone, item);
393	local_irq_restore(flags);
394}
395
396void inc_zone_page_state(struct page *page, enum zone_stat_item item)
397{
398	unsigned long flags;
399	struct zone *zone;
400
401	zone = page_zone(page);
402	local_irq_save(flags);
403	__inc_zone_state(zone, item);
404	local_irq_restore(flags);
405}
406EXPORT_SYMBOL(inc_zone_page_state);
407
408void dec_zone_page_state(struct page *page, enum zone_stat_item item)
409{
410	unsigned long flags;
411
412	local_irq_save(flags);
413	__dec_zone_page_state(page, item);
414	local_irq_restore(flags);
415}
416EXPORT_SYMBOL(dec_zone_page_state);
417#endif
418
419/*
420 * Update the zone counters for one cpu.
421 *
422 * The cpu specified must be either the current cpu or a processor that
423 * is not online. If it is the current cpu then the execution thread must
424 * be pinned to the current cpu.
425 *
426 * Note that refresh_cpu_vm_stats strives to only access
427 * node local memory. The per cpu pagesets on remote zones are placed
428 * in the memory local to the processor using that pageset. So the
429 * loop over all zones will access a series of cachelines local to
430 * the processor.
431 *
432 * The call to zone_page_state_add updates the cachelines with the
433 * statistics in the remote zone struct as well as the global cachelines
434 * with the global counters. These could cause remote node cache line
435 * bouncing and will have to be only done when necessary.
436 */
437void refresh_cpu_vm_stats(int cpu)
438{
439	struct zone *zone;
440	int i;
441	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
442
443	for_each_populated_zone(zone) {
444		struct per_cpu_pageset *p;
445
446		p = per_cpu_ptr(zone->pageset, cpu);
447
448		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
449			if (p->vm_stat_diff[i]) {
450				unsigned long flags;
451				int v;
452
453				local_irq_save(flags);
454				v = p->vm_stat_diff[i];
455				p->vm_stat_diff[i] = 0;
456				local_irq_restore(flags);
457				atomic_long_add(v, &zone->vm_stat[i]);
458				global_diff[i] += v;
459#ifdef CONFIG_NUMA
460				/* 3 seconds idle till flush */
461				p->expire = 3;
462#endif
463			}
464		cond_resched();
465#ifdef CONFIG_NUMA
466		/*
467		 * Deal with draining the remote pageset of this
468		 * processor
469		 *
470		 * Check if there are pages remaining in this pageset
471		 * if not then there is nothing to expire.
472		 */
473		if (!p->expire || !p->pcp.count)
474			continue;
475
476		/*
477		 * We never drain zones local to this processor.
478		 */
479		if (zone_to_nid(zone) == numa_node_id()) {
480			p->expire = 0;
481			continue;
482		}
483
484		p->expire--;
485		if (p->expire)
486			continue;
487
488		if (p->pcp.count)
489			drain_zone_pages(zone, &p->pcp);
490#endif
491	}
492
493	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
494		if (global_diff[i])
495			atomic_long_add(global_diff[i], &vm_stat[i]);
496}
497
498#endif
499
500#ifdef CONFIG_NUMA
501/*
502 * zonelist = the list of zones passed to the allocator
503 * z 	    = the zone from which the allocation occurred.
504 *
505 * Must be called with interrupts disabled.
506 *
507 * When __GFP_OTHER_NODE is set assume the node of the preferred
508 * zone is the local node. This is useful for daemons who allocate
509 * memory on behalf of other processes.
510 */
511void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
512{
513	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
514		__inc_zone_state(z, NUMA_HIT);
515	} else {
516		__inc_zone_state(z, NUMA_MISS);
517		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
518	}
519	if (z->node == ((flags & __GFP_OTHER_NODE) ?
520			preferred_zone->node : numa_node_id()))
521		__inc_zone_state(z, NUMA_LOCAL);
522	else
523		__inc_zone_state(z, NUMA_OTHER);
524}
525#endif
526
527#ifdef CONFIG_COMPACTION
528
529struct contig_page_info {
530	unsigned long free_pages;
531	unsigned long free_blocks_total;
532	unsigned long free_blocks_suitable;
533};
534
535/*
536 * Calculate the number of free pages in a zone, how many contiguous
537 * pages are free and how many are large enough to satisfy an allocation of
538 * the target size. Note that this function makes no attempt to estimate
539 * how many suitable free blocks there *might* be if MOVABLE pages were
540 * migrated. Calculating that is possible, but expensive and can be
541 * figured out from userspace
542 */
543static void fill_contig_page_info(struct zone *zone,
544				unsigned int suitable_order,
545				struct contig_page_info *info)
546{
547	unsigned int order;
548
549	info->free_pages = 0;
550	info->free_blocks_total = 0;
551	info->free_blocks_suitable = 0;
552
553	for (order = 0; order < MAX_ORDER; order++) {
554		unsigned long blocks;
555
556		/* Count number of free blocks */
557		blocks = zone->free_area[order].nr_free;
558		info->free_blocks_total += blocks;
559
560		/* Count free base pages */
561		info->free_pages += blocks << order;
562
563		/* Count the suitable free blocks */
564		if (order >= suitable_order)
565			info->free_blocks_suitable += blocks <<
566						(order - suitable_order);
567	}
568}
569
570/*
571 * A fragmentation index only makes sense if an allocation of a requested
572 * size would fail. If that is true, the fragmentation index indicates
573 * whether external fragmentation or a lack of memory was the problem.
574 * The value can be used to determine if page reclaim or compaction
575 * should be used
576 */
577static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
578{
579	unsigned long requested = 1UL << order;
580
581	if (!info->free_blocks_total)
582		return 0;
583
584	/* Fragmentation index only makes sense when a request would fail */
585	if (info->free_blocks_suitable)
586		return -1000;
587
588	/*
589	 * Index is between 0 and 1 so return within 3 decimal places
590	 *
591	 * 0 => allocation would fail due to lack of memory
592	 * 1 => allocation would fail due to fragmentation
593	 */
594	return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
595}
596
597/* Same as __fragmentation index but allocs contig_page_info on stack */
598int fragmentation_index(struct zone *zone, unsigned int order)
599{
600	struct contig_page_info info;
601
602	fill_contig_page_info(zone, order, &info);
603	return __fragmentation_index(order, &info);
604}
605#endif
606
607#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
608#include <linux/proc_fs.h>
609#include <linux/seq_file.h>
610
611static char * const migratetype_names[MIGRATE_TYPES] = {
612	"Unmovable",
613	"Reclaimable",
614	"Movable",
615	"Reserve",
616	"Isolate",
617};
618
619static void *frag_start(struct seq_file *m, loff_t *pos)
620{
621	pg_data_t *pgdat;
622	loff_t node = *pos;
623	for (pgdat = first_online_pgdat();
624	     pgdat && node;
625	     pgdat = next_online_pgdat(pgdat))
626		--node;
627
628	return pgdat;
629}
630
631static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
632{
633	pg_data_t *pgdat = (pg_data_t *)arg;
634
635	(*pos)++;
636	return next_online_pgdat(pgdat);
637}
638
639static void frag_stop(struct seq_file *m, void *arg)
640{
641}
642
643/* Walk all the zones in a node and print using a callback */
644static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
645		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
646{
647	struct zone *zone;
648	struct zone *node_zones = pgdat->node_zones;
649	unsigned long flags;
650
651	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
652		if (!populated_zone(zone))
653			continue;
654
655		spin_lock_irqsave(&zone->lock, flags);
656		print(m, pgdat, zone);
657		spin_unlock_irqrestore(&zone->lock, flags);
658	}
659}
660#endif
661
662#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
663#ifdef CONFIG_ZONE_DMA
664#define TEXT_FOR_DMA(xx) xx "_dma",
665#else
666#define TEXT_FOR_DMA(xx)
667#endif
668
669#ifdef CONFIG_ZONE_DMA32
670#define TEXT_FOR_DMA32(xx) xx "_dma32",
671#else
672#define TEXT_FOR_DMA32(xx)
673#endif
674
675#ifdef CONFIG_HIGHMEM
676#define TEXT_FOR_HIGHMEM(xx) xx "_high",
677#else
678#define TEXT_FOR_HIGHMEM(xx)
679#endif
680
681#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
682					TEXT_FOR_HIGHMEM(xx) xx "_movable",
683
684const char * const vmstat_text[] = {
685	/* Zoned VM counters */
686	"nr_free_pages",
687	"nr_inactive_anon",
688	"nr_active_anon",
689	"nr_inactive_file",
690	"nr_active_file",
691	"nr_unevictable",
692	"nr_mlock",
693	"nr_anon_pages",
694	"nr_mapped",
695	"nr_file_pages",
696	"nr_dirty",
697	"nr_writeback",
698	"nr_slab_reclaimable",
699	"nr_slab_unreclaimable",
700	"nr_page_table_pages",
701	"nr_kernel_stack",
702	"nr_unstable",
703	"nr_bounce",
704	"nr_vmscan_write",
705	"nr_writeback_temp",
706	"nr_isolated_anon",
707	"nr_isolated_file",
708	"nr_shmem",
709	"nr_dirtied",
710	"nr_written",
711
712#ifdef CONFIG_NUMA
713	"numa_hit",
714	"numa_miss",
715	"numa_foreign",
716	"numa_interleave",
717	"numa_local",
718	"numa_other",
719#endif
720	"nr_anon_transparent_hugepages",
721	"nr_dirty_threshold",
722	"nr_dirty_background_threshold",
723
724#ifdef CONFIG_VM_EVENT_COUNTERS
725	"pgpgin",
726	"pgpgout",
727	"pswpin",
728	"pswpout",
729
730	TEXTS_FOR_ZONES("pgalloc")
731
732	"pgfree",
733	"pgactivate",
734	"pgdeactivate",
735
736	"pgfault",
737	"pgmajfault",
738
739	TEXTS_FOR_ZONES("pgrefill")
740	TEXTS_FOR_ZONES("pgsteal")
741	TEXTS_FOR_ZONES("pgscan_kswapd")
742	TEXTS_FOR_ZONES("pgscan_direct")
743
744#ifdef CONFIG_NUMA
745	"zone_reclaim_failed",
746#endif
747	"pginodesteal",
748	"slabs_scanned",
749	"kswapd_steal",
750	"kswapd_inodesteal",
751	"kswapd_low_wmark_hit_quickly",
752	"kswapd_high_wmark_hit_quickly",
753	"kswapd_skip_congestion_wait",
754	"pageoutrun",
755	"allocstall",
756
757	"pgrotated",
758
759#ifdef CONFIG_COMPACTION
760	"compact_blocks_moved",
761	"compact_pages_moved",
762	"compact_pagemigrate_failed",
763	"compact_stall",
764	"compact_fail",
765	"compact_success",
766#endif
767
768#ifdef CONFIG_HUGETLB_PAGE
769	"htlb_buddy_alloc_success",
770	"htlb_buddy_alloc_fail",
771#endif
772	"unevictable_pgs_culled",
773	"unevictable_pgs_scanned",
774	"unevictable_pgs_rescued",
775	"unevictable_pgs_mlocked",
776	"unevictable_pgs_munlocked",
777	"unevictable_pgs_cleared",
778	"unevictable_pgs_stranded",
779	"unevictable_pgs_mlockfreed",
780
781#ifdef CONFIG_TRANSPARENT_HUGEPAGE
782	"thp_fault_alloc",
783	"thp_fault_fallback",
784	"thp_collapse_alloc",
785	"thp_collapse_alloc_failed",
786	"thp_split",
787#endif
788
789#endif /* CONFIG_VM_EVENTS_COUNTERS */
790};
791#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
792
793
794#ifdef CONFIG_PROC_FS
795static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
796						struct zone *zone)
797{
798	int order;
799
800	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
801	for (order = 0; order < MAX_ORDER; ++order)
802		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
803	seq_putc(m, '\n');
804}
805
806/*
807 * This walks the free areas for each zone.
808 */
809static int frag_show(struct seq_file *m, void *arg)
810{
811	pg_data_t *pgdat = (pg_data_t *)arg;
812	walk_zones_in_node(m, pgdat, frag_show_print);
813	return 0;
814}
815
816static void pagetypeinfo_showfree_print(struct seq_file *m,
817					pg_data_t *pgdat, struct zone *zone)
818{
819	int order, mtype;
820
821	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
822		seq_printf(m, "Node %4d, zone %8s, type %12s ",
823					pgdat->node_id,
824					zone->name,
825					migratetype_names[mtype]);
826		for (order = 0; order < MAX_ORDER; ++order) {
827			unsigned long freecount = 0;
828			struct free_area *area;
829			struct list_head *curr;
830
831			area = &(zone->free_area[order]);
832
833			list_for_each(curr, &area->free_list[mtype])
834				freecount++;
835			seq_printf(m, "%6lu ", freecount);
836		}
837		seq_putc(m, '\n');
838	}
839}
840
841/* Print out the free pages at each order for each migatetype */
842static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
843{
844	int order;
845	pg_data_t *pgdat = (pg_data_t *)arg;
846
847	/* Print header */
848	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
849	for (order = 0; order < MAX_ORDER; ++order)
850		seq_printf(m, "%6d ", order);
851	seq_putc(m, '\n');
852
853	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
854
855	return 0;
856}
857
858static void pagetypeinfo_showblockcount_print(struct seq_file *m,
859					pg_data_t *pgdat, struct zone *zone)
860{
861	int mtype;
862	unsigned long pfn;
863	unsigned long start_pfn = zone->zone_start_pfn;
864	unsigned long end_pfn = start_pfn + zone->spanned_pages;
865	unsigned long count[MIGRATE_TYPES] = { 0, };
866
867	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
868		struct page *page;
869
870		if (!pfn_valid(pfn))
871			continue;
872
873		page = pfn_to_page(pfn);
874
875		/* Watch for unexpected holes punched in the memmap */
876		if (!memmap_valid_within(pfn, page, zone))
877			continue;
878
879		mtype = get_pageblock_migratetype(page);
880
881		if (mtype < MIGRATE_TYPES)
882			count[mtype]++;
883	}
884
885	/* Print counts */
886	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
887	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
888		seq_printf(m, "%12lu ", count[mtype]);
889	seq_putc(m, '\n');
890}
891
892/* Print out the free pages at each order for each migratetype */
893static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
894{
895	int mtype;
896	pg_data_t *pgdat = (pg_data_t *)arg;
897
898	seq_printf(m, "\n%-23s", "Number of blocks type ");
899	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
900		seq_printf(m, "%12s ", migratetype_names[mtype]);
901	seq_putc(m, '\n');
902	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
903
904	return 0;
905}
906
907/*
908 * This prints out statistics in relation to grouping pages by mobility.
909 * It is expensive to collect so do not constantly read the file.
910 */
911static int pagetypeinfo_show(struct seq_file *m, void *arg)
912{
913	pg_data_t *pgdat = (pg_data_t *)arg;
914
915	/* check memoryless node */
916	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
917		return 0;
918
919	seq_printf(m, "Page block order: %d\n", pageblock_order);
920	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
921	seq_putc(m, '\n');
922	pagetypeinfo_showfree(m, pgdat);
923	pagetypeinfo_showblockcount(m, pgdat);
924
925	return 0;
926}
927
928static const struct seq_operations fragmentation_op = {
929	.start	= frag_start,
930	.next	= frag_next,
931	.stop	= frag_stop,
932	.show	= frag_show,
933};
934
935static int fragmentation_open(struct inode *inode, struct file *file)
936{
937	return seq_open(file, &fragmentation_op);
938}
939
940static const struct file_operations fragmentation_file_operations = {
941	.open		= fragmentation_open,
942	.read		= seq_read,
943	.llseek		= seq_lseek,
944	.release	= seq_release,
945};
946
947static const struct seq_operations pagetypeinfo_op = {
948	.start	= frag_start,
949	.next	= frag_next,
950	.stop	= frag_stop,
951	.show	= pagetypeinfo_show,
952};
953
954static int pagetypeinfo_open(struct inode *inode, struct file *file)
955{
956	return seq_open(file, &pagetypeinfo_op);
957}
958
959static const struct file_operations pagetypeinfo_file_ops = {
960	.open		= pagetypeinfo_open,
961	.read		= seq_read,
962	.llseek		= seq_lseek,
963	.release	= seq_release,
964};
965
966static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
967							struct zone *zone)
968{
969	int i;
970	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
971	seq_printf(m,
972		   "\n  pages free     %lu"
973		   "\n        min      %lu"
974		   "\n        low      %lu"
975		   "\n        high     %lu"
976		   "\n        scanned  %lu"
977		   "\n        spanned  %lu"
978		   "\n        present  %lu",
979		   zone_page_state(zone, NR_FREE_PAGES),
980		   min_wmark_pages(zone),
981		   low_wmark_pages(zone),
982		   high_wmark_pages(zone),
983		   zone->pages_scanned,
984		   zone->spanned_pages,
985		   zone->present_pages);
986
987	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
988		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
989				zone_page_state(zone, i));
990
991	seq_printf(m,
992		   "\n        protection: (%lu",
993		   zone->lowmem_reserve[0]);
994	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
995		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
996	seq_printf(m,
997		   ")"
998		   "\n  pagesets");
999	for_each_online_cpu(i) {
1000		struct per_cpu_pageset *pageset;
1001
1002		pageset = per_cpu_ptr(zone->pageset, i);
1003		seq_printf(m,
1004			   "\n    cpu: %i"
1005			   "\n              count: %i"
1006			   "\n              high:  %i"
1007			   "\n              batch: %i",
1008			   i,
1009			   pageset->pcp.count,
1010			   pageset->pcp.high,
1011			   pageset->pcp.batch);
1012#ifdef CONFIG_SMP
1013		seq_printf(m, "\n  vm stats threshold: %d",
1014				pageset->stat_threshold);
1015#endif
1016	}
1017	seq_printf(m,
1018		   "\n  all_unreclaimable: %u"
1019		   "\n  start_pfn:         %lu"
1020		   "\n  inactive_ratio:    %u",
1021		   zone->all_unreclaimable,
1022		   zone->zone_start_pfn,
1023		   zone->inactive_ratio);
1024	seq_putc(m, '\n');
1025}
1026
1027/*
1028 * Output information about zones in @pgdat.
1029 */
1030static int zoneinfo_show(struct seq_file *m, void *arg)
1031{
1032	pg_data_t *pgdat = (pg_data_t *)arg;
1033	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1034	return 0;
1035}
1036
1037static const struct seq_operations zoneinfo_op = {
1038	.start	= frag_start, /* iterate over all zones. The same as in
1039			       * fragmentation. */
1040	.next	= frag_next,
1041	.stop	= frag_stop,
1042	.show	= zoneinfo_show,
1043};
1044
1045static int zoneinfo_open(struct inode *inode, struct file *file)
1046{
1047	return seq_open(file, &zoneinfo_op);
1048}
1049
1050static const struct file_operations proc_zoneinfo_file_operations = {
1051	.open		= zoneinfo_open,
1052	.read		= seq_read,
1053	.llseek		= seq_lseek,
1054	.release	= seq_release,
1055};
1056
1057enum writeback_stat_item {
1058	NR_DIRTY_THRESHOLD,
1059	NR_DIRTY_BG_THRESHOLD,
1060	NR_VM_WRITEBACK_STAT_ITEMS,
1061};
1062
1063static void *vmstat_start(struct seq_file *m, loff_t *pos)
1064{
1065	unsigned long *v;
1066	int i, stat_items_size;
1067
1068	if (*pos >= ARRAY_SIZE(vmstat_text))
1069		return NULL;
1070	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1071			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1072
1073#ifdef CONFIG_VM_EVENT_COUNTERS
1074	stat_items_size += sizeof(struct vm_event_state);
1075#endif
1076
1077	v = kmalloc(stat_items_size, GFP_KERNEL);
1078	m->private = v;
1079	if (!v)
1080		return ERR_PTR(-ENOMEM);
1081	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1082		v[i] = global_page_state(i);
1083	v += NR_VM_ZONE_STAT_ITEMS;
1084
1085	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1086			    v + NR_DIRTY_THRESHOLD);
1087	v += NR_VM_WRITEBACK_STAT_ITEMS;
1088
1089#ifdef CONFIG_VM_EVENT_COUNTERS
1090	all_vm_events(v);
1091	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1092	v[PGPGOUT] /= 2;
1093#endif
1094	return (unsigned long *)m->private + *pos;
1095}
1096
1097static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1098{
1099	(*pos)++;
1100	if (*pos >= ARRAY_SIZE(vmstat_text))
1101		return NULL;
1102	return (unsigned long *)m->private + *pos;
1103}
1104
1105static int vmstat_show(struct seq_file *m, void *arg)
1106{
1107	unsigned long *l = arg;
1108	unsigned long off = l - (unsigned long *)m->private;
1109
1110	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1111	return 0;
1112}
1113
1114static void vmstat_stop(struct seq_file *m, void *arg)
1115{
1116	kfree(m->private);
1117	m->private = NULL;
1118}
1119
1120static const struct seq_operations vmstat_op = {
1121	.start	= vmstat_start,
1122	.next	= vmstat_next,
1123	.stop	= vmstat_stop,
1124	.show	= vmstat_show,
1125};
1126
1127static int vmstat_open(struct inode *inode, struct file *file)
1128{
1129	return seq_open(file, &vmstat_op);
1130}
1131
1132static const struct file_operations proc_vmstat_file_operations = {
1133	.open		= vmstat_open,
1134	.read		= seq_read,
1135	.llseek		= seq_lseek,
1136	.release	= seq_release,
1137};
1138#endif /* CONFIG_PROC_FS */
1139
1140#ifdef CONFIG_SMP
1141static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1142int sysctl_stat_interval __read_mostly = HZ;
1143
1144static void vmstat_update(struct work_struct *w)
1145{
1146	refresh_cpu_vm_stats(smp_processor_id());
1147	schedule_delayed_work(&__get_cpu_var(vmstat_work),
1148		round_jiffies_relative(sysctl_stat_interval));
1149}
1150
1151static void __cpuinit start_cpu_timer(int cpu)
1152{
1153	struct delayed_work *work = &per_cpu(vmstat_work, cpu);
1154
1155	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
1156	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
1157}
1158
1159/*
1160 * Use the cpu notifier to insure that the thresholds are recalculated
1161 * when necessary.
1162 */
1163static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
1164		unsigned long action,
1165		void *hcpu)
1166{
1167	long cpu = (long)hcpu;
1168
1169	switch (action) {
1170	case CPU_ONLINE:
1171	case CPU_ONLINE_FROZEN:
1172		refresh_zone_stat_thresholds();
1173		start_cpu_timer(cpu);
1174		node_set_state(cpu_to_node(cpu), N_CPU);
1175		break;
1176	case CPU_DOWN_PREPARE:
1177	case CPU_DOWN_PREPARE_FROZEN:
1178		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1179		per_cpu(vmstat_work, cpu).work.func = NULL;
1180		break;
1181	case CPU_DOWN_FAILED:
1182	case CPU_DOWN_FAILED_FROZEN:
1183		start_cpu_timer(cpu);
1184		break;
1185	case CPU_DEAD:
1186	case CPU_DEAD_FROZEN:
1187		refresh_zone_stat_thresholds();
1188		break;
1189	default:
1190		break;
1191	}
1192	return NOTIFY_OK;
1193}
1194
1195static struct notifier_block __cpuinitdata vmstat_notifier =
1196	{ &vmstat_cpuup_callback, NULL, 0 };
1197#endif
1198
1199static int __init setup_vmstat(void)
1200{
1201#ifdef CONFIG_SMP
1202	int cpu;
1203
1204	register_cpu_notifier(&vmstat_notifier);
1205
1206	for_each_online_cpu(cpu)
1207		start_cpu_timer(cpu);
1208#endif
1209#ifdef CONFIG_PROC_FS
1210	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
1211	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1212	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1213	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1214#endif
1215	return 0;
1216}
1217module_init(setup_vmstat)
1218
1219#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1220#include <linux/debugfs.h>
1221
1222static struct dentry *extfrag_debug_root;
1223
1224/*
1225 * Return an index indicating how much of the available free memory is
1226 * unusable for an allocation of the requested size.
1227 */
1228static int unusable_free_index(unsigned int order,
1229				struct contig_page_info *info)
1230{
1231	/* No free memory is interpreted as all free memory is unusable */
1232	if (info->free_pages == 0)
1233		return 1000;
1234
1235	/*
1236	 * Index should be a value between 0 and 1. Return a value to 3
1237	 * decimal places.
1238	 *
1239	 * 0 => no fragmentation
1240	 * 1 => high fragmentation
1241	 */
1242	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1243
1244}
1245
1246static void unusable_show_print(struct seq_file *m,
1247					pg_data_t *pgdat, struct zone *zone)
1248{
1249	unsigned int order;
1250	int index;
1251	struct contig_page_info info;
1252
1253	seq_printf(m, "Node %d, zone %8s ",
1254				pgdat->node_id,
1255				zone->name);
1256	for (order = 0; order < MAX_ORDER; ++order) {
1257		fill_contig_page_info(zone, order, &info);
1258		index = unusable_free_index(order, &info);
1259		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1260	}
1261
1262	seq_putc(m, '\n');
1263}
1264
1265/*
1266 * Display unusable free space index
1267 *
1268 * The unusable free space index measures how much of the available free
1269 * memory cannot be used to satisfy an allocation of a given size and is a
1270 * value between 0 and 1. The higher the value, the more of free memory is
1271 * unusable and by implication, the worse the external fragmentation is. This
1272 * can be expressed as a percentage by multiplying by 100.
1273 */
1274static int unusable_show(struct seq_file *m, void *arg)
1275{
1276	pg_data_t *pgdat = (pg_data_t *)arg;
1277
1278	/* check memoryless node */
1279	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
1280		return 0;
1281
1282	walk_zones_in_node(m, pgdat, unusable_show_print);
1283
1284	return 0;
1285}
1286
1287static const struct seq_operations unusable_op = {
1288	.start	= frag_start,
1289	.next	= frag_next,
1290	.stop	= frag_stop,
1291	.show	= unusable_show,
1292};
1293
1294static int unusable_open(struct inode *inode, struct file *file)
1295{
1296	return seq_open(file, &unusable_op);
1297}
1298
1299static const struct file_operations unusable_file_ops = {
1300	.open		= unusable_open,
1301	.read		= seq_read,
1302	.llseek		= seq_lseek,
1303	.release	= seq_release,
1304};
1305
1306static void extfrag_show_print(struct seq_file *m,
1307					pg_data_t *pgdat, struct zone *zone)
1308{
1309	unsigned int order;
1310	int index;
1311
1312	/* Alloc on stack as interrupts are disabled for zone walk */
1313	struct contig_page_info info;
1314
1315	seq_printf(m, "Node %d, zone %8s ",
1316				pgdat->node_id,
1317				zone->name);
1318	for (order = 0; order < MAX_ORDER; ++order) {
1319		fill_contig_page_info(zone, order, &info);
1320		index = __fragmentation_index(order, &info);
1321		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1322	}
1323
1324	seq_putc(m, '\n');
1325}
1326
1327/*
1328 * Display fragmentation index for orders that allocations would fail for
1329 */
1330static int extfrag_show(struct seq_file *m, void *arg)
1331{
1332	pg_data_t *pgdat = (pg_data_t *)arg;
1333
1334	walk_zones_in_node(m, pgdat, extfrag_show_print);
1335
1336	return 0;
1337}
1338
1339static const struct seq_operations extfrag_op = {
1340	.start	= frag_start,
1341	.next	= frag_next,
1342	.stop	= frag_stop,
1343	.show	= extfrag_show,
1344};
1345
1346static int extfrag_open(struct inode *inode, struct file *file)
1347{
1348	return seq_open(file, &extfrag_op);
1349}
1350
1351static const struct file_operations extfrag_file_ops = {
1352	.open		= extfrag_open,
1353	.read		= seq_read,
1354	.llseek		= seq_lseek,
1355	.release	= seq_release,
1356};
1357
1358static int __init extfrag_debug_init(void)
1359{
1360	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1361	if (!extfrag_debug_root)
1362		return -ENOMEM;
1363
1364	if (!debugfs_create_file("unusable_index", 0444,
1365			extfrag_debug_root, NULL, &unusable_file_ops))
1366		return -ENOMEM;
1367
1368	if (!debugfs_create_file("extfrag_index", 0444,
1369			extfrag_debug_root, NULL, &extfrag_file_ops))
1370		return -ENOMEM;
1371
1372	return 0;
1373}
1374
1375module_init(extfrag_debug_init);
1376#endif
1377