page-writeback.c revision 54848d73f9f254631303d6eab9b976855988b266
1/*
2 * mm/page-writeback.c
3 *
4 * Copyright (C) 2002, Linus Torvalds.
5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
6 *
7 * Contains functions related to writing back dirty pages at the
8 * address_space level.
9 *
10 * 10Apr2002	Andrew Morton
11 *		Initial version
12 */
13
14#include <linux/kernel.h>
15#include <linux/export.h>
16#include <linux/spinlock.h>
17#include <linux/fs.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/slab.h>
21#include <linux/pagemap.h>
22#include <linux/writeback.h>
23#include <linux/init.h>
24#include <linux/backing-dev.h>
25#include <linux/task_io_accounting_ops.h>
26#include <linux/blkdev.h>
27#include <linux/mpage.h>
28#include <linux/rmap.h>
29#include <linux/percpu.h>
30#include <linux/notifier.h>
31#include <linux/smp.h>
32#include <linux/sysctl.h>
33#include <linux/cpu.h>
34#include <linux/syscalls.h>
35#include <linux/buffer_head.h>
36#include <linux/pagevec.h>
37#include <trace/events/writeback.h>
38
39/*
40 * Sleep at most 200ms at a time in balance_dirty_pages().
41 */
42#define MAX_PAUSE		max(HZ/5, 1)
43
44/*
45 * Estimate write bandwidth at 200ms intervals.
46 */
47#define BANDWIDTH_INTERVAL	max(HZ/5, 1)
48
49#define RATELIMIT_CALC_SHIFT	10
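/*
 * pos_ratio and the other rate scaling factors below are fixed point
 * values with RATELIMIT_CALC_SHIFT fractional bits: 1 << RATELIMIT_CALC_SHIFT
 * (1024) represents the ratio 1.0, 2048 represents 2.0, 512 represents 0.5.
 */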
50
51/*
52 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
53 * will look to see if it needs to force writeback or throttling.
54 */
55static long ratelimit_pages = 32;
56
57/* The following parameters are exported via /proc/sys/vm */
58
59/*
60 * Start background writeback (via writeback threads) at this percentage
61 */
62int dirty_background_ratio = 10;
63
64/*
65 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
66 * dirty_background_ratio * the amount of dirtyable memory
67 */
68unsigned long dirty_background_bytes;
69
70/*
71 * free highmem will not be subtracted from the total free memory
72 * for calculating free ratios if vm_highmem_is_dirtyable is true
73 */
74int vm_highmem_is_dirtyable;
75
76/*
77 * The generator of dirty data starts writeback at this percentage
78 */
79int vm_dirty_ratio = 20;
80
81/*
82 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
83 * vm_dirty_ratio * the amount of dirtyable memory
84 */
85unsigned long vm_dirty_bytes;
86
87/*
88 * The interval between `kupdate'-style writebacks
89 */
90unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
91
92/*
93 * The longest time for which data is allowed to remain dirty
94 */
95unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
96
97/*
98 * Flag that makes the machine dump writes/reads and block dirtyings.
99 */
100int block_dump;
101
102/*
103 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
104 * a full sync is triggered after this time elapses without any disk activity.
105 */
106int laptop_mode;
107
108EXPORT_SYMBOL(laptop_mode);
109
110/* End of sysctl-exported parameters */
111
112unsigned long global_dirty_limit;
113
114/*
115 * Scale the writeback cache size proportional to the relative writeout speeds.
116 *
117 * We do this by keeping a floating proportion between BDIs, based on page
118 * writeback completions [end_page_writeback()]. Those devices that write out
119 * pages fastest will get the larger share, while the slower will get a smaller
120 * share.
121 *
122 * We use page writeout completions because we are interested in getting rid of
123 * dirty pages. Having them written out is the primary goal.
124 *
125 * We introduce a concept of time, a period over which we measure these events,
126 * because demand can/will vary over time. The length of this period itself is
127 * measured in page writeback completions.
128 *
129 */
130static struct prop_descriptor vm_completions;
131
132/*
133 * couple the period to the dirty_ratio:
134 *
135 *   period/2 ~ roundup_pow_of_two(dirty limit)
136 */
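/*
 * For example, a dirty limit of 51200 pages gives
 * shift = 2 + ilog2(51199) = 17, and roundup_pow_of_two(51200) = 65536
 * = 1 << 16, matching the period/2 relation above.
 */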
137static int calc_period_shift(void)
138{
139	unsigned long dirty_total;
140
141	if (vm_dirty_bytes)
142		dirty_total = vm_dirty_bytes / PAGE_SIZE;
143	else
144		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
145				100;
146	return 2 + ilog2(dirty_total - 1);
147}
148
149/*
150 * update the period when the dirty threshold changes.
151 */
152static void update_completion_period(void)
153{
154	int shift = calc_period_shift();
155	prop_change_shift(&vm_completions, shift);
156
157	writeback_set_ratelimit();
158}
159
160int dirty_background_ratio_handler(struct ctl_table *table, int write,
161		void __user *buffer, size_t *lenp,
162		loff_t *ppos)
163{
164	int ret;
165
166	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
167	if (ret == 0 && write)
168		dirty_background_bytes = 0;
169	return ret;
170}
171
172int dirty_background_bytes_handler(struct ctl_table *table, int write,
173		void __user *buffer, size_t *lenp,
174		loff_t *ppos)
175{
176	int ret;
177
178	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
179	if (ret == 0 && write)
180		dirty_background_ratio = 0;
181	return ret;
182}
183
184int dirty_ratio_handler(struct ctl_table *table, int write,
185		void __user *buffer, size_t *lenp,
186		loff_t *ppos)
187{
188	int old_ratio = vm_dirty_ratio;
189	int ret;
190
191	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
192	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
193		update_completion_period();
194		vm_dirty_bytes = 0;
195	}
196	return ret;
197}
198
199
200int dirty_bytes_handler(struct ctl_table *table, int write,
201		void __user *buffer, size_t *lenp,
202		loff_t *ppos)
203{
204	unsigned long old_bytes = vm_dirty_bytes;
205	int ret;
206
207	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
208	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
209		update_completion_period();
210		vm_dirty_ratio = 0;
211	}
212	return ret;
213}
214
215/*
216 * Increment the BDI's writeout completion count and the global writeout
217 * completion count. Called from test_clear_page_writeback().
218 */
219static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
220{
221	__inc_bdi_stat(bdi, BDI_WRITTEN);
222	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
223			      bdi->max_prop_frac);
224}
225
226void bdi_writeout_inc(struct backing_dev_info *bdi)
227{
228	unsigned long flags;
229
230	local_irq_save(flags);
231	__bdi_writeout_inc(bdi);
232	local_irq_restore(flags);
233}
234EXPORT_SYMBOL_GPL(bdi_writeout_inc);
235
236/*
237 * Obtain an accurate fraction of the BDI's portion.
238 */
239static void bdi_writeout_fraction(struct backing_dev_info *bdi,
240		long *numerator, long *denominator)
241{
242	prop_fraction_percpu(&vm_completions, &bdi->completions,
243				numerator, denominator);
244}
245
246/*
247 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
248 * registered backing devices, which, for obvious reasons, cannot
249 * exceed 100%.
250 */
251static unsigned int bdi_min_ratio;
252
253int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
254{
255	int ret = 0;
256
257	spin_lock_bh(&bdi_lock);
258	if (min_ratio > bdi->max_ratio) {
259		ret = -EINVAL;
260	} else {
261		min_ratio -= bdi->min_ratio;
262		if (bdi_min_ratio + min_ratio < 100) {
263			bdi_min_ratio += min_ratio;
264			bdi->min_ratio += min_ratio;
265		} else {
266			ret = -EINVAL;
267		}
268	}
269	spin_unlock_bh(&bdi_lock);
270
271	return ret;
272}
273
274int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
275{
276	int ret = 0;
277
278	if (max_ratio > 100)
279		return -EINVAL;
280
281	spin_lock_bh(&bdi_lock);
282	if (bdi->min_ratio > max_ratio) {
283		ret = -EINVAL;
284	} else {
285		bdi->max_ratio = max_ratio;
286		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
287	}
288	spin_unlock_bh(&bdi_lock);
289
290	return ret;
291}
292EXPORT_SYMBOL(bdi_set_max_ratio);
293
294/*
295 * Work out the current dirty-memory clamping and background writeout
296 * thresholds.
297 *
298 * The main aim here is to lower them aggressively if there is a lot of mapped
299 * memory around, to avoid stressing page reclaim with lots of unreclaimable
300 * pages.  It is better to clamp down on writers than to start swapping and
301 * performing lots of scanning.
302 *
303 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
304 *
305 * We don't permit the clamping level to fall below 5% - that is getting rather
306 * excessive.
307 *
308 * We make sure that the background writeout level is below the adjusted
309 * clamping level.
310 */
311
312static unsigned long highmem_dirtyable_memory(unsigned long total)
313{
314#ifdef CONFIG_HIGHMEM
315	int node;
316	unsigned long x = 0;
317
318	for_each_node_state(node, N_HIGH_MEMORY) {
319		struct zone *z =
320			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
321
322		x += zone_page_state(z, NR_FREE_PAGES) +
323		     zone_reclaimable_pages(z);
324	}
325	/*
326	 * Make sure that the number of highmem pages is never larger
327	 * than the total amount of dirtyable memory. This can only
328	 * occur in very strange VM situations, but we want to make sure
329	 * that it does not.
330	 */
331	return min(x, total);
332#else
333	return 0;
334#endif
335}
336
337/**
338 * determine_dirtyable_memory - amount of memory that may be used
339 *
340 * Returns the number of pages that can currently be freed and used
341 * by the kernel for direct mappings.
342 */
343unsigned long determine_dirtyable_memory(void)
344{
345	unsigned long x;
346
347	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
348
349	if (!vm_highmem_is_dirtyable)
350		x -= highmem_dirtyable_memory(x);
351
352	return x + 1;	/* Ensure that we never return 0 */
353}
354
355static unsigned long dirty_freerun_ceiling(unsigned long thresh,
356					   unsigned long bg_thresh)
357{
358	return (thresh + bg_thresh) / 2;
359}
360
361static unsigned long hard_dirty_limit(unsigned long thresh)
362{
363	return max(thresh, global_dirty_limit);
364}
365
366/*
367 * global_dirty_limits - background-writeback and dirty-throttling thresholds
368 *
369 * Calculate the dirty thresholds based on sysctl parameters
370 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
371 * - vm.dirty_ratio             or  vm.dirty_bytes
372 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
373 * real-time tasks.
374 */
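/*
 * For example, with 1,000,000 dirtyable pages and the default ratios
 * (vm_dirty_ratio = 20, dirty_background_ratio = 10) this yields
 * dirty = 200,000 and background = 100,000 pages; a PF_LESS_THROTTLE or
 * real-time task would get 250,000 and 125,000 instead.
 */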
375void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
376{
377	unsigned long background;
378	unsigned long dirty;
379	unsigned long uninitialized_var(available_memory);
380	struct task_struct *tsk;
381
382	if (!vm_dirty_bytes || !dirty_background_bytes)
383		available_memory = determine_dirtyable_memory();
384
385	if (vm_dirty_bytes)
386		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
387	else
388		dirty = (vm_dirty_ratio * available_memory) / 100;
389
390	if (dirty_background_bytes)
391		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
392	else
393		background = (dirty_background_ratio * available_memory) / 100;
394
395	if (background >= dirty)
396		background = dirty / 2;
397	tsk = current;
398	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
399		background += background / 4;
400		dirty += dirty / 4;
401	}
402	*pbackground = background;
403	*pdirty = dirty;
404	trace_global_dirty_state(background, dirty);
405}
406
407/**
408 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
409 * @bdi: the backing_dev_info to query
410 * @dirty: global dirty limit in pages
411 *
412 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
413 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
414 *
415 * Note that balance_dirty_pages() will only seriously take it as a hard limit
416 * when sleeping max_pause per page is not enough to keep the dirty pages under
417 * control. For example, when the device is completely stalled due to some error
418 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
419 * In other, normal situations it acts more gently by throttling the tasks
420 * more (rather than completely blocking them) when the bdi dirty pages go high.
421 *
422 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
423 * - starving fast devices
424 * - piling up dirty pages (that will take a long time to sync) on slow devices
425 *
426 * The bdi's share of the dirty limit adapts to its throughput and is
427 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
428 */
429unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
430{
431	u64 bdi_dirty;
432	long numerator, denominator;
433
434	/*
435	 * Calculate this BDI's share of the dirty ratio.
436	 */
437	bdi_writeout_fraction(bdi, &numerator, &denominator);
438
439	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
440	bdi_dirty *= numerator;
441	do_div(bdi_dirty, denominator);
442
443	bdi_dirty += (dirty * bdi->min_ratio) / 100;
444	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
445		bdi_dirty = dirty * bdi->max_ratio / 100;
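	/*
	 * Example: with dirty = 200,000 pages, bdi_min_ratio = 0 and this
	 * bdi completing 1/4 of all writeouts (numerator/denominator = 1/4),
	 * bdi_dirty starts out at 50,000 pages; min_ratio/max_ratio can then
	 * only raise or cap that share.
	 */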
446
447	return bdi_dirty;
448}
449
450/*
451 * Dirty position control.
452 *
453 * (o) global/bdi setpoints
454 *
455 * We want the dirty pages be balanced around the global/bdi setpoints.
456 * When the number of dirty pages is higher/lower than the setpoint, the
457 * dirty position control ratio (and hence task dirty ratelimit) will be
458 * decreased/increased to bring the dirty pages back to the setpoint.
459 *
460 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
461 *
462 *     if (dirty < setpoint) scale up   pos_ratio
463 *     if (dirty > setpoint) scale down pos_ratio
464 *
465 *     if (bdi_dirty < bdi_setpoint) scale up   pos_ratio
466 *     if (bdi_dirty > bdi_setpoint) scale down pos_ratio
467 *
468 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
469 *
470 * (o) global control line
471 *
472 *     ^ pos_ratio
473 *     |
474 *     |            |<===== global dirty control scope ======>|
475 * 2.0 .............*
476 *     |            .*
477 *     |            . *
478 *     |            .   *
479 *     |            .     *
480 *     |            .        *
481 *     |            .            *
482 * 1.0 ................................*
483 *     |            .                  .     *
484 *     |            .                  .          *
485 *     |            .                  .              *
486 *     |            .                  .                 *
487 *     |            .                  .                    *
488 *   0 +------------.------------------.----------------------*------------->
489 *           freerun^          setpoint^                 limit^   dirty pages
490 *
491 * (o) bdi control line
492 *
493 *     ^ pos_ratio
494 *     |
495 *     |            *
496 *     |              *
497 *     |                *
498 *     |                  *
499 *     |                    * |<=========== span ============>|
500 * 1.0 .......................*
501 *     |                      . *
502 *     |                      .   *
503 *     |                      .     *
504 *     |                      .       *
505 *     |                      .         *
506 *     |                      .           *
507 *     |                      .             *
508 *     |                      .               *
509 *     |                      .                 *
510 *     |                      .                   *
511 *     |                      .                     *
512 * 1/4 ...............................................* * * * * * * * * * * *
513 *     |                      .                         .
514 *     |                      .                           .
515 *     |                      .                             .
516 *   0 +----------------------.-------------------------------.------------->
517 *                bdi_setpoint^                    x_intercept^
518 *
519 * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can
520 * be smoothly throttled down to normal if it starts high in situations like
521 * - start writing to a slow SD card and a fast disk at the same time. The SD
522 *   card's bdi_dirty may rush to many times higher than bdi_setpoint.
523 * - the bdi dirty thresh drops quickly due to change of JBOD workload
524 */
525static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
526					unsigned long thresh,
527					unsigned long bg_thresh,
528					unsigned long dirty,
529					unsigned long bdi_thresh,
530					unsigned long bdi_dirty)
531{
532	unsigned long write_bw = bdi->avg_write_bandwidth;
533	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
534	unsigned long limit = hard_dirty_limit(thresh);
535	unsigned long x_intercept;
536	unsigned long setpoint;		/* dirty pages' target balance point */
537	unsigned long bdi_setpoint;
538	unsigned long span;
539	long long pos_ratio;		/* for scaling up/down the rate limit */
540	long x;
541
542	if (unlikely(dirty >= limit))
543		return 0;
544
545	/*
546	 * global setpoint
547	 *
548	 *                           setpoint - dirty 3
549	 *        f(dirty) := 1.0 + (----------------)
550	 *                           limit - setpoint
551	 *
552	 * it's a 3rd order polynomial that is subject to
553	 *
554	 * (1) f(freerun)  = 2.0 => ramp up dirty_ratelimit reasonably fast
555	 * (2) f(setpoint) = 1.0 => the balance point
556	 * (3) f(limit)    = 0   => the hard limit
557	 * (4) df/dx      <= 0	 => negative feedback control
558	 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
559	 *     => fast response on large errors; small oscillation near setpoint
560	 */
561	setpoint = (freerun + limit) / 2;
562	x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
563		    limit - setpoint + 1);
564	pos_ratio = x;
565	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
566	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
567	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
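	/*
	 * Example: with freerun = 1000 and limit = 3000 pages, setpoint is
	 * 2000; at dirty = 1500 we get x ~= 0.5 (511/1024), hence
	 * pos_ratio = 1024 + 127 = 1151, i.e. roughly 1.12, while
	 * dirty = freerun would yield close to the full 2.0.
	 */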
568
569	/*
570	 * We have computed basic pos_ratio above based on global situation. If
571	 * the bdi is over/under its share of dirty pages, we want to scale
572	 * pos_ratio further down/up. That is done by the following mechanism.
573	 */
574
575	/*
576	 * bdi setpoint
577	 *
578	 *        f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint)
579	 *
580	 *                        x_intercept - bdi_dirty
581	 *                     := --------------------------
582	 *                        x_intercept - bdi_setpoint
583	 *
584	 * The main bdi control line is a linear function that is subject to
585	 *
586	 * (1) f(bdi_setpoint) = 1.0
587	 * (2) k = - 1 / (8 * write_bw)  (in single bdi case)
588	 *     or equally: x_intercept = bdi_setpoint + 8 * write_bw
589	 *
590	 * For single bdi case, the dirty pages are observed to fluctuate
591	 * regularly within range
592	 *        [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2]
593	 * for various filesystems, where (2) can yield a reasonable 12.5%
594	 * fluctuation range for pos_ratio.
595	 *
596	 * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its
597	 * own size, so move the slope over accordingly and choose a slope that
598	 * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh.
599	 */
600	if (unlikely(bdi_thresh > thresh))
601		bdi_thresh = thresh;
602	/*
603	 * It's very possible that bdi_thresh is close to 0 not because the
604	 * device is slow, but because it has remained inactive for a long time.
605	 * Honour such devices with a reasonably good (hopefully IO efficient)
606	 * threshold, so that occasional writes won't be blocked and active
607	 * writes can ramp up the threshold quickly.
608	 */
609	bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
610	/*
611	 * scale global setpoint to bdi's:
612	 *	bdi_setpoint = setpoint * bdi_thresh / thresh
613	 */
614	x = div_u64((u64)bdi_thresh << 16, thresh + 1);
615	bdi_setpoint = setpoint * (u64)x >> 16;
616	/*
617	 * Use span=(8*write_bw) in single bdi case as indicated by
618	 * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case.
619	 *
620	 *        bdi_thresh                    thresh - bdi_thresh
621	 * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh
622	 *          thresh                            thresh
623	 */
624	span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16;
625	x_intercept = bdi_setpoint + span;
626
627	if (bdi_dirty < x_intercept - span / 4) {
628		pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
629				    x_intercept - bdi_setpoint + 1);
630	} else
631		pos_ratio /= 4;
632
633	/*
634	 * bdi reserve area, safeguard against dirty pool underrun and disk idle
635	 * It may push the desired control point of global dirty pages higher
636	 * than setpoint.
637	 */
638	x_intercept = bdi_thresh / 2;
639	if (bdi_dirty < x_intercept) {
640		if (bdi_dirty > x_intercept / 8)
641			pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty);
642		else
643			pos_ratio *= 8;
644	}
645
646	return pos_ratio;
647}
648
649static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
650				       unsigned long elapsed,
651				       unsigned long written)
652{
653	const unsigned long period = roundup_pow_of_two(3 * HZ);
654	unsigned long avg = bdi->avg_write_bandwidth;
655	unsigned long old = bdi->write_bandwidth;
656	u64 bw;
657
658	/*
659	 * bw = written * HZ / elapsed
660	 *
661	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
662	 * write_bandwidth = ---------------------------------------------------
663	 *                                          period
664	 */
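	/*
	 * Example (assuming HZ == 1000, so period = 4096 jiffies): if 1000
	 * pages were written during elapsed = 200 jiffies (an instantaneous
	 * rate of 5000 pages/s) and the old write_bandwidth was 3000 pages/s,
	 * the new estimate is (1000*1000 + 3000*3896) >> 12 ~= 3097, moving
	 * towards the instantaneous rate by about elapsed/period.
	 */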
665	bw = written - bdi->written_stamp;
666	bw *= HZ;
667	if (unlikely(elapsed > period)) {
668		do_div(bw, elapsed);
669		avg = bw;
670		goto out;
671	}
672	bw += (u64)bdi->write_bandwidth * (period - elapsed);
673	bw >>= ilog2(period);
674
675	/*
676	 * one more level of smoothing, for filtering out sudden spikes
677	 */
678	if (avg > old && old >= (unsigned long)bw)
679		avg -= (avg - old) >> 3;
680
681	if (avg < old && old <= (unsigned long)bw)
682		avg += (old - avg) >> 3;
683
684out:
685	bdi->write_bandwidth = bw;
686	bdi->avg_write_bandwidth = avg;
687}
688
689/*
690 * The global dirtyable memory and dirty threshold could be suddenly knocked
691 * down by a large amount (eg. on the startup of KVM in a swapless system).
692 * This may throw the system into deep dirty exceeded state and throttle
693 * heavy/light dirtiers alike. To retain good responsiveness, maintain
694 * global_dirty_limit, which tracks slowly down towards the knocked-down dirty
695 * threshold.
696 */
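/*
 * Each update closes 1/32 of the remaining gap: e.g. if global_dirty_limit
 * is 100,000 pages and the effective threshold drops to 68,000, the first
 * update lowers the limit by (32,000 >> 5) = 1,000 pages, and subsequent
 * BANDWIDTH_INTERVAL updates keep shrinking the gap geometrically.
 */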
697static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
698{
699	unsigned long limit = global_dirty_limit;
700
701	/*
702	 * Follow up in one step.
703	 */
704	if (limit < thresh) {
705		limit = thresh;
706		goto update;
707	}
708
709	/*
710	 * Follow down slowly. Use the higher one as the target, because thresh
711	 * may drop below dirty. This is exactly the reason to introduce
712	 * global_dirty_limit which is guaranteed to lie above the dirty pages.
713	 */
714	thresh = max(thresh, dirty);
715	if (limit > thresh) {
716		limit -= (limit - thresh) >> 5;
717		goto update;
718	}
719	return;
720update:
721	global_dirty_limit = limit;
722}
723
724static void global_update_bandwidth(unsigned long thresh,
725				    unsigned long dirty,
726				    unsigned long now)
727{
728	static DEFINE_SPINLOCK(dirty_lock);
729	static unsigned long update_time;
730
731	/*
732	 * check locklessly first to optimize away locking most of the time
733	 */
734	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
735		return;
736
737	spin_lock(&dirty_lock);
738	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
739		update_dirty_limit(thresh, dirty);
740		update_time = now;
741	}
742	spin_unlock(&dirty_lock);
743}
744
745/*
746 * Maintain bdi->dirty_ratelimit, the base dirty throttle rate.
747 *
748 * Normal bdi tasks will be curbed at or below it in the long term.
749 * Obviously it should be around (write_bw / N) when there are N dd tasks.
750 */
751static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
752				       unsigned long thresh,
753				       unsigned long bg_thresh,
754				       unsigned long dirty,
755				       unsigned long bdi_thresh,
756				       unsigned long bdi_dirty,
757				       unsigned long dirtied,
758				       unsigned long elapsed)
759{
760	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
761	unsigned long limit = hard_dirty_limit(thresh);
762	unsigned long setpoint = (freerun + limit) / 2;
763	unsigned long write_bw = bdi->avg_write_bandwidth;
764	unsigned long dirty_ratelimit = bdi->dirty_ratelimit;
765	unsigned long dirty_rate;
766	unsigned long task_ratelimit;
767	unsigned long balanced_dirty_ratelimit;
768	unsigned long pos_ratio;
769	unsigned long step;
770	unsigned long x;
771
772	/*
773	 * The dirty rate will match the writeout rate in the long term, except
774	 * when dirty pages are truncated by userspace or re-dirtied by FS.
775	 */
776	dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed;
777
778	pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty,
779				       bdi_thresh, bdi_dirty);
780	/*
781	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
782	 */
783	task_ratelimit = (u64)dirty_ratelimit *
784					pos_ratio >> RATELIMIT_CALC_SHIFT;
785	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
786
787	/*
788	 * A linear estimation of the "balanced" throttle rate. The theory is,
789	 * if there are N dd tasks, each throttled at task_ratelimit, the bdi's
790	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
791	 * formula will yield the balanced rate limit (write_bw / N).
792	 *
793	 * Note that the expanded form is not a pure rate feedback:
794	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
795	 * but also takes pos_ratio into account:
796	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
797	 *
798	 * (1) is not realistic because pos_ratio also takes part in balancing
799	 * the dirty rate.  Consider the state
800	 *	pos_ratio = 0.5						     (3)
801	 *	rate = 2 * (write_bw / N)				     (4)
802	 * If (1) is used, it will get stuck in that state, because each dd will
803	 * be throttled at
804	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
805	 * yielding
806	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
807	 * Substituting (6) into (1) we get
808	 *	rate_(i+1) = rate_(i)					     (7)
809	 *
810	 * So we end up using (2) to always keep
811	 *	rate_(i+1) ~= (write_bw / N)				     (8)
812	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
813	 * pos_ratio is able to drive itself to 1.0, which is not only where
814	 * the dirty count meets the setpoint, but also where the slope of
815	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
816	 */
817	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
818					   dirty_rate | 1);
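	/*
	 * Example: 4 dd tasks on a bdi writing out 8000 pages/s, each
	 * currently throttled at task_ratelimit = 4000, will be measured at
	 * dirty_rate ~= 16000 pages/s, so balanced_dirty_ratelimit becomes
	 * 4000 * 8000 / 16000 = 2000 = write_bw / N, the long term fair share.
	 */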
819
820	/*
821	 * We could safely do this and return immediately:
822	 *
823	 *	bdi->dirty_ratelimit = balanced_dirty_ratelimit;
824	 *
825	 * However, to get a more stable dirty_ratelimit, the more elaborate
826	 * code below makes use of task_ratelimit to filter out singular points and
827	 * limit the step size.
828	 *
829	 * The below code essentially only uses the relative value of
830	 *
831	 *	task_ratelimit - dirty_ratelimit
832	 *	= (pos_ratio - 1) * dirty_ratelimit
833	 *
834	 * which reflects the direction and size of dirty position error.
835	 */
836
837	/*
838	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
839	 * task_ratelimit is on the same side of dirty_ratelimit, too.
840	 * For example, when
841	 * - dirty_ratelimit > balanced_dirty_ratelimit
842	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
843	 * lowering dirty_ratelimit will help meet both the position and rate
844	 * control targets. Otherwise, don't update dirty_ratelimit if it will
845	 * only help meet the rate target. After all, what users ultimately
846	 * feel and care about are a stable dirty rate and a small position error.
847	 *
848	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
849	 * and filter out the singular points of balanced_dirty_ratelimit, which
850	 * keeps jumping around randomly and can even leap far away at times
851	 * due to the small 200ms estimation period of dirty_rate (we want to
852	 * keep that period small to reduce time lags).
853	 */
854	step = 0;
855	if (dirty < setpoint) {
856		x = min(bdi->balanced_dirty_ratelimit,
857			 min(balanced_dirty_ratelimit, task_ratelimit));
858		if (dirty_ratelimit < x)
859			step = x - dirty_ratelimit;
860	} else {
861		x = max(bdi->balanced_dirty_ratelimit,
862			 max(balanced_dirty_ratelimit, task_ratelimit));
863		if (dirty_ratelimit > x)
864			step = dirty_ratelimit - x;
865	}
866
867	/*
868	 * Don't pursue 100% rate matching. It's impossible since the balanced
869	 * rate itself is constantly fluctuating. So decrease the tracking speed
870	 * when it gets close to the target. Helps eliminate pointless tremors.
871	 */
872	step >>= dirty_ratelimit / (2 * step + 1);
873	/*
874	 * Limit the tracking speed to avoid overshooting.
875	 */
876	step = (step + 7) / 8;
877
878	if (dirty_ratelimit < balanced_dirty_ratelimit)
879		dirty_ratelimit += step;
880	else
881		dirty_ratelimit -= step;
882
883	bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL);
884	bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
885
886	trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit);
887}
888
889void __bdi_update_bandwidth(struct backing_dev_info *bdi,
890			    unsigned long thresh,
891			    unsigned long bg_thresh,
892			    unsigned long dirty,
893			    unsigned long bdi_thresh,
894			    unsigned long bdi_dirty,
895			    unsigned long start_time)
896{
897	unsigned long now = jiffies;
898	unsigned long elapsed = now - bdi->bw_time_stamp;
899	unsigned long dirtied;
900	unsigned long written;
901
902	/*
903	 * rate-limit, only update once every 200ms.
904	 */
905	if (elapsed < BANDWIDTH_INTERVAL)
906		return;
907
908	dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
909	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
910
911	/*
912	 * Skip quiet periods when disk bandwidth is under-utilized.
913	 * (at least 1s idle time between two flusher runs)
914	 */
915	if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
916		goto snapshot;
917
918	if (thresh) {
919		global_update_bandwidth(thresh, dirty, now);
920		bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty,
921					   bdi_thresh, bdi_dirty,
922					   dirtied, elapsed);
923	}
924	bdi_update_write_bandwidth(bdi, elapsed, written);
925
926snapshot:
927	bdi->dirtied_stamp = dirtied;
928	bdi->written_stamp = written;
929	bdi->bw_time_stamp = now;
930}
931
932static void bdi_update_bandwidth(struct backing_dev_info *bdi,
933				 unsigned long thresh,
934				 unsigned long bg_thresh,
935				 unsigned long dirty,
936				 unsigned long bdi_thresh,
937				 unsigned long bdi_dirty,
938				 unsigned long start_time)
939{
940	if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
941		return;
942	spin_lock(&bdi->wb.list_lock);
943	__bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty,
944			       bdi_thresh, bdi_dirty, start_time);
945	spin_unlock(&bdi->wb.list_lock);
946}
947
948/*
949 * After a task has dirtied this many pages, balance_dirty_pages_ratelimited_nr()
950 * will look to see if it needs to start dirty throttling.
951 *
952 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
953 * global_page_state() too often. So scale it near-sqrt to the safety margin
954 * (the number of pages we may dirty without exceeding the dirty limits).
955 */
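/*
 * E.g. a safety margin of 1,048,576 pages lets a task dirty 1024 pages
 * between checks, while a margin of only 100 pages drops that to 8.
 */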
956static unsigned long dirty_poll_interval(unsigned long dirty,
957					 unsigned long thresh)
958{
959	if (thresh > dirty)
960		return 1UL << (ilog2(thresh - dirty) >> 1);
961
962	return 1;
963}
964
965static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
966				   unsigned long bdi_dirty)
967{
968	unsigned long bw = bdi->avg_write_bandwidth;
969	unsigned long hi = ilog2(bw);
970	unsigned long lo = ilog2(bdi->dirty_ratelimit);
971	unsigned long t;
972
973	/* target for 20ms max pause on 1-dd case */
974	t = HZ / 50;
975
976	/*
977	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
978	 * overheads.
979	 *
980	 * (N * 20ms) on 2^N concurrent tasks.
981	 */
982	if (hi > lo)
983		t += (hi - lo) * (20 * HZ) / 1024;
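	/*
	 * Example (assuming HZ == 1000): with 8 dd tasks dirty_ratelimit is
	 * about write_bw / 8, so hi - lo = 3 and t grows from 20 to ~78
	 * jiffies, roughly the 20ms + N * 20ms noted above for 2^N dirtiers.
	 */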
984
985	/*
986	 * Limit pause time for small memory systems. If we sleep for too long,
987	 * a small pool of dirty/writeback pages may go empty and the disk go
988	 * idle.
989	 *
990	 * 8 serves as the safety ratio.
991	 */
992	t = min(t, bdi_dirty * HZ / (8 * bw + 1));
993
994	/*
995	 * The pause time will settle within the range (max_pause/4, max_pause).
996	 * Apply a minimum value of 4 to get a non-zero max_pause/4.
997	 */
998	return clamp_val(t, 4, MAX_PAUSE);
999}
1000
1001/*
1002 * balance_dirty_pages() must be called by processes which are generating dirty
1003 * data.  It looks at the number of dirty pages in the machine and will force
1004 * the caller to wait once it crosses (background_thresh + dirty_thresh) / 2.
1005 * If we're over `background_thresh' then the writeback threads are woken to
1006 * perform some writeout.
1007 */
1008static void balance_dirty_pages(struct address_space *mapping,
1009				unsigned long pages_dirtied)
1010{
1011	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
1012	unsigned long bdi_reclaimable;
1013	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
1014	unsigned long bdi_dirty;
1015	unsigned long freerun;
1016	unsigned long background_thresh;
1017	unsigned long dirty_thresh;
1018	unsigned long bdi_thresh;
1019	long pause = 0;
1020	long uninitialized_var(max_pause);
1021	bool dirty_exceeded = false;
1022	unsigned long task_ratelimit;
1023	unsigned long uninitialized_var(dirty_ratelimit);
1024	unsigned long pos_ratio;
1025	struct backing_dev_info *bdi = mapping->backing_dev_info;
1026	unsigned long start_time = jiffies;
1027
1028	for (;;) {
1029		/*
1030		 * Unstable writes are a feature of certain networked
1031		 * filesystems (i.e. NFS) in which data may have been
1032		 * written to the server's write cache, but has not yet
1033		 * been flushed to permanent storage.
1034		 */
1035		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
1036					global_page_state(NR_UNSTABLE_NFS);
1037		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
1038
1039		global_dirty_limits(&background_thresh, &dirty_thresh);
1040
1041		/*
1042		 * Throttle it only when the background writeback cannot
1043		 * catch up. This avoids (excessively) small writeouts
1044		 * when the bdi limits are ramping up.
1045		 */
1046		freerun = dirty_freerun_ceiling(dirty_thresh,
1047						background_thresh);
1048		if (nr_dirty <= freerun)
1049			break;
1050
1051		if (unlikely(!writeback_in_progress(bdi)))
1052			bdi_start_background_writeback(bdi);
1053
1054		/*
1055		 * bdi_thresh is not treated as a strict limiting factor the way
1056		 * dirty_thresh is, for the following reasons:
1057		 * - in JBOD setup, bdi_thresh can fluctuate a lot
1058		 * - in a system with HDD and USB key, the USB key may somehow
1059		 *   go into state (bdi_dirty >> bdi_thresh) either because
1060		 *   bdi_dirty starts high, or because bdi_thresh drops low.
1061		 *   In this case we don't want to hard throttle the USB key
1062		 *   dirtiers for 100 seconds until bdi_dirty drops under
1063		 *   bdi_thresh. Instead the auxiliary bdi control line in
1064		 *   bdi_position_ratio() will let the dirtier task progress
1065		 *   at some rate <= (write_bw / 2) for bringing down bdi_dirty.
1066		 */
1067		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
1068
1069		/*
1070		 * In order to avoid the stacked BDI deadlock we need
1071		 * to ensure we accurately count the 'dirty' pages when
1072		 * the threshold is low.
1073		 *
1074		 * Otherwise it would be possible to get thresh+n pages
1075		 * reported dirty, even though there are thresh-m pages
1076		 * actually dirty; with m+n sitting in the percpu
1077		 * deltas.
1078		 */
1079		if (bdi_thresh < 2 * bdi_stat_error(bdi)) {
1080			bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
1081			bdi_dirty = bdi_reclaimable +
1082				    bdi_stat_sum(bdi, BDI_WRITEBACK);
1083		} else {
1084			bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
1085			bdi_dirty = bdi_reclaimable +
1086				    bdi_stat(bdi, BDI_WRITEBACK);
1087		}
1088
1089		dirty_exceeded = (bdi_dirty > bdi_thresh) ||
1090				  (nr_dirty > dirty_thresh);
1091		if (dirty_exceeded && !bdi->dirty_exceeded)
1092			bdi->dirty_exceeded = 1;
1093
1094		bdi_update_bandwidth(bdi, dirty_thresh, background_thresh,
1095				     nr_dirty, bdi_thresh, bdi_dirty,
1096				     start_time);
1097
1098		max_pause = bdi_max_pause(bdi, bdi_dirty);
1099
1100		dirty_ratelimit = bdi->dirty_ratelimit;
1101		pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
1102					       background_thresh, nr_dirty,
1103					       bdi_thresh, bdi_dirty);
1104		task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
1105							RATELIMIT_CALC_SHIFT;
1106		if (unlikely(task_ratelimit == 0)) {
1107			pause = max_pause;
1108			goto pause;
1109		}
1110		pause = HZ * pages_dirtied / task_ratelimit;
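		/*
		 * E.g. (assuming HZ == 1000) a task that dirtied 32 pages
		 * while being rate limited to 1000 pages/s sleeps for
		 * 32 jiffies, i.e. about 32ms.
		 */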
1111		if (unlikely(pause <= 0)) {
1112			trace_balance_dirty_pages(bdi,
1113						  dirty_thresh,
1114						  background_thresh,
1115						  nr_dirty,
1116						  bdi_thresh,
1117						  bdi_dirty,
1118						  dirty_ratelimit,
1119						  task_ratelimit,
1120						  pages_dirtied,
1121						  pause,
1122						  start_time);
1123			pause = 1; /* avoid resetting nr_dirtied_pause below */
1124			break;
1125		}
1126		pause = min(pause, max_pause);
1127
1128pause:
1129		trace_balance_dirty_pages(bdi,
1130					  dirty_thresh,
1131					  background_thresh,
1132					  nr_dirty,
1133					  bdi_thresh,
1134					  bdi_dirty,
1135					  dirty_ratelimit,
1136					  task_ratelimit,
1137					  pages_dirtied,
1138					  pause,
1139					  start_time);
1140		__set_current_state(TASK_KILLABLE);
1141		io_schedule_timeout(pause);
1142
1143		/*
1144		 * This is typically equal to (nr_dirty < dirty_thresh) and can
1145		 * also keep "1000+ dd on a slow USB stick" under control.
1146		 */
1147		if (task_ratelimit)
1148			break;
1149
1150		/*
1151		 * In the case of an unresponsive NFS server whose dirty pages
1152		 * exceed dirty_thresh, give the other good bdi's a pipe
1153		 * to go through, so that tasks on them still remain responsive.
1154		 *
1155		 * In theory 1 page is enough to keep the consumer-producer
1156		 * pipe going: the flusher cleans 1 page => the task dirties 1
1157		 * more page. However bdi_dirty has accounting errors.  So use
1158		 * the larger and more IO friendly bdi_stat_error.
1159		 */
1160		if (bdi_dirty <= bdi_stat_error(bdi))
1161			break;
1162
1163		if (fatal_signal_pending(current))
1164			break;
1165	}
1166
1167	if (!dirty_exceeded && bdi->dirty_exceeded)
1168		bdi->dirty_exceeded = 0;
1169
1170	current->nr_dirtied = 0;
1171	if (pause == 0) { /* in freerun area */
1172		current->nr_dirtied_pause =
1173				dirty_poll_interval(nr_dirty, dirty_thresh);
1174	} else if (pause <= max_pause / 4 &&
1175		   pages_dirtied >= current->nr_dirtied_pause) {
1176		current->nr_dirtied_pause = clamp_val(
1177					dirty_ratelimit * (max_pause / 2) / HZ,
1178					pages_dirtied + pages_dirtied / 8,
1179					pages_dirtied * 4);
1180	} else if (pause >= max_pause) {
1181		current->nr_dirtied_pause = 1 | clamp_val(
1182					dirty_ratelimit * (max_pause / 2) / HZ,
1183					pages_dirtied / 4,
1184					pages_dirtied - pages_dirtied / 8);
1185	}
1186
1187	if (writeback_in_progress(bdi))
1188		return;
1189
1190	/*
1191	 * In laptop mode, we wait until hitting the higher threshold before
1192	 * starting background writeout, and then write out all the way down
1193	 * to the lower threshold.  So slow writers cause minimal disk activity.
1194	 *
1195	 * In normal mode, we start background writeout at the lower
1196	 * background_thresh, to keep the amount of dirty memory low.
1197	 */
1198	if (laptop_mode)
1199		return;
1200
1201	if (nr_reclaimable > background_thresh)
1202		bdi_start_background_writeback(bdi);
1203}
1204
1205void set_page_dirty_balance(struct page *page, int page_mkwrite)
1206{
1207	if (set_page_dirty(page) || page_mkwrite) {
1208		struct address_space *mapping = page_mapping(page);
1209
1210		if (mapping)
1211			balance_dirty_pages_ratelimited(mapping);
1212	}
1213}
1214
1215static DEFINE_PER_CPU(int, bdp_ratelimits);
1216
1217/*
1218 * Normal tasks are throttled by
1219 *	loop {
1220 *		dirty tsk->nr_dirtied_pause pages;
1221 *		take a nap in balance_dirty_pages();
1222 *	}
1223 * However there is a worst case: if every task exits immediately after dirtying
1224 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1225 * called to throttle the page dirties. The solution is to save the not yet
1226 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1227 * to the running tasks at random. This works well for the above worst case,
1228 * as the new task will pick up and accumulate the old task's leaked dirty
1229 * count and eventually get throttled.
1230 */
1231DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
1232
1233/**
1234 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
1235 * @mapping: address_space which was dirtied
1236 * @nr_pages_dirtied: number of pages which the caller has just dirtied
1237 *
1238 * Processes which are dirtying memory should call in here once for each page
1239 * which was newly dirtied.  The function will periodically check the system's
1240 * dirty state and will initiate writeback if needed.
1241 *
1242 * On really big machines, get_writeback_state is expensive, so try to avoid
1243 * calling it too often (ratelimiting).  But once we're over the dirty memory
1244 * limit we decrease the ratelimiting by a lot, to prevent individual processes
1245 * from overshooting the limit by (ratelimit_pages) each.
1246 */
1247void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
1248					unsigned long nr_pages_dirtied)
1249{
1250	struct backing_dev_info *bdi = mapping->backing_dev_info;
1251	int ratelimit;
1252	int *p;
1253
1254	if (!bdi_cap_account_dirty(bdi))
1255		return;
1256
1257	ratelimit = current->nr_dirtied_pause;
1258	if (bdi->dirty_exceeded)
1259		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
1260
1261	current->nr_dirtied += nr_pages_dirtied;
1262
1263	preempt_disable();
1264	/*
1265	 * This prevents one CPU from accumulating too many dirtied pages without
1266	 * calling into balance_dirty_pages(), which can happen when there are
1267	 * 1000+ tasks that all start dirtying pages at exactly the same
1268	 * time, each of them honouring a too large initial task->nr_dirtied_pause.
1269	 */
1270	p =  &__get_cpu_var(bdp_ratelimits);
1271	if (unlikely(current->nr_dirtied >= ratelimit))
1272		*p = 0;
1273	else {
1274		*p += nr_pages_dirtied;
1275		if (unlikely(*p >= ratelimit_pages)) {
1276			*p = 0;
1277			ratelimit = 0;
1278		}
1279	}
1280	/*
1281	 * Pick up the dirtied pages left behind by exited tasks. This avoids lots of
1282	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
1283	 * the dirty throttling and livelocking other long-running dirtiers.
1284	 */
1285	p = &__get_cpu_var(dirty_throttle_leaks);
1286	if (*p > 0 && current->nr_dirtied < ratelimit) {
1287		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
1288		*p -= nr_pages_dirtied;
1289		current->nr_dirtied += nr_pages_dirtied;
1290	}
1291	preempt_enable();
1292
1293	if (unlikely(current->nr_dirtied >= ratelimit))
1294		balance_dirty_pages(mapping, current->nr_dirtied);
1295}
1296EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
1297
1298void throttle_vm_writeout(gfp_t gfp_mask)
1299{
1300	unsigned long background_thresh;
1301	unsigned long dirty_thresh;
1302
1303	for ( ; ; ) {
1304		global_dirty_limits(&background_thresh, &dirty_thresh);
1305
1306		/*
1307		 * Boost the allowable dirty threshold a bit for page
1308		 * allocators so they don't get DoS'ed by heavy writers
1309		 */
1310		dirty_thresh += dirty_thresh / 10;      /* wheeee... */
1311
1312		if (global_page_state(NR_UNSTABLE_NFS) +
1313			global_page_state(NR_WRITEBACK) <= dirty_thresh)
1314			break;
1315		congestion_wait(BLK_RW_ASYNC, HZ/10);
1316
1317		/*
1318		 * The caller might hold locks which can prevent IO completion
1319		 * or progress in the filesystem.  So we cannot just sit here
1320		 * waiting for IO to complete.
1321		 */
1322		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
1323			break;
1324	}
1325}
1326
1327/*
1328 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
1329 */
1330int dirty_writeback_centisecs_handler(ctl_table *table, int write,
1331	void __user *buffer, size_t *length, loff_t *ppos)
1332{
1333	proc_dointvec(table, write, buffer, length, ppos);
1334	bdi_arm_supers_timer();
1335	return 0;
1336}
1337
1338#ifdef CONFIG_BLOCK
1339void laptop_mode_timer_fn(unsigned long data)
1340{
1341	struct request_queue *q = (struct request_queue *)data;
1342	int nr_pages = global_page_state(NR_FILE_DIRTY) +
1343		global_page_state(NR_UNSTABLE_NFS);
1344
1345	/*
1346	 * We want to write everything out, not just down to the dirty
1347	 * threshold
1348	 */
1349	if (bdi_has_dirty_io(&q->backing_dev_info))
1350		bdi_start_writeback(&q->backing_dev_info, nr_pages,
1351					WB_REASON_LAPTOP_TIMER);
1352}
1353
1354/*
1355 * We've spun up the disk and we're in laptop mode: schedule writeback
1356 * of all dirty data a few seconds from now.  If the flush is already scheduled
1357 * then push it back - the user is still using the disk.
1358 */
1359void laptop_io_completion(struct backing_dev_info *info)
1360{
1361	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
1362}
1363
1364/*
1365 * We're in laptop mode and we've just synced. The sync's writes will have
1366 * caused another writeback to be scheduled by laptop_io_completion.
1367 * Nothing needs to be written back anymore, so we unschedule the writeback.
1368 */
1369void laptop_sync_completion(void)
1370{
1371	struct backing_dev_info *bdi;
1372
1373	rcu_read_lock();
1374
1375	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
1376		del_timer(&bdi->laptop_mode_wb_timer);
1377
1378	rcu_read_unlock();
1379}
1380#endif
1381
1382/*
1383 * If ratelimit_pages is too high then we can get into dirty-data overload
1384 * if a large number of processes all perform writes at the same time.
1385 * If it is too low then SMP machines will call the (expensive)
1386 * get_writeback_state too often.
1387 *
1388 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
1389 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
1390 * thresholds.
1391 */
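/*
 * E.g. with dirty_thresh = 200,000 pages on a 4-CPU machine,
 * ratelimit_pages = 200,000 / (4 * 32) = 1562, so all CPUs together can
 * overshoot by at most ~4 * 1562 = 6248 pages, about 3% of the threshold.
 */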
1392
1393void writeback_set_ratelimit(void)
1394{
1395	unsigned long background_thresh;
1396	unsigned long dirty_thresh;
1397	global_dirty_limits(&background_thresh, &dirty_thresh);
1398	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
1399	if (ratelimit_pages < 16)
1400		ratelimit_pages = 16;
1401}
1402
1403static int __cpuinit
1404ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
1405{
1406	writeback_set_ratelimit();
1407	return NOTIFY_DONE;
1408}
1409
1410static struct notifier_block __cpuinitdata ratelimit_nb = {
1411	.notifier_call	= ratelimit_handler,
1412	.next		= NULL,
1413};
1414
1415/*
1416 * Called early on to tune the page writeback dirty limits.
1417 *
1418 * We used to scale dirty pages according to how the total memory
1419 * related to the pages that could be allocated for buffers (by
1420 * comparing nr_free_buffer_pages() to vm_total_pages).
1421 *
1422 * However, that was when we used "dirty_ratio" to scale with
1423 * all memory, and we don't do that any more. "dirty_ratio"
1424 * is now applied to total non-HIGHPAGE memory (by subtracting
1425 * totalhigh_pages from vm_total_pages), and as such we can't
1426 * get into the old insane situation any more where we had
1427 * large amounts of dirty pages compared to a small amount of
1428 * non-HIGHMEM memory.
1429 *
1430 * But we might still want to scale the dirty_ratio by how
1431 * much memory the box has.
1432 */
1433void __init page_writeback_init(void)
1434{
1435	int shift;
1436
1437	writeback_set_ratelimit();
1438	register_cpu_notifier(&ratelimit_nb);
1439
1440	shift = calc_period_shift();
1441	prop_descriptor_init(&vm_completions, shift);
1442}
1443
1444/**
1445 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
1446 * @mapping: address space structure to write
1447 * @start: starting page index
1448 * @end: ending page index (inclusive)
1449 *
1450 * This function scans the page range from @start to @end (inclusive) and tags
1451 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
1452 * that write_cache_pages (or whoever calls this function) will then use
1453 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
1454 * used to avoid livelocking of writeback by a process steadily creating new
1455 * dirty pages in the file (thus it is important for this function to be quick
1456 * so that it can tag pages faster than a dirtying process can create them).
1457 */
1458/*
1459 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
1460 */
1461void tag_pages_for_writeback(struct address_space *mapping,
1462			     pgoff_t start, pgoff_t end)
1463{
1464#define WRITEBACK_TAG_BATCH 4096
1465	unsigned long tagged;
1466
1467	do {
1468		spin_lock_irq(&mapping->tree_lock);
1469		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
1470				&start, end, WRITEBACK_TAG_BATCH,
1471				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
1472		spin_unlock_irq(&mapping->tree_lock);
1473		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
1474		cond_resched();
1475		/* We check 'start' to handle wrapping when end == ~0UL */
1476	} while (tagged >= WRITEBACK_TAG_BATCH && start);
1477}
1478EXPORT_SYMBOL(tag_pages_for_writeback);
1479
1480/**
1481 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
1482 * @mapping: address space structure to write
1483 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1484 * @writepage: function called for each page
1485 * @data: data passed to writepage function
1486 *
1487 * If a page is already under I/O, write_cache_pages() skips it, even
1488 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
1489 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
1490 * and msync() need to guarantee that all the data which was dirty at the time
1491 * the call was made get new I/O started against them.  If wbc->sync_mode is
1492 * WB_SYNC_ALL then we were called for data integrity and we must wait for
1493 * existing IO to complete.
1494 *
1495 * To avoid livelocks (when another process dirties new pages), we first tag
1496 * pages which should be written back with TOWRITE tag and only then start
1497 * writing them. For data-integrity sync we have to be careful so that we do
1498 * not miss some pages (e.g., because some other process has cleared TOWRITE
1499 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
1500 * by the process clearing the DIRTY tag (and submitting the page for IO).
1501 */
1502int write_cache_pages(struct address_space *mapping,
1503		      struct writeback_control *wbc, writepage_t writepage,
1504		      void *data)
1505{
1506	int ret = 0;
1507	int done = 0;
1508	struct pagevec pvec;
1509	int nr_pages;
1510	pgoff_t uninitialized_var(writeback_index);
1511	pgoff_t index;
1512	pgoff_t end;		/* Inclusive */
1513	pgoff_t done_index;
1514	int cycled;
1515	int range_whole = 0;
1516	int tag;
1517
1518	pagevec_init(&pvec, 0);
1519	if (wbc->range_cyclic) {
1520		writeback_index = mapping->writeback_index; /* prev offset */
1521		index = writeback_index;
1522		if (index == 0)
1523			cycled = 1;
1524		else
1525			cycled = 0;
1526		end = -1;
1527	} else {
1528		index = wbc->range_start >> PAGE_CACHE_SHIFT;
1529		end = wbc->range_end >> PAGE_CACHE_SHIFT;
1530		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1531			range_whole = 1;
1532		cycled = 1; /* ignore range_cyclic tests */
1533	}
1534	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1535		tag = PAGECACHE_TAG_TOWRITE;
1536	else
1537		tag = PAGECACHE_TAG_DIRTY;
1538retry:
1539	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1540		tag_pages_for_writeback(mapping, index, end);
1541	done_index = index;
1542	while (!done && (index <= end)) {
1543		int i;
1544
1545		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
1546			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1547		if (nr_pages == 0)
1548			break;
1549
1550		for (i = 0; i < nr_pages; i++) {
1551			struct page *page = pvec.pages[i];
1552
1553			/*
1554			 * At this point, the page may be truncated or
1555			 * invalidated (changing page->mapping to NULL), or
1556			 * even swizzled back from swapper_space to tmpfs file
1557			 * mapping. However, page->index will not change
1558			 * because we have a reference on the page.
1559			 */
1560			if (page->index > end) {
1561				/*
1562				 * can't be range_cyclic (1st pass) because
1563				 * end == -1 in that case.
1564				 */
1565				done = 1;
1566				break;
1567			}
1568
1569			done_index = page->index;
1570
1571			lock_page(page);
1572
1573			/*
1574			 * Page truncated or invalidated. We can freely skip it
1575			 * then, even for data integrity operations: the page
1576			 * has disappeared concurrently, so there could be no
1577			 * real expectation of this data integrity operation
1578			 * even if there is now a new, dirty page at the same
1579			 * pagecache address.
1580			 */
1581			if (unlikely(page->mapping != mapping)) {
1582continue_unlock:
1583				unlock_page(page);
1584				continue;
1585			}
1586
1587			if (!PageDirty(page)) {
1588				/* someone wrote it for us */
1589				goto continue_unlock;
1590			}
1591
1592			if (PageWriteback(page)) {
1593				if (wbc->sync_mode != WB_SYNC_NONE)
1594					wait_on_page_writeback(page);
1595				else
1596					goto continue_unlock;
1597			}
1598
1599			BUG_ON(PageWriteback(page));
1600			if (!clear_page_dirty_for_io(page))
1601				goto continue_unlock;
1602
1603			trace_wbc_writepage(wbc, mapping->backing_dev_info);
1604			ret = (*writepage)(page, wbc, data);
1605			if (unlikely(ret)) {
1606				if (ret == AOP_WRITEPAGE_ACTIVATE) {
1607					unlock_page(page);
1608					ret = 0;
1609				} else {
1610					/*
1611					 * done_index is set past this page,
1612					 * so media errors will not choke
1613					 * background writeout for the entire
1614					 * file. This has consequences for
1615					 * range_cyclic semantics (ie. it may
1616					 * not be suitable for data integrity
1617					 * writeout).
1618					 */
1619					done_index = page->index + 1;
1620					done = 1;
1621					break;
1622				}
1623			}
1624
1625			/*
1626			 * We stop writing back only if we are not doing
1627			 * integrity sync. In case of integrity sync we have to
1628			 * keep going until we have written all the pages
1629			 * we tagged for writeback prior to entering this loop.
1630			 */
1631			if (--wbc->nr_to_write <= 0 &&
1632			    wbc->sync_mode == WB_SYNC_NONE) {
1633				done = 1;
1634				break;
1635			}
1636		}
1637		pagevec_release(&pvec);
1638		cond_resched();
1639	}
1640	if (!cycled && !done) {
1641		/*
1642		 * range_cyclic:
1643		 * We hit the last page and there is more work to be done: wrap
1644		 * back to the start of the file
1645		 */
1646		cycled = 1;
1647		index = 0;
1648		end = writeback_index - 1;
1649		goto retry;
1650	}
1651	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1652		mapping->writeback_index = done_index;
1653
1654	return ret;
1655}
1656EXPORT_SYMBOL(write_cache_pages);
1657
1658/*
1659 * Function used by generic_writepages to call the real writepage
1660 * function and set the mapping flags on error
1661 */
1662static int __writepage(struct page *page, struct writeback_control *wbc,
1663		       void *data)
1664{
1665	struct address_space *mapping = data;
1666	int ret = mapping->a_ops->writepage(page, wbc);
1667	mapping_set_error(mapping, ret);
1668	return ret;
1669}
1670
1671/**
1672 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
1673 * @mapping: address space structure to write
1674 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1675 *
1676 * This is a library function, which implements the writepages()
1677 * address_space_operation.
1678 */
1679int generic_writepages(struct address_space *mapping,
1680		       struct writeback_control *wbc)
1681{
1682	struct blk_plug plug;
1683	int ret;
1684
1685	/* deal with chardevs and other special files */
1686	if (!mapping->a_ops->writepage)
1687		return 0;
1688
1689	blk_start_plug(&plug);
1690	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
1691	blk_finish_plug(&plug);
1692	return ret;
1693}
1694
1695EXPORT_SYMBOL(generic_writepages);
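
/*
 * Illustrative sketch (hypothetical bar_* names): a filesystem that only
 * needs some setup of its own before the generic page walk can wrap
 * generic_writepages() in its ->writepages method; ->writepage must still
 * be provided, since generic_writepages() returns 0 without it.
 */
static int bar_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	/* hypothetical filesystem-specific preparation */
	bar_flush_pending_allocations(mapping->host);

	return generic_writepages(mapping, wbc);
}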
1696
1697int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
1698{
1699	int ret;
1700
1701	if (wbc->nr_to_write <= 0)
1702		return 0;
1703	if (mapping->a_ops->writepages)
1704		ret = mapping->a_ops->writepages(mapping, wbc);
1705	else
1706		ret = generic_writepages(mapping, wbc);
1707	return ret;
1708}
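
/*
 * Illustrative sketch: callers such as __filemap_fdatawrite_range() build a
 * writeback_control and let do_writepages() choose between ->writepages and
 * the generic path. A minimal, hypothetical flush of one mapping:
 */
static int foo_flush_mapping(struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,	/* data integrity writeout */
		.nr_to_write	= LONG_MAX,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	return do_writepages(mapping, &wbc);
}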
1709
1710/**
1711 * write_one_page - write out a single page and optionally wait on I/O
1712 * @page: the page to write
1713 * @wait: if true, wait on writeout
1714 *
1715 * The page must be locked by the caller and will be unlocked upon return.
1716 *
1717 * write_one_page() returns a negative error code if I/O failed.
1718 */
1719int write_one_page(struct page *page, int wait)
1720{
1721	struct address_space *mapping = page->mapping;
1722	int ret = 0;
1723	struct writeback_control wbc = {
1724		.sync_mode = WB_SYNC_ALL,
1725		.nr_to_write = 1,
1726	};
1727
1728	BUG_ON(!PageLocked(page));
1729
1730	if (wait)
1731		wait_on_page_writeback(page);
1732
1733	if (clear_page_dirty_for_io(page)) {
1734		page_cache_get(page);
1735		ret = mapping->a_ops->writepage(page, &wbc);
1736		if (ret == 0 && wait) {
1737			wait_on_page_writeback(page);
1738			if (PageError(page))
1739				ret = -EIO;
1740		}
1741		page_cache_release(page);
1742	} else {
1743		unlock_page(page);
1744	}
1745	return ret;
1746}
1747EXPORT_SYMBOL(write_one_page);
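
/*
 * Illustrative sketch (hypothetical foo_* name): flushing a single page
 * synchronously. The caller locks the page; write_one_page() drops the
 * lock in every case, so the page lock must not be touched afterwards.
 */
static int foo_sync_one_page(struct page *page)
{
	lock_page(page);
	return write_one_page(page, 1);		/* 1 == wait for writeout */
}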
1748
1749/*
1750 * For address_spaces which neither use buffers nor write back.
1751 */
1752int __set_page_dirty_no_writeback(struct page *page)
1753{
1754	if (!PageDirty(page))
1755		return !TestSetPageDirty(page);
1756	return 0;
1757}
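
/*
 * Illustrative sketch (hypothetical foo_ram_aops): RAM-backed filesystems
 * whose pages are never written back, ramfs being the classic example, point
 * ->set_page_dirty here so dirty pages are neither accounted nor queued.
 */
static const struct address_space_operations foo_ram_aops = {
	.readpage	= simple_readpage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
};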
1758
1759/*
1760 * Helper function for set_page_dirty family.
1761 * NOTE: This relies on being atomic wrt interrupts.
1762 */
1763void account_page_dirtied(struct page *page, struct address_space *mapping)
1764{
1765	if (mapping_cap_account_dirty(mapping)) {
1766		__inc_zone_page_state(page, NR_FILE_DIRTY);
1767		__inc_zone_page_state(page, NR_DIRTIED);
1768		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
1769		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
1770		task_io_account_write(PAGE_CACHE_SIZE);
1771	}
1772}
1773EXPORT_SYMBOL(account_page_dirtied);
1774
1775/*
1776 * Helper function for set_page_writeback family.
1777 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
1778 * wrt interrupts.
1779 */
1780void account_page_writeback(struct page *page)
1781{
1782	inc_zone_page_state(page, NR_WRITEBACK);
1783}
1784EXPORT_SYMBOL(account_page_writeback);
1785
1786/*
1787 * For address_spaces which do not use buffers.  Just tag the page as dirty in
1788 * its radix tree.
1789 *
1790 * This is also used when a single buffer is being dirtied: we want to set the
1791 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
1792 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
1793 *
1794 * Most callers have locked the page, which pins the address_space in memory.
1795 * But zap_pte_range() does not lock the page; in that case the
1796 * mapping is pinned by the vma's ->vm_file reference.
1797 *
1798 * We take care to handle the case where the page was truncated from the
1799 * mapping by re-checking page_mapping() inside tree_lock.
1800 */
1801int __set_page_dirty_nobuffers(struct page *page)
1802{
1803	if (!TestSetPageDirty(page)) {
1804		struct address_space *mapping = page_mapping(page);
1805		struct address_space *mapping2;
1806
1807		if (!mapping)
1808			return 1;
1809
1810		spin_lock_irq(&mapping->tree_lock);
1811		mapping2 = page_mapping(page);
1812		if (mapping2) { /* Race with truncate? */
1813			BUG_ON(mapping2 != mapping);
1814			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
1815			account_page_dirtied(page, mapping);
1816			radix_tree_tag_set(&mapping->page_tree,
1817				page_index(page), PAGECACHE_TAG_DIRTY);
1818		}
1819		spin_unlock_irq(&mapping->tree_lock);
1820		if (mapping->host) {
1821			/* !PageAnon && !swapper_space */
1822			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1823		}
1824		return 1;
1825	}
1826	return 0;
1827}
1828EXPORT_SYMBOL(__set_page_dirty_nobuffers);
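
/*
 * Illustrative sketch (hypothetical foo_* name): a filesystem path that has
 * just filled a locked page cache page with data it wants written back.
 * Holding the page lock pins the mapping, as required above.
 */
static void foo_mark_page_dirty(struct page *page)
{
	BUG_ON(!PageLocked(page));
	__set_page_dirty_nobuffers(page);
}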
1829
1830/*
1831 * When a writepage implementation decides that it doesn't want to write this
1832 * page for some reason, it should redirty the locked page via
1833 * redirty_page_for_writepage(); it should then unlock the page and return 0.
1834 */
1835int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
1836{
1837	wbc->pages_skipped++;
1838	return __set_page_dirty_nobuffers(page);
1839}
1840EXPORT_SYMBOL(redirty_page_for_writepage);
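
/*
 * Illustrative sketch (hypothetical foo_* names): the pattern described
 * above, as seen from a ->writepage implementation that cannot write the
 * page right now (for example, it would have to block in a reclaim context).
 */
static int foo_writepage(struct page *page, struct writeback_control *wbc)
{
	if (foo_cannot_write_now(page, wbc)) {	/* hypothetical predicate */
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	return foo_do_writepage(page, wbc);	/* hypothetical real writeout */
}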
1841
1842/*
1843 * Dirty a page.
1844 *
1845 * For pages with a mapping this should be done under the page lock
1846 * for the benefit of asynchronous memory errors who prefer a consistent
1847 * dirty state. This rule can be broken in some special cases,
1848 * but should be better not to.
1849 *
1850 * If the mapping doesn't provide a set_page_dirty a_op, then
1851 * just fall through and assume that it wants buffer_heads.
1852 */
1853int set_page_dirty(struct page *page)
1854{
1855	struct address_space *mapping = page_mapping(page);
1856
1857	if (likely(mapping)) {
1858		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
1859		/*
1860		 * readahead/lru_deactivate_page could leave PG_readahead/PG_reclaim
1861		 * set due to a race with end_page_writeback().
1862		 * For readahead: if the page is written again, the flag is reset,
1863		 * so there is no problem.
1864		 * For lru_deactivate_page: if the page is redirtied, the flag is
1865		 * also reset, so there is no problem; but if the page is then used
1866		 * for readahead it will confuse readahead and make it restart the
1867		 * size ramp-up process. That is only a minor problem, though.
1868		 */
1869		ClearPageReclaim(page);
1870#ifdef CONFIG_BLOCK
1871		if (!spd)
1872			spd = __set_page_dirty_buffers;
1873#endif
1874		return (*spd)(page);
1875	}
1876	if (!PageDirty(page)) {
1877		if (!TestSetPageDirty(page))
1878			return 1;
1879	}
1880	return 0;
1881}
1882EXPORT_SYMBOL(set_page_dirty);
1883
1884/*
1885 * set_page_dirty() is racy if the caller has no reference against
1886 * page->mapping->host, and if the page is unlocked.  This is because another
1887 * CPU could truncate the page off the mapping and then free the mapping.
1888 *
1889 * Usually, the page _is_ locked, or the caller is a user-space process which
1890 * holds a reference on the inode by having an open file.
1891 *
1892 * In other cases, the page should be locked before running set_page_dirty().
1893 */
1894int set_page_dirty_lock(struct page *page)
1895{
1896	int ret;
1897
1898	lock_page(page);
1899	ret = set_page_dirty(page);
1900	unlock_page(page);
1901	return ret;
1902}
1903EXPORT_SYMBOL(set_page_dirty_lock);
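
/*
 * Illustrative sketch (hypothetical foo_* name): the classic caller is a
 * driver that pinned user pages with get_user_pages(), let the hardware
 * write into them, and is now releasing them. The pages are not locked and
 * only the extra reference pins the mapping, so the locked variant is needed.
 */
static void foo_release_user_pages(struct page **pages, int nr, int dirty)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		page_cache_release(pages[i]);	/* drop get_user_pages() ref */
	}
}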
1904
1905/*
1906 * Clear a page's dirty flag, while caring for dirty memory accounting.
1907 * Returns true if the page was previously dirty.
1908 *
1909 * This is for preparing to put the page under writeout.  We leave the page
1910 * tagged as dirty in the radix tree so that a concurrent write-for-sync
1911 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
1912 * implementation will run either set_page_writeback() or set_page_dirty(),
1913 * at which stage we bring the page's dirty flag and radix-tree dirty tag
1914 * back into sync.
1915 *
1916 * This incoherency between the page's dirty flag and radix-tree tag is
1917 * unfortunate, but it only exists while the page is locked.
1918 */
1919int clear_page_dirty_for_io(struct page *page)
1920{
1921	struct address_space *mapping = page_mapping(page);
1922
1923	BUG_ON(!PageLocked(page));
1924
1925	if (mapping && mapping_cap_account_dirty(mapping)) {
1926		/*
1927		 * Yes, Virginia, this is indeed insane.
1928		 *
1929		 * We use this sequence to make sure that
1930		 *  (a) we account for dirty stats properly
1931		 *  (b) we tell the low-level filesystem to
1932		 *      mark the whole page dirty if it was
1933		 *      dirty in a pagetable. Only to then
1934		 *  (c) clean the page again and return 1 to
1935		 *      cause the writeback.
1936		 *
1937		 * This way we avoid all nasty races with the
1938		 * dirty bit in multiple places and clearing
1939		 * them concurrently from different threads.
1940		 *
1941		 * Note! Normally the "set_page_dirty(page)"
1942		 * has no effect on the actual dirty bit - since
1943		 * that will already usually be set. But we
1944		 * need the side effects, and it can help us
1945		 * avoid races.
1946		 *
1947		 * We basically use the page "master dirty bit"
1948		 * as a serialization point for all the different
1949		 * threads doing their things.
1950		 */
1951		if (page_mkclean(page))
1952			set_page_dirty(page);
1953		/*
1954		 * We carefully synchronise fault handlers against
1955		 * installing a dirty pte and marking the page dirty
1956		 * at this point. We do this by having them hold the
1957		 * page lock at some point after installing their
1958		 * pte, but before marking the page dirty.
1959		 * Pages are always locked coming in here, so we get
1960		 * the desired exclusion. See mm/memory.c:do_wp_page()
1961		 * for more comments.
1962		 */
1963		if (TestClearPageDirty(page)) {
1964			dec_zone_page_state(page, NR_FILE_DIRTY);
1965			dec_bdi_stat(mapping->backing_dev_info,
1966					BDI_RECLAIMABLE);
1967			return 1;
1968		}
1969		return 0;
1970	}
1971	return TestClearPageDirty(page);
1972}
1973EXPORT_SYMBOL(clear_page_dirty_for_io);
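
/*
 * Illustrative sketch (hypothetical foo_* names): the caller-side contract
 * described above. write_cache_pages() earlier in this file performs the
 * clear_page_dirty_for_io() step before calling ->writepage(); a hand-rolled
 * writeout path doing its own submission would look roughly like this.
 */
static int foo_start_writeout(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!clear_page_dirty_for_io(page)) {
		/* somebody else cleaned the page; nothing to write */
		unlock_page(page);
		return 0;
	}

	set_page_writeback(page);	/* wraps test_set_page_writeback() */
	unlock_page(page);
	foo_submit_async_io(page);	/* hypothetical; its completion handler
					 * must call end_page_writeback() */
	return 0;
}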
1974
1975int test_clear_page_writeback(struct page *page)
1976{
1977	struct address_space *mapping = page_mapping(page);
1978	int ret;
1979
1980	if (mapping) {
1981		struct backing_dev_info *bdi = mapping->backing_dev_info;
1982		unsigned long flags;
1983
1984		spin_lock_irqsave(&mapping->tree_lock, flags);
1985		ret = TestClearPageWriteback(page);
1986		if (ret) {
1987			radix_tree_tag_clear(&mapping->page_tree,
1988						page_index(page),
1989						PAGECACHE_TAG_WRITEBACK);
1990			if (bdi_cap_account_writeback(bdi)) {
1991				__dec_bdi_stat(bdi, BDI_WRITEBACK);
1992				__bdi_writeout_inc(bdi);
1993			}
1994		}
1995		spin_unlock_irqrestore(&mapping->tree_lock, flags);
1996	} else {
1997		ret = TestClearPageWriteback(page);
1998	}
1999	if (ret) {
2000		dec_zone_page_state(page, NR_WRITEBACK);
2001		inc_zone_page_state(page, NR_WRITTEN);
2002	}
2003	return ret;
2004}
2005
2006int test_set_page_writeback(struct page *page)
2007{
2008	struct address_space *mapping = page_mapping(page);
2009	int ret;
2010
2011	if (mapping) {
2012		struct backing_dev_info *bdi = mapping->backing_dev_info;
2013		unsigned long flags;
2014
2015		spin_lock_irqsave(&mapping->tree_lock, flags);
2016		ret = TestSetPageWriteback(page);
2017		if (!ret) {
2018			radix_tree_tag_set(&mapping->page_tree,
2019						page_index(page),
2020						PAGECACHE_TAG_WRITEBACK);
2021			if (bdi_cap_account_writeback(bdi))
2022				__inc_bdi_stat(bdi, BDI_WRITEBACK);
2023		}
2024		if (!PageDirty(page))
2025			radix_tree_tag_clear(&mapping->page_tree,
2026						page_index(page),
2027						PAGECACHE_TAG_DIRTY);
2028		radix_tree_tag_clear(&mapping->page_tree,
2029				     page_index(page),
2030				     PAGECACHE_TAG_TOWRITE);
2031		spin_unlock_irqrestore(&mapping->tree_lock, flags);
2032	} else {
2033		ret = TestSetPageWriteback(page);
2034	}
2035	if (!ret)
2036		account_page_writeback(page);
2037	return ret;
2038
2039}
2040EXPORT_SYMBOL(test_set_page_writeback);
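
/*
 * Illustrative sketch (hypothetical foo_* name): these two functions are
 * normally reached via the wrappers set_page_writeback() and
 * end_page_writeback(), which bracket the actual I/O. A typical bio
 * completion handler for the writeout side:
 */
static void foo_end_bio_write(struct bio *bio, int error)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (error)
		SetPageError(page);
	end_page_writeback(page);	/* ends up in test_clear_page_writeback() */
	bio_put(bio);
}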
2041
2042/*
2043 * Return true if any of the pages in the mapping are marked with the
2044 * passed tag.
2045 */
2046int mapping_tagged(struct address_space *mapping, int tag)
2047{
2048	return radix_tree_tagged(&mapping->page_tree, tag);
2049}
2050EXPORT_SYMBOL(mapping_tagged);
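
/*
 * Illustrative sketch (hypothetical foo_* name): a cheap check that lets an
 * fsync-like path skip work when the mapping has nothing dirty and nothing
 * under writeback.
 */
static int foo_mapping_needs_sync(struct address_space *mapping)
{
	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
	       mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}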
2051