page-writeback.c revision 1cf6e7d83bf334cc5916137862c920a97aabc018
/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>

/*
 * The maximum number of pages to write out in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}
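
/*
 * Worked example (illustrative figures): with ratelimit_pages at its
 * 1024-page cap and 4KiB pages, sync_writeback_pages() returns
 * 1024 + 1024/2 = 1536 pages, so a throttled writer is asked to submit
 * roughly 6MB of writeback per balance_dirty_pages() invocation.
 */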

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 5;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 10;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


static void background_writeout(unsigned long _min_pages);

/*
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a smaller
 * share.
 *
 * We use page writeout completions because we are interested in getting rid of
 * dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these events,
 * because demand can/will vary over time. The length of this period itself is
 * measured in page writeback completions.
 */
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;

/*
 * couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */
static int calc_period_shift(void)
{
	unsigned long dirty_total;

	if (vm_dirty_bytes)
		dirty_total = vm_dirty_bytes / PAGE_SIZE;
	else
		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
				100;
	return 2 + ilog2(dirty_total - 1);
}
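
/*
 * Worked example (illustrative figures): with 1GiB of dirtyable memory
 * (262144 4KiB pages) and vm_dirty_ratio = 10, dirty_total is 26214
 * pages, so the shift is 2 + ilog2(26213) = 2 + 14 = 16 and
 * period/2 = 2^15 = 32768, which is indeed roundup_pow_of_two(26214).
 */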

/*
 * update the period when the dirty threshold changes.
 */
static void update_completion_period(void)
{
	int shift = calc_period_shift();
	prop_change_shift(&vm_completions, shift);
	prop_change_shift(&vm_dirties, shift);
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		update_completion_period();
		vm_dirty_bytes = 0;
	}
	return ret;
}

int dirty_bytes_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		update_completion_period();
		vm_dirty_ratio = 0;
	}
	return ret;
}

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
			      bdi->max_prop_frac);
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
	unsigned long flags;

	local_irq_save(flags);
	__bdi_writeout_inc(bdi);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

void task_dirty_inc(struct task_struct *tsk)
{
	prop_inc_single(&vm_dirties, &tsk->dirties);
}

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	if (bdi_cap_writeback_dirty(bdi)) {
		prop_fraction_percpu(&vm_completions, &bdi->completions,
				numerator, denominator);
	} else {
		*numerator = 0;
		*denominator = 1;
	}
}

/*
 * Clip the earned share of dirty pages to that which is actually available.
 * This avoids exceeding the total dirty_limit when the floating averages
 * fluctuate too quickly.
 */
static void
clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
{
	long avail_dirty;

	avail_dirty = dirty -
		(global_page_state(NR_FILE_DIRTY) +
		 global_page_state(NR_WRITEBACK) +
		 global_page_state(NR_UNSTABLE_NFS) +
		 global_page_state(NR_WRITEBACK_TEMP));

	if (avail_dirty < 0)
		avail_dirty = 0;

	avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
		bdi_stat(bdi, BDI_WRITEBACK);

	*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
}

static inline void task_dirties_fraction(struct task_struct *tsk,
		long *numerator, long *denominator)
{
	prop_fraction_single(&vm_dirties, &tsk->dirties,
				numerator, denominator);
}

/*
 * scale the dirty limit
 *
 * task specific dirty limit:
 *
 *   dirty -= (dirty/8) * p_{t}
 */
static void task_dirty_limit(struct task_struct *tsk, long *pdirty)
{
	long numerator, denominator;
	long dirty = *pdirty;
	u64 inv = dirty >> 3;

	task_dirties_fraction(tsk, &numerator, &denominator);
	inv *= numerator;
	do_div(inv, denominator);

	dirty -= inv;
	if (dirty < *pdirty/2)
		dirty = *pdirty/2;

	*pdirty = dirty;
}
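
/*
 * Worked example (illustrative figures): with *pdirty = 1000 pages, a
 * task that produced half of the recently dirtied pages (fraction 1/2)
 * loses (1000/8) * 1/2 = 62 pages, giving a limit of 938; a task
 * responsible for all recent dirtying loses the full 1000/8 = 125.
 * The clamp at the end guarantees the result never drops below 500.
 */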

/*
 * bdi_lock protects bdi_min_ratio and the per-bdi min_ratio/max_ratio
 * settings updated below.
 */
static DEFINE_SPINLOCK(bdi_lock);
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&bdi_lock, flags);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_irqrestore(&bdi_lock, flags);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	unsigned long flags;
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_irqsave(&bdi_lock, flags);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_irqrestore(&bdi_lock, flags);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);
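
/*
 * Both ratios are normally driven from sysfs; as a sketch (the path comes
 * from the bdi class device, and the "8:0" device name is illustrative):
 *
 *	echo 10 > /sys/class/bdi/8:0/min_ratio
 *	echo 50 > /sys/class/bdi/8:0/max_ratio
 *
 * which reserves at least 10% of the global dirty limit for that device
 * and caps its share at 50%.
 */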

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
	}
	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the total amount of dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * determine_dirtyable_memory - amount of memory that may be used
 *
 * Returns the number of pages that can currently be freed and used
 * by the kernel for direct mappings.
 */
unsigned long determine_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES) + global_lru_pages();

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

void
get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
		 unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
{
	unsigned long background;
	unsigned long dirty;
	unsigned long available_memory = determine_dirtyable_memory();
	struct task_struct *tsk;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else {
		int dirty_ratio;

		dirty_ratio = vm_dirty_ratio;
		if (dirty_ratio < 5)
			dirty_ratio = 5;
		dirty = (dirty_ratio * available_memory) / 100;
	}

	if (dirty_background_bytes)
		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
	else
		background = (dirty_background_ratio * available_memory) / 100;

	if (background >= dirty)
		background = dirty / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;

	if (bdi) {
		u64 bdi_dirty;
		long numerator, denominator;

		/*
		 * Calculate this BDI's share of the dirty ratio.
		 */
		bdi_writeout_fraction(bdi, &numerator, &denominator);

		bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
		bdi_dirty *= numerator;
		do_div(bdi_dirty, denominator);
		bdi_dirty += (dirty * bdi->min_ratio) / 100;
		if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
			bdi_dirty = dirty * bdi->max_ratio / 100;

		*pbdi_dirty = bdi_dirty;
		clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
		task_dirty_limit(current, pbdi_dirty);
	}
}
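
/*
 * Worked example for the bdi share above (illustrative figures): with a
 * global limit of 1000 pages, bdi_min_ratio = 0 and a device that has
 * completed 30% of recent writeback (numerator/denominator = 3/10),
 * bdi_dirty starts at 1000 * 3/10 = 300 pages; min_ratio/max_ratio then
 * bound it, and clip_bdi_dirty_limit()/task_dirty_limit() adjust it down.
 */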

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
	long nr_reclaimable, bdi_nr_reclaimable;
	long nr_writeback, bdi_nr_writeback;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi		= bdi,
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		get_dirty_limits(&background_thresh, &dirty_thresh,
				&bdi_thresh, bdi);

		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_writeback = global_page_state(NR_WRITEBACK);

		bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
		bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;

		/*
		 * Throttle it only when the background writeback cannot
		 * catch up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		if (nr_reclaimable + nr_writeback <
				(background_thresh + dirty_thresh) / 2)
			break;

		if (!bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (e.g. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		if (bdi_nr_reclaimable) {
			writeback_inodes(&wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			get_dirty_limits(&background_thresh, &dirty_thresh,
				       &bdi_thresh, bdi);
		}

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else if (bdi_nr_reclaimable) {
			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
		}

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;
		if (pages_written >= write_chunk)
			break;		/* We've done our duty */

		congestion_wait(WRITE, HZ/10);
	}

	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
			bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;		/* pdflush is already working this queue */

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
			(!laptop_mode && (global_page_state(NR_FILE_DIRTY)
					  + global_page_state(NR_UNSTABLE_NFS)
					  > background_thresh)))
		pdflush_operation(background_writeout, 0);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (mapping->backing_dev_info->dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p = &__get_cpu_var(ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
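
/*
 * Most writers use the single-page wrapper from <linux/writeback.h>,
 * which passes nr_pages_dirtied == 1; a typical write path calls, once
 * per newly dirtied page:
 *
 *	balance_dirty_pages_ratelimited(mapping);
 */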

void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	for ( ; ; ) {
		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;	/* wheeee... */

		if (global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(WRITE, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * Write back at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
	long min_pages = _min_pages;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.range_cyclic	= 1,
	};

	for ( ; ; ) {
		unsigned long background_thresh;
		unsigned long dirty_thresh;

		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
		if (global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) < background_thresh
				&& min_pages <= 0)
			break;
		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes(&wbc);
		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			/* Wrote less than expected */
			if (wbc.encountered_congestion || wbc.more_io)
				congestion_wait(WRITE, HZ/10);
			else
				break;
		}
	}
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	return pdflush_operation(background_writeout, nr_pages);
}
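
/*
 * Callers that just want "everything" written back (for example the sync
 * path) pass nr_pages == 0 and let the count be derived from the current
 * number of dirty and unstable pages.
 */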

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval, then leave a one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
	unsigned long oldest_jif;
	unsigned long start_jif;
	unsigned long next_jif;
	long nr_to_write;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.for_kupdate	= 1,
		.range_cyclic	= 1,
	};

	sync_supers();

	oldest_jif = jiffies - dirty_expire_interval;
	start_jif = jiffies;
	next_jif = start_jif + dirty_writeback_interval;
	nr_to_write = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	while (nr_to_write > 0) {
		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0) {
			if (wbc.encountered_congestion || wbc.more_io)
				congestion_wait(WRITE, HZ/10);
			else
				break;	/* All the old data is written */
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	else
		del_timer(&wb_timer);
	return 0;
}

static void wb_timer_fn(unsigned long unused)
{
	if (pdflush_operation(wb_kupdate, 0) < 0)
		mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
	sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
	pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high, because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

void writeback_set_ratelimit(void)
{
	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
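
/*
 * Worked example (illustrative figures): a 4-CPU machine with 4GiB of
 * memory (vm_total_pages = 1048576 with 4KiB pages) computes
 * 1048576 / (4 * 32) = 8192 pages, which the four-megabyte cap then
 * reduces to 1024 pages, i.e. each CPU may dirty up to 4MB between
 * checks.
 */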

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has.
 */
void __init page_writeback_init(void)
{
	int shift;

	mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	long nr_to_write = wbc->nr_to_write;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
retry:
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			      PAGECACHE_TAG_DIRTY,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index + 1;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					/*
					 * done_index is set past this page,
					 * so media errors will not choke
					 * background writeout for the entire
					 * file. This has consequences for
					 * range_cyclic semantics (ie. it may
					 * not be suitable for data integrity
					 * writeout).
					 */
					done = 1;
					break;
				}
			}

			if (nr_to_write > 0) {
				nr_to_write--;
				if (nr_to_write == 0 &&
				    wbc->sync_mode == WB_SYNC_NONE) {
					/*
					 * We stop writing back only if we are
					 * not doing integrity sync. In case of
					 * integrity sync we have to keep going
					 * because someone may be concurrently
					 * dirtying pages, and we might have
					 * synced a lot of newly appeared dirty
					 * pages, but have not synced all of the
					 * old dirty pages.
					 */
					done = 1;
					break;
				}
			}

			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (!wbc->no_nrwrite_index_update) {
		if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
			mapping->writeback_index = done_index;
		wbc->nr_to_write = nr_to_write;
	}

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	return write_cache_pages(mapping, wbc, __writepage, mapping);
}

EXPORT_SYMBOL(generic_writepages);
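
/*
 * A filesystem with a per-page writeout routine can build ->writepages()
 * directly on top of write_cache_pages(); a minimal sketch, where
 * myfs_writepage is a hypothetical writepage_t callback:
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *				   struct writeback_control *wbc)
 *	{
 *		return write_cache_pages(mapping, wbc, myfs_writepage,
 *					 mapping);
 *	}
 */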

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	wbc->for_writepages = 1;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	wbc->for_writepages = 0;
	return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);
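
/*
 * Usage sketch: the caller locks the page and gets it back unlocked, so
 * a synchronous flush of a single dirty page looks like:
 *
 *	lock_page(page);
 *	err = write_one_page(page, 1);
 *
 * where the non-zero second argument makes the call wait for the I/O to
 * complete before returning.
 */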

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			if (mapping_cap_account_dirty(mapping)) {
				__inc_zone_page_state(page, NR_FILE_DIRTY);
				__inc_bdi_stat(mapping->backing_dev_info,
						BDI_RECLAIMABLE);
				task_dirty_inc(current);
				task_io_account_write(PAGE_CACHE_SIZE);
			}
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);


/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0.
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page_nosync(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));

	ClearPageReclaim(page);
	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point. We do this by having them hold the
		 * page lock at some point after installing their
		 * pte, but before marking the page dirty.
		 * Pages are always locked coming in here, so we get
		 * the desired exclusion. See mm/memory.c:do_wp_page()
		 * for more comments.
		 */
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				__dec_bdi_stat(bdi, BDI_WRITEBACK);
				__bdi_writeout_inc(bdi);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret)
		dec_zone_page_state(page, NR_WRITEBACK);
	return ret;
}

int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		inc_zone_page_state(page, NR_WRITEBACK);
	return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	int ret;
	rcu_read_lock();
	ret = radix_tree_tagged(&mapping->page_tree, tag);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(mapping_tagged);