filemap.c revision f4c0a0fdfae708f7aa438c27a380ed4071294e11
1/*
2 *	linux/mm/filemap.c
3 *
4 * Copyright (C) 1994-1999  Linus Torvalds
5 */
6
7/*
8 * This file handles the generic file mmap semantics used by
9 * most "normal" filesystems (but you don't /have/ to use this:
10 * the NFS filesystem used to do this differently, for example)
11 */
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/compiler.h>
15#include <linux/fs.h>
16#include <linux/uaccess.h>
17#include <linux/aio.h>
18#include <linux/capability.h>
19#include <linux/kernel_stat.h>
20#include <linux/mm.h>
21#include <linux/swap.h>
22#include <linux/mman.h>
23#include <linux/pagemap.h>
24#include <linux/file.h>
25#include <linux/uio.h>
26#include <linux/hash.h>
27#include <linux/writeback.h>
28#include <linux/backing-dev.h>
29#include <linux/pagevec.h>
30#include <linux/blkdev.h>
31#include <linux/security.h>
32#include <linux/syscalls.h>
33#include <linux/cpuset.h>
34#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
35#include <linux/memcontrol.h>
36#include "internal.h"
37
38/*
39 * FIXME: remove all knowledge of the buffer layer from the core VM
40 */
41#include <linux/buffer_head.h> /* for generic_osync_inode */
42
43#include <asm/mman.h>
44
45static ssize_t
46generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
47	loff_t offset, unsigned long nr_segs);
48
49/*
50 * Shared mappings implemented 30.11.1994. It's not fully working yet,
51 * though.
52 *
53 * Shared mappings now work. 15.8.1995  Bruno.
54 *
55 * finished 'unifying' the page and buffer cache and SMP-threaded the
56 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
57 *
58 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
59 */
60
61/*
62 * Lock ordering:
63 *
64 *  ->i_mmap_lock		(vmtruncate)
65 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
66 *      ->swap_lock		(exclusive_swap_page, others)
67 *        ->mapping->tree_lock
68 *
69 *  ->i_mutex
70 *    ->i_mmap_lock		(truncate->unmap_mapping_range)
71 *
72 *  ->mmap_sem
73 *    ->i_mmap_lock
74 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
75 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
76 *
77 *  ->mmap_sem
78 *    ->lock_page		(access_process_vm)
79 *
80 *  ->i_mutex			(generic_file_buffered_write)
81 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
82 *
83 *  ->i_mutex
84 *    ->i_alloc_sem             (various)
85 *
86 *  ->inode_lock
87 *    ->sb_lock			(fs/fs-writeback.c)
88 *    ->mapping->tree_lock	(__sync_single_inode)
89 *
90 *  ->i_mmap_lock
91 *    ->anon_vma.lock		(vma_adjust)
92 *
93 *  ->anon_vma.lock
94 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
95 *
96 *  ->page_table_lock or pte_lock
97 *    ->swap_lock		(try_to_unmap_one)
98 *    ->private_lock		(try_to_unmap_one)
99 *    ->tree_lock		(try_to_unmap_one)
100 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
101 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
102 *    ->private_lock		(page_remove_rmap->set_page_dirty)
103 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
104 *    ->inode_lock		(page_remove_rmap->set_page_dirty)
105 *    ->inode_lock		(zap_pte_range->set_page_dirty)
106 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
107 *
108 *  ->task->proc_lock
109 *    ->dcache_lock		(proc_pid_lookup)
110 */
111
112/*
113 * Remove a page from the page cache and free it. Caller has to make
114 * sure the page is locked and that nobody else uses it - or that usage
115 * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
116 */
117void __remove_from_page_cache(struct page *page)
118{
119	struct address_space *mapping = page->mapping;
120
121	mem_cgroup_uncharge_page(page);
122	radix_tree_delete(&mapping->page_tree, page->index);
123	page->mapping = NULL;
124	mapping->nrpages--;
125	__dec_zone_page_state(page, NR_FILE_PAGES);
126	BUG_ON(page_mapped(page));
127
128	/*
129	 * Some filesystems seem to re-dirty the page even after
130	 * the VM has canceled the dirty bit (eg ext3 journaling).
131	 *
132	 * Fix it up by doing a final dirty accounting check after
133	 * having removed the page entirely.
134	 */
135	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
136		dec_zone_page_state(page, NR_FILE_DIRTY);
137		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
138	}
139}
140
141void remove_from_page_cache(struct page *page)
142{
143	struct address_space *mapping = page->mapping;
144
145	BUG_ON(!PageLocked(page));
146
147	write_lock_irq(&mapping->tree_lock);
148	__remove_from_page_cache(page);
149	write_unlock_irq(&mapping->tree_lock);
150}
151
152static int sync_page(void *word)
153{
154	struct address_space *mapping;
155	struct page *page;
156
157	page = container_of((unsigned long *)word, struct page, flags);
158
159	/*
160	 * page_mapping() is being called without PG_locked held.
161	 * Some knowledge of the state and use of the page is used to
162	 * reduce the requirements down to a memory barrier.
163	 * The danger here is of a stale page_mapping() return value
164	 * indicating a struct address_space different from the one it's
165	 * associated with when it is associated with one.
166	 * After smp_mb(), it's either the correct page_mapping() for
167	 * the page, or an old page_mapping() and the page's own
168	 * page_mapping() has gone NULL.
169	 * The ->sync_page() address_space operation must tolerate
170	 * page_mapping() going NULL. By an amazing coincidence,
171	 * this comes about because none of the users of the page
172	 * in the ->sync_page() methods make essential use of the
173	 * page_mapping(), merely passing the page down to the backing
174	 * device's unplug functions when it's non-NULL, which in turn
175	 * ignore it for all cases but swap, where only page_private(page) is
176	 * of interest. When page_mapping() does go NULL, the entire
177	 * call stack gracefully ignores the page and returns.
178	 * -- wli
179	 */
180	smp_mb();
181	mapping = page_mapping(page);
182	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
183		mapping->a_ops->sync_page(page);
184	io_schedule();
185	return 0;
186}
187
188static int sync_page_killable(void *word)
189{
190	sync_page(word);
191	return fatal_signal_pending(current) ? -EINTR : 0;
192}
193
194/**
195 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
196 * @mapping:	address space structure to write
197 * @start:	offset in bytes where the range starts
198 * @end:	offset in bytes where the range ends (inclusive)
199 * @sync_mode:	enable synchronous operation
200 *
201 * Start writeback against all of a mapping's dirty pages that lie
202 * within the byte offsets <start, end> inclusive.
203 *
204 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
205 * opposed to a regular memory cleansing writeback.  The difference between
206 * these two operations is that if a dirty page/buffer is encountered, it must
207 * be waited upon, and not just skipped over.
208 */
209int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
210				loff_t end, int sync_mode)
211{
212	int ret;
213	struct writeback_control wbc = {
214		.sync_mode = sync_mode,
215		.nr_to_write = mapping->nrpages * 2,
216		.range_start = start,
217		.range_end = end,
218	};
219
220	if (!mapping_cap_writeback_dirty(mapping))
221		return 0;
222
223	ret = do_writepages(mapping, &wbc);
224	return ret;
225}
226
227static inline int __filemap_fdatawrite(struct address_space *mapping,
228	int sync_mode)
229{
230	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
231}
232
233int filemap_fdatawrite(struct address_space *mapping)
234{
235	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
236}
237EXPORT_SYMBOL(filemap_fdatawrite);
238
239int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
240				loff_t end)
241{
242	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
243}
244EXPORT_SYMBOL(filemap_fdatawrite_range);
245
246/**
247 * filemap_flush - mostly a non-blocking flush
248 * @mapping:	target address_space
249 *
250 * This is a mostly non-blocking flush.  Not suitable for data-integrity
251 * purposes - I/O may not be started against all dirty pages.
252 */
253int filemap_flush(struct address_space *mapping)
254{
255	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
256}
257EXPORT_SYMBOL(filemap_flush);
258
259/**
260 * wait_on_page_writeback_range - wait for writeback to complete
261 * @mapping:	target address_space
262 * @start:	beginning page index
263 * @end:	ending page index
264 *
265 * Wait for writeback to complete against pages indexed by start->end
266 * inclusive
267 */
268int wait_on_page_writeback_range(struct address_space *mapping,
269				pgoff_t start, pgoff_t end)
270{
271	struct pagevec pvec;
272	int nr_pages;
273	int ret = 0;
274	pgoff_t index;
275
276	if (end < start)
277		return 0;
278
279	pagevec_init(&pvec, 0);
280	index = start;
281	while ((index <= end) &&
282			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
283			PAGECACHE_TAG_WRITEBACK,
284			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
285		unsigned i;
286
287		for (i = 0; i < nr_pages; i++) {
288			struct page *page = pvec.pages[i];
289
290			/* until radix tree lookup accepts end_index */
291			if (page->index > end)
292				continue;
293
294			wait_on_page_writeback(page);
295			if (PageError(page))
296				ret = -EIO;
297		}
298		pagevec_release(&pvec);
299		cond_resched();
300	}
301
302	/* Check for outstanding write errors */
303	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
304		ret = -ENOSPC;
305	if (test_and_clear_bit(AS_EIO, &mapping->flags))
306		ret = -EIO;
307
308	return ret;
309}
310
311/**
312 * sync_page_range - write and wait on all pages in the passed range
313 * @inode:	target inode
314 * @mapping:	target address_space
315 * @pos:	beginning byte offset to write
316 * @count:	number of bytes to write
317 *
318 * Write and wait upon all the pages in the passed range.  This is a "data
319 * integrity" operation.  It waits upon in-flight writeout before starting and
320 * waiting upon new writeout.  If there was an IO error, return it.
321 *
322 * We need to re-take i_mutex during the generic_osync_inode list walk because
323 * it is otherwise livelockable.
324 */
325int sync_page_range(struct inode *inode, struct address_space *mapping,
326			loff_t pos, loff_t count)
327{
328	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
329	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
330	int ret;
331
332	if (!mapping_cap_writeback_dirty(mapping) || !count)
333		return 0;
334	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
335	if (ret == 0) {
336		mutex_lock(&inode->i_mutex);
337		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
338		mutex_unlock(&inode->i_mutex);
339	}
340	if (ret == 0)
341		ret = wait_on_page_writeback_range(mapping, start, end);
342	return ret;
343}
344EXPORT_SYMBOL(sync_page_range);
345
346/**
347 * sync_page_range_nolock - write & wait on all pages in the passed range without locking
348 * @inode:	target inode
349 * @mapping:	target address_space
350 * @pos:	beginning byte offset to write
351 * @count:	number of bytes to write
352 *
353 * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
354 * as it forces O_SYNC writers to different parts of the same file
355 * to be serialised right up until I/O completion.
356 */
357int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
358			   loff_t pos, loff_t count)
359{
360	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
361	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
362	int ret;
363
364	if (!mapping_cap_writeback_dirty(mapping) || !count)
365		return 0;
366	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
367	if (ret == 0)
368		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
369	if (ret == 0)
370		ret = wait_on_page_writeback_range(mapping, start, end);
371	return ret;
372}
373EXPORT_SYMBOL(sync_page_range_nolock);
374
375/**
376 * filemap_fdatawait - wait for all under-writeback pages to complete
377 * @mapping: address space structure to wait for
378 *
379 * Walk the list of under-writeback pages of the given address space
380 * and wait for all of them.
381 */
382int filemap_fdatawait(struct address_space *mapping)
383{
384	loff_t i_size = i_size_read(mapping->host);
385
386	if (i_size == 0)
387		return 0;
388
389	return wait_on_page_writeback_range(mapping, 0,
390				(i_size - 1) >> PAGE_CACHE_SHIFT);
391}
392EXPORT_SYMBOL(filemap_fdatawait);
393
394int filemap_write_and_wait(struct address_space *mapping)
395{
396	int err = 0;
397
398	if (mapping->nrpages) {
399		err = filemap_fdatawrite(mapping);
400		/*
401		 * Even if the above returned error, the pages may be
402		 * written partially (e.g. -ENOSPC), so we wait for it.
 403		 * But -EIO is a special case: it may indicate that something
 404		 * truly bad (e.g. a bug) happened, so we avoid waiting for it.
405		 */
406		if (err != -EIO) {
407			int err2 = filemap_fdatawait(mapping);
408			if (!err)
409				err = err2;
410		}
411	}
412	return err;
413}
414EXPORT_SYMBOL(filemap_write_and_wait);
415
416/**
417 * filemap_write_and_wait_range - write out & wait on a file range
418 * @mapping:	the address_space for the pages
419 * @lstart:	offset in bytes where the range starts
420 * @lend:	offset in bytes where the range ends (inclusive)
421 *
422 * Write out and wait upon file offsets lstart->lend, inclusive.
423 *
424 * Note that `lend' is inclusive (describes the last byte to be written) so
425 * that this function can be used to write to the very end-of-file (end = -1).
426 */
427int filemap_write_and_wait_range(struct address_space *mapping,
428				 loff_t lstart, loff_t lend)
429{
430	int err = 0;
431
432	if (mapping->nrpages) {
433		err = __filemap_fdatawrite_range(mapping, lstart, lend,
434						 WB_SYNC_ALL);
435		/* See comment of filemap_write_and_wait() */
436		if (err != -EIO) {
437			int err2 = wait_on_page_writeback_range(mapping,
438						lstart >> PAGE_CACHE_SHIFT,
439						lend >> PAGE_CACHE_SHIFT);
440			if (!err)
441				err = err2;
442		}
443	}
444	return err;
445}
446
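/*
 * Example (illustrative sketch, not part of the original file): an
 * fsync-style helper that flushes and waits on one page worth of data
 * around @pos.  The name example_flush_page_at is hypothetical.
 *
 *	static int example_flush_page_at(struct address_space *mapping,
 *					 loff_t pos)
 *	{
 *		loff_t start = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;
 *
 *		return filemap_write_and_wait_range(mapping, start,
 *					start + PAGE_CACHE_SIZE - 1);
 *	}
 */
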
447/**
448 * add_to_page_cache - add newly allocated pagecache pages
449 * @page:	page to add
450 * @mapping:	the page's address_space
451 * @offset:	page index
452 * @gfp_mask:	page allocation mode
453 *
454 * This function is used to add newly allocated pagecache pages;
455 * the page is new, so we can just run SetPageLocked() against it.
456 * The other page state flags were set by rmqueue().
457 *
458 * This function does not add the page to the LRU.  The caller must do that.
459 */
460int add_to_page_cache(struct page *page, struct address_space *mapping,
461		pgoff_t offset, gfp_t gfp_mask)
462{
463	int error = mem_cgroup_cache_charge(page, current->mm,
464					gfp_mask & ~__GFP_HIGHMEM);
465	if (error)
466		goto out;
467
468	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
469	if (error == 0) {
470		write_lock_irq(&mapping->tree_lock);
471		error = radix_tree_insert(&mapping->page_tree, offset, page);
472		if (!error) {
473			page_cache_get(page);
474			SetPageLocked(page);
475			page->mapping = mapping;
476			page->index = offset;
477			mapping->nrpages++;
478			__inc_zone_page_state(page, NR_FILE_PAGES);
479		} else
480			mem_cgroup_uncharge_page(page);
481
482		write_unlock_irq(&mapping->tree_lock);
483		radix_tree_preload_end();
484	} else
485		mem_cgroup_uncharge_page(page);
486out:
487	return error;
488}
489EXPORT_SYMBOL(add_to_page_cache);
490
491int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
492				pgoff_t offset, gfp_t gfp_mask)
493{
494	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
495	if (ret == 0)
496		lru_cache_add(page);
497	return ret;
498}
499
500#ifdef CONFIG_NUMA
501struct page *__page_cache_alloc(gfp_t gfp)
502{
503	if (cpuset_do_page_mem_spread()) {
504		int n = cpuset_mem_spread_node();
505		return alloc_pages_node(n, gfp, 0);
506	}
507	return alloc_pages(gfp, 0);
508}
509EXPORT_SYMBOL(__page_cache_alloc);
510#endif
511
512static int __sleep_on_page_lock(void *word)
513{
514	io_schedule();
515	return 0;
516}
517
518/*
 519 * In order to wait for pages to become available there must be
 520 * waitqueues associated with pages.  We use a hash table of
 521 * waitqueues: all waiters for pages in the same bucket share one
 522 * queue, and all of them are woken when any page hashing to that
 523 * bucket becomes available.  Each woken context then re-checks
 524 * whether the page it cares about became available.  This saves
 525 * space at the cost of "thundering herd" phenomena during rare
 526 * hash collisions.
527 */
528static wait_queue_head_t *page_waitqueue(struct page *page)
529{
530	const struct zone *zone = page_zone(page);
531
532	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
533}
534
535static inline void wake_up_page(struct page *page, int bit)
536{
537	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
538}
539
540void wait_on_page_bit(struct page *page, int bit_nr)
541{
542	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
543
544	if (test_bit(bit_nr, &page->flags))
545		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
546							TASK_UNINTERRUPTIBLE);
547}
548EXPORT_SYMBOL(wait_on_page_bit);
549
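/*
 * Example (illustrative, not part of the original file):
 * wait_on_page_writeback() in linux/pagemap.h is a thin wrapper around
 * this, roughly:
 *
 *	if (PageWriteback(page))
 *		wait_on_page_bit(page, PG_writeback);
 */
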
550/**
551 * unlock_page - unlock a locked page
552 * @page: the page
553 *
554 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
555 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 556 * mechanism between PageLocked pages and PageWriteback pages is shared.
557 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
558 *
559 * The first mb is necessary to safely close the critical section opened by the
560 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
561 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
562 * parallel wait_on_page_locked()).
563 */
564void unlock_page(struct page *page)
565{
566	smp_mb__before_clear_bit();
567	if (!TestClearPageLocked(page))
568		BUG();
569	smp_mb__after_clear_bit();
570	wake_up_page(page, PG_locked);
571}
572EXPORT_SYMBOL(unlock_page);
573
574/**
575 * end_page_writeback - end writeback against a page
576 * @page: the page
577 */
578void end_page_writeback(struct page *page)
579{
580	if (TestClearPageReclaim(page))
581		rotate_reclaimable_page(page);
582
583	if (!test_clear_page_writeback(page))
584		BUG();
585
586	smp_mb__after_clear_bit();
587	wake_up_page(page, PG_writeback);
588}
589EXPORT_SYMBOL(end_page_writeback);
590
591/**
592 * __lock_page - get a lock on the page, assuming we need to sleep to get it
593 * @page: the page to lock
594 *
595 * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
596 * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
597 * chances are that on the second loop, the block layer's plug list is empty,
598 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
599 */
600void __lock_page(struct page *page)
601{
602	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
603
604	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
605							TASK_UNINTERRUPTIBLE);
606}
607EXPORT_SYMBOL(__lock_page);
608
609int __lock_page_killable(struct page *page)
610{
611	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
612
613	return __wait_on_bit_lock(page_waitqueue(page), &wait,
614					sync_page_killable, TASK_KILLABLE);
615}
616
617/**
618 * __lock_page_nosync - get a lock on the page, without calling sync_page()
619 * @page: the page to lock
620 *
621 * Variant of lock_page that does not require the caller to hold a reference
622 * on the page's mapping.
623 */
624void __lock_page_nosync(struct page *page)
625{
626	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
627	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
628							TASK_UNINTERRUPTIBLE);
629}
630
631/**
632 * find_get_page - find and get a page reference
633 * @mapping: the address_space to search
634 * @offset: the page index
635 *
636 * Is there a pagecache struct page at the given (mapping, offset) tuple?
637 * If yes, increment its refcount and return it; if no, return NULL.
638 */
639struct page * find_get_page(struct address_space *mapping, pgoff_t offset)
640{
641	struct page *page;
642
643	read_lock_irq(&mapping->tree_lock);
644	page = radix_tree_lookup(&mapping->page_tree, offset);
645	if (page)
646		page_cache_get(page);
647	read_unlock_irq(&mapping->tree_lock);
648	return page;
649}
650EXPORT_SYMBOL(find_get_page);
651
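/*
 * Example (illustrative sketch, not part of the original file): probing
 * the cache without blocking.  The reference taken here must be dropped
 * by the caller:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		... inspect the page ...
 *		page_cache_release(page);
 *	}
 */
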
652/**
653 * find_lock_page - locate, pin and lock a pagecache page
654 * @mapping: the address_space to search
655 * @offset: the page index
656 *
657 * Locates the desired pagecache page, locks it, increments its reference
658 * count and returns its address.
659 *
 660 * Returns NULL if the page was not present. find_lock_page() may sleep.
661 */
662struct page *find_lock_page(struct address_space *mapping,
663				pgoff_t offset)
664{
665	struct page *page;
666
667repeat:
668	read_lock_irq(&mapping->tree_lock);
669	page = radix_tree_lookup(&mapping->page_tree, offset);
670	if (page) {
671		page_cache_get(page);
672		if (TestSetPageLocked(page)) {
673			read_unlock_irq(&mapping->tree_lock);
674			__lock_page(page);
675
676			/* Has the page been truncated while we slept? */
677			if (unlikely(page->mapping != mapping)) {
678				unlock_page(page);
679				page_cache_release(page);
680				goto repeat;
681			}
682			VM_BUG_ON(page->index != offset);
683			goto out;
684		}
685	}
686	read_unlock_irq(&mapping->tree_lock);
687out:
688	return page;
689}
690EXPORT_SYMBOL(find_lock_page);
691
692/**
693 * find_or_create_page - locate or add a pagecache page
694 * @mapping: the page's address_space
695 * @index: the page's index into the mapping
696 * @gfp_mask: page allocation mode
697 *
698 * Locates a page in the pagecache.  If the page is not present, a new page
699 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
700 * LRU list.  The returned page is locked and has its reference count
701 * incremented.
702 *
 703 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 704 * allocation!
 705 *
 706 * find_or_create_page() returns the desired page's address, or NULL on
 707 * memory exhaustion.
708 */
709struct page *find_or_create_page(struct address_space *mapping,
710		pgoff_t index, gfp_t gfp_mask)
711{
712	struct page *page;
713	int err;
714repeat:
715	page = find_lock_page(mapping, index);
716	if (!page) {
717		page = __page_cache_alloc(gfp_mask);
718		if (!page)
719			return NULL;
720		err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
721		if (unlikely(err)) {
722			page_cache_release(page);
723			page = NULL;
724			if (err == -EEXIST)
725				goto repeat;
726		}
727	}
728	return page;
729}
730EXPORT_SYMBOL(find_or_create_page);
731
732/**
733 * find_get_pages - gang pagecache lookup
734 * @mapping:	The address_space to search
735 * @start:	The starting page index
736 * @nr_pages:	The maximum number of pages
737 * @pages:	Where the resulting pages are placed
738 *
739 * find_get_pages() will search for and return a group of up to
740 * @nr_pages pages in the mapping.  The pages are placed at @pages.
741 * find_get_pages() takes a reference against the returned pages.
742 *
743 * The search returns a group of mapping-contiguous pages with ascending
744 * indexes.  There may be holes in the indices due to not-present pages.
745 *
746 * find_get_pages() returns the number of pages which were found.
747 */
748unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
749			    unsigned int nr_pages, struct page **pages)
750{
751	unsigned int i;
752	unsigned int ret;
753
754	read_lock_irq(&mapping->tree_lock);
755	ret = radix_tree_gang_lookup(&mapping->page_tree,
756				(void **)pages, start, nr_pages);
757	for (i = 0; i < ret; i++)
758		page_cache_get(pages[i]);
759	read_unlock_irq(&mapping->tree_lock);
760	return ret;
761}
762
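/*
 * Example (illustrative sketch, not part of the original file): a
 * typical gang-lookup loop; each returned page carries a reference that
 * the caller must drop:
 *
 *	struct page *pages[PAGEVEC_SIZE];
 *	unsigned i, nr;
 *
 *	nr = find_get_pages(mapping, index, PAGEVEC_SIZE, pages);
 *	for (i = 0; i < nr; i++) {
 *		... process pages[i] ...
 *		page_cache_release(pages[i]);
 *	}
 *	if (nr)
 *		index = pages[nr - 1]->index + 1;
 */
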
763/**
764 * find_get_pages_contig - gang contiguous pagecache lookup
765 * @mapping:	The address_space to search
766 * @index:	The starting page index
767 * @nr_pages:	The maximum number of pages
768 * @pages:	Where the resulting pages are placed
769 *
770 * find_get_pages_contig() works exactly like find_get_pages(), except
771 * that the returned number of pages are guaranteed to be contiguous.
772 *
773 * find_get_pages_contig() returns the number of pages which were found.
774 */
775unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
776			       unsigned int nr_pages, struct page **pages)
777{
778	unsigned int i;
779	unsigned int ret;
780
781	read_lock_irq(&mapping->tree_lock);
782	ret = radix_tree_gang_lookup(&mapping->page_tree,
783				(void **)pages, index, nr_pages);
784	for (i = 0; i < ret; i++) {
785		if (pages[i]->mapping == NULL || pages[i]->index != index)
786			break;
787
788		page_cache_get(pages[i]);
789		index++;
790	}
791	read_unlock_irq(&mapping->tree_lock);
792	return i;
793}
794EXPORT_SYMBOL(find_get_pages_contig);
795
796/**
797 * find_get_pages_tag - find and return pages that match @tag
798 * @mapping:	the address_space to search
799 * @index:	the starting page index
800 * @tag:	the tag index
801 * @nr_pages:	the maximum number of pages
802 * @pages:	where the resulting pages are placed
803 *
804 * Like find_get_pages, except we only return pages which are tagged with
805 * @tag.   We update @index to index the next page for the traversal.
806 */
807unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
808			int tag, unsigned int nr_pages, struct page **pages)
809{
810	unsigned int i;
811	unsigned int ret;
812
813	read_lock_irq(&mapping->tree_lock);
814	ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
815				(void **)pages, *index, nr_pages, tag);
816	for (i = 0; i < ret; i++)
817		page_cache_get(pages[i]);
818	if (ret)
819		*index = pages[ret - 1]->index + 1;
820	read_unlock_irq(&mapping->tree_lock);
821	return ret;
822}
823EXPORT_SYMBOL(find_get_pages_tag);
824
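/*
 * Example (illustrative, not part of the original file):
 * wait_on_page_writeback_range() above walks tagged pages this way, via
 * pagevec_lookup_tag(), which is a thin wrapper around roughly:
 *
 *	nr = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_WRITEBACK,
 *				PAGEVEC_SIZE, pages);
 */
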
825/**
826 * grab_cache_page_nowait - returns locked page at given index in given cache
827 * @mapping: target address_space
828 * @index: the page index
829 *
830 * Same as grab_cache_page(), but do not wait if the page is unavailable.
831 * This is intended for speculative data generators, where the data can
832 * be regenerated if the page couldn't be grabbed.  This routine should
833 * be safe to call while holding the lock for another page.
834 *
835 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
836 * and deadlock against the caller's locked page.
837 */
838struct page *
839grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
840{
841	struct page *page = find_get_page(mapping, index);
842
843	if (page) {
844		if (!TestSetPageLocked(page))
845			return page;
846		page_cache_release(page);
847		return NULL;
848	}
849	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
850	if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
851		page_cache_release(page);
852		page = NULL;
853	}
854	return page;
855}
856EXPORT_SYMBOL(grab_cache_page_nowait);
857
858/*
859 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
860 * a _large_ part of the i/o request. Imagine the worst scenario:
861 *
862 *      ---R__________________________________________B__________
 863 *         ^ reading here                             ^ bad block (assume 4k)
864 *
865 * read(R) => miss => readahead(R...B) => media error => frustrating retries
866 * => failing the whole request => read(R) => read(R+1) =>
867 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
868 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
869 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
870 *
871 * It is going insane. Fix it by quickly scaling down the readahead size.
872 */
873static void shrink_readahead_size_eio(struct file *filp,
874					struct file_ra_state *ra)
875{
876	if (!ra->ra_pages)
877		return;
878
879	ra->ra_pages /= 4;
880}
881
882/**
883 * do_generic_file_read - generic file read routine
884 * @filp:	the file to read
885 * @ppos:	current file position
886 * @desc:	read_descriptor
887 * @actor:	read method
888 *
889 * This is a generic file read routine, and uses the
890 * mapping->a_ops->readpage() function for the actual low-level stuff.
891 *
892 * This is really ugly. But the goto's actually try to clarify some
893 * of the logic when it comes to error handling etc.
894 */
895static void do_generic_file_read(struct file *filp, loff_t *ppos,
896		read_descriptor_t *desc, read_actor_t actor)
897{
898	struct address_space *mapping = filp->f_mapping;
899	struct inode *inode = mapping->host;
900	struct file_ra_state *ra = &filp->f_ra;
901	pgoff_t index;
902	pgoff_t last_index;
903	pgoff_t prev_index;
904	unsigned long offset;      /* offset into pagecache page */
905	unsigned int prev_offset;
906	int error;
907
908	index = *ppos >> PAGE_CACHE_SHIFT;
909	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
910	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
911	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
912	offset = *ppos & ~PAGE_CACHE_MASK;
913
914	for (;;) {
915		struct page *page;
916		pgoff_t end_index;
917		loff_t isize;
918		unsigned long nr, ret;
919
920		cond_resched();
921find_page:
922		page = find_get_page(mapping, index);
923		if (!page) {
924			page_cache_sync_readahead(mapping,
925					ra, filp,
926					index, last_index - index);
927			page = find_get_page(mapping, index);
928			if (unlikely(page == NULL))
929				goto no_cached_page;
930		}
931		if (PageReadahead(page)) {
932			page_cache_async_readahead(mapping,
933					ra, filp, page,
934					index, last_index - index);
935		}
936		if (!PageUptodate(page))
937			goto page_not_up_to_date;
938page_ok:
939		/*
940		 * i_size must be checked after we know the page is Uptodate.
941		 *
942		 * Checking i_size after the check allows us to calculate
943		 * the correct value for "nr", which means the zero-filled
944		 * part of the page is not copied back to userspace (unless
945		 * another truncate extends the file - this is desired though).
946		 */
947
948		isize = i_size_read(inode);
949		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
950		if (unlikely(!isize || index > end_index)) {
951			page_cache_release(page);
952			goto out;
953		}
954
955		/* nr is the maximum number of bytes to copy from this page */
956		nr = PAGE_CACHE_SIZE;
957		if (index == end_index) {
958			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
959			if (nr <= offset) {
960				page_cache_release(page);
961				goto out;
962			}
963		}
964		nr = nr - offset;
965
966		/* If users can be writing to this page using arbitrary
967		 * virtual addresses, take care about potential aliasing
968		 * before reading the page on the kernel side.
969		 */
970		if (mapping_writably_mapped(mapping))
971			flush_dcache_page(page);
972
973		/*
974		 * When a sequential read accesses a page several times,
975		 * only mark it as accessed the first time.
976		 */
977		if (prev_index != index || offset != prev_offset)
978			mark_page_accessed(page);
979		prev_index = index;
980
981		/*
982		 * Ok, we have the page, and it's up-to-date, so
983		 * now we can copy it to user space...
984		 *
985		 * The actor routine returns how many bytes were actually used..
986		 * NOTE! This may not be the same as how much of a user buffer
987		 * we filled up (we may be padding etc), so we can only update
988		 * "pos" here (the actor routine has to update the user buffer
989		 * pointers and the remaining count).
990		 */
991		ret = actor(desc, page, offset, nr);
992		offset += ret;
993		index += offset >> PAGE_CACHE_SHIFT;
994		offset &= ~PAGE_CACHE_MASK;
995		prev_offset = offset;
996
997		page_cache_release(page);
998		if (ret == nr && desc->count)
999			continue;
1000		goto out;
1001
1002page_not_up_to_date:
1003		/* Get exclusive access to the page ... */
1004		if (lock_page_killable(page))
1005			goto readpage_eio;
1006
1007		/* Did it get truncated before we got the lock? */
1008		if (!page->mapping) {
1009			unlock_page(page);
1010			page_cache_release(page);
1011			continue;
1012		}
1013
1014		/* Did somebody else fill it already? */
1015		if (PageUptodate(page)) {
1016			unlock_page(page);
1017			goto page_ok;
1018		}
1019
1020readpage:
1021		/* Start the actual read. The read will unlock the page. */
1022		error = mapping->a_ops->readpage(filp, page);
1023
1024		if (unlikely(error)) {
1025			if (error == AOP_TRUNCATED_PAGE) {
1026				page_cache_release(page);
1027				goto find_page;
1028			}
1029			goto readpage_error;
1030		}
1031
1032		if (!PageUptodate(page)) {
1033			if (lock_page_killable(page))
1034				goto readpage_eio;
1035			if (!PageUptodate(page)) {
1036				if (page->mapping == NULL) {
1037					/*
1038					 * invalidate_inode_pages got it
1039					 */
1040					unlock_page(page);
1041					page_cache_release(page);
1042					goto find_page;
1043				}
1044				unlock_page(page);
1045				shrink_readahead_size_eio(filp, ra);
1046				goto readpage_eio;
1047			}
1048			unlock_page(page);
1049		}
1050
1051		goto page_ok;
1052
1053readpage_eio:
1054		error = -EIO;
1055readpage_error:
1056		/* UHHUH! A synchronous read error occurred. Report it */
1057		desc->error = error;
1058		page_cache_release(page);
1059		goto out;
1060
1061no_cached_page:
1062		/*
1063		 * Ok, it wasn't cached, so we need to create a new
1064		 * page..
1065		 */
1066		page = page_cache_alloc_cold(mapping);
1067		if (!page) {
1068			desc->error = -ENOMEM;
1069			goto out;
1070		}
1071		error = add_to_page_cache_lru(page, mapping,
1072						index, GFP_KERNEL);
1073		if (error) {
1074			page_cache_release(page);
1075			if (error == -EEXIST)
1076				goto find_page;
1077			desc->error = error;
1078			goto out;
1079		}
1080		goto readpage;
1081	}
1082
1083out:
1084	ra->prev_pos = prev_index;
1085	ra->prev_pos <<= PAGE_CACHE_SHIFT;
1086	ra->prev_pos |= prev_offset;
1087
1088	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
1089	if (filp)
1090		file_accessed(filp);
1091}
1092
1093int file_read_actor(read_descriptor_t *desc, struct page *page,
1094			unsigned long offset, unsigned long size)
1095{
1096	char *kaddr;
1097	unsigned long left, count = desc->count;
1098
1099	if (size > count)
1100		size = count;
1101
1102	/*
1103	 * Faults on the destination of a read are common, so do it before
1104	 * taking the kmap.
1105	 */
1106	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
1107		kaddr = kmap_atomic(page, KM_USER0);
1108		left = __copy_to_user_inatomic(desc->arg.buf,
1109						kaddr + offset, size);
1110		kunmap_atomic(kaddr, KM_USER0);
1111		if (left == 0)
1112			goto success;
1113	}
1114
1115	/* Do it the slow way */
1116	kaddr = kmap(page);
1117	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
1118	kunmap(page);
1119
1120	if (left) {
1121		size -= left;
1122		desc->error = -EFAULT;
1123	}
1124success:
1125	desc->count = count - size;
1126	desc->written += size;
1127	desc->arg.buf += size;
1128	return size;
1129}
1130
1131/*
1132 * Performs necessary checks before doing a write
1133 * @iov:	io vector request
1134 * @nr_segs:	number of segments in the iovec
1135 * @count:	number of bytes to write
1136 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
1137 *
1138 * Adjust number of segments and amount of bytes to write (nr_segs should be
 1139 * properly initialized first).  Returns an appropriate error code that the
 1140 * caller should return, or zero if the write should be allowed.
1141 */
1142int generic_segment_checks(const struct iovec *iov,
1143			unsigned long *nr_segs, size_t *count, int access_flags)
1144{
1145	unsigned long   seg;
1146	size_t cnt = 0;
1147	for (seg = 0; seg < *nr_segs; seg++) {
1148		const struct iovec *iv = &iov[seg];
1149
1150		/*
1151		 * If any segment has a negative length, or the cumulative
1152		 * length ever wraps negative then return -EINVAL.
1153		 */
1154		cnt += iv->iov_len;
1155		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
1156			return -EINVAL;
1157		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
1158			continue;
1159		if (seg == 0)
1160			return -EFAULT;
1161		*nr_segs = seg;
1162		cnt -= iv->iov_len;	/* This segment is no good */
1163		break;
1164	}
1165	*count = cnt;
1166	return 0;
1167}
1168EXPORT_SYMBOL(generic_segment_checks);
1169
1170/**
1171 * generic_file_aio_read - generic filesystem read routine
1172 * @iocb:	kernel I/O control block
1173 * @iov:	io vector request
1174 * @nr_segs:	number of segments in the iovec
1175 * @pos:	current file position
1176 *
1177 * This is the "read()" routine for all filesystems
1178 * that can use the page cache directly.
1179 */
1180ssize_t
1181generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1182		unsigned long nr_segs, loff_t pos)
1183{
1184	struct file *filp = iocb->ki_filp;
1185	ssize_t retval;
1186	unsigned long seg;
1187	size_t count;
1188	loff_t *ppos = &iocb->ki_pos;
1189
1190	count = 0;
1191	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1192	if (retval)
1193		return retval;
1194
1195	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
1196	if (filp->f_flags & O_DIRECT) {
1197		loff_t size;
1198		struct address_space *mapping;
1199		struct inode *inode;
1200
1201		mapping = filp->f_mapping;
1202		inode = mapping->host;
1203		retval = 0;
1204		if (!count)
1205			goto out; /* skip atime */
1206		size = i_size_read(inode);
1207		if (pos < size) {
1208			retval = generic_file_direct_IO(READ, iocb,
1209						iov, pos, nr_segs);
1210			if (retval > 0)
1211				*ppos = pos + retval;
1212		}
1213		if (likely(retval != 0)) {
1214			file_accessed(filp);
1215			goto out;
1216		}
1217	}
1218
1219	retval = 0;
1220	if (count) {
1221		for (seg = 0; seg < nr_segs; seg++) {
1222			read_descriptor_t desc;
1223
1224			desc.written = 0;
1225			desc.arg.buf = iov[seg].iov_base;
1226			desc.count = iov[seg].iov_len;
1227			if (desc.count == 0)
1228				continue;
1229			desc.error = 0;
1230			do_generic_file_read(filp,ppos,&desc,file_read_actor);
1231			retval += desc.written;
1232			if (desc.error) {
1233				retval = retval ?: desc.error;
1234				break;
1235			}
1236			if (desc.count > 0)
1237				break;
1238		}
1239	}
1240out:
1241	return retval;
1242}
1243EXPORT_SYMBOL(generic_file_aio_read);
1244
1245static ssize_t
1246do_readahead(struct address_space *mapping, struct file *filp,
1247	     pgoff_t index, unsigned long nr)
1248{
1249	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
1250		return -EINVAL;
1251
1252	force_page_cache_readahead(mapping, filp, index,
1253					max_sane_readahead(nr));
1254	return 0;
1255}
1256
1257asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
1258{
1259	ssize_t ret;
1260	struct file *file;
1261
1262	ret = -EBADF;
1263	file = fget(fd);
1264	if (file) {
1265		if (file->f_mode & FMODE_READ) {
1266			struct address_space *mapping = file->f_mapping;
1267			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
1268			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
1269			unsigned long len = end - start + 1;
1270			ret = do_readahead(mapping, file, start, len);
1271		}
1272		fput(file);
1273	}
1274	return ret;
1275}
1276
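/*
 * Example (illustrative, not part of the original file): the syscall
 * above backs readahead(2); userspace might prefetch the first megabyte
 * of a file with:
 *
 *	ssize_t err = readahead(fd, 0, 1024 * 1024);
 */
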
1277#ifdef CONFIG_MMU
1278/**
1279 * page_cache_read - adds requested page to the page cache if not already there
1280 * @file:	file to read
1281 * @offset:	page index
1282 *
1283 * This adds the requested page to the page cache if it isn't already there,
1284 * and schedules an I/O to read in its contents from disk.
1285 */
1286static int page_cache_read(struct file *file, pgoff_t offset)
1287{
1288	struct address_space *mapping = file->f_mapping;
1289	struct page *page;
1290	int ret;
1291
1292	do {
1293		page = page_cache_alloc_cold(mapping);
1294		if (!page)
1295			return -ENOMEM;
1296
1297		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1298		if (ret == 0)
1299			ret = mapping->a_ops->readpage(file, page);
1300		else if (ret == -EEXIST)
1301			ret = 0; /* losing race to add is OK */
1302
1303		page_cache_release(page);
1304
1305	} while (ret == AOP_TRUNCATED_PAGE);
1306
1307	return ret;
1308}
1309
1310#define MMAP_LOTSAMISS  (100)
1311
1312/**
1313 * filemap_fault - read in file data for page fault handling
1314 * @vma:	vma in which the fault was taken
1315 * @vmf:	struct vm_fault containing details of the fault
1316 *
1317 * filemap_fault() is invoked via the vma operations vector for a
1318 * mapped memory region to read in file data during a page fault.
1319 *
1320 * The goto's are kind of ugly, but this streamlines the normal case of having
1321 * it in the page cache, and handles the special cases reasonably without
1322 * having a lot of duplicated code.
1323 */
1324int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1325{
1326	int error;
1327	struct file *file = vma->vm_file;
1328	struct address_space *mapping = file->f_mapping;
1329	struct file_ra_state *ra = &file->f_ra;
1330	struct inode *inode = mapping->host;
1331	struct page *page;
1332	pgoff_t size;
1333	int did_readaround = 0;
1334	int ret = 0;
1335
1336	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1337	if (vmf->pgoff >= size)
1338		return VM_FAULT_SIGBUS;
1339
1340	/* If we don't want any read-ahead, don't bother */
1341	if (VM_RandomReadHint(vma))
1342		goto no_cached_page;
1343
1344	/*
1345	 * Do we have something in the page cache already?
1346	 */
1347retry_find:
1348	page = find_lock_page(mapping, vmf->pgoff);
1349	/*
1350	 * For sequential accesses, we use the generic readahead logic.
1351	 */
1352	if (VM_SequentialReadHint(vma)) {
1353		if (!page) {
1354			page_cache_sync_readahead(mapping, ra, file,
1355							   vmf->pgoff, 1);
1356			page = find_lock_page(mapping, vmf->pgoff);
1357			if (!page)
1358				goto no_cached_page;
1359		}
1360		if (PageReadahead(page)) {
1361			page_cache_async_readahead(mapping, ra, file, page,
1362							   vmf->pgoff, 1);
1363		}
1364	}
1365
1366	if (!page) {
1367		unsigned long ra_pages;
1368
1369		ra->mmap_miss++;
1370
1371		/*
1372		 * Do we miss much more than hit in this file? If so,
1373		 * stop bothering with read-ahead. It will only hurt.
1374		 */
1375		if (ra->mmap_miss > MMAP_LOTSAMISS)
1376			goto no_cached_page;
1377
1378		/*
1379		 * To keep the pgmajfault counter straight, we need to
1380		 * check did_readaround, as this is an inner loop.
1381		 */
1382		if (!did_readaround) {
1383			ret = VM_FAULT_MAJOR;
1384			count_vm_event(PGMAJFAULT);
1385		}
1386		did_readaround = 1;
1387		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
1388		if (ra_pages) {
1389			pgoff_t start = 0;
1390
1391			if (vmf->pgoff > ra_pages / 2)
1392				start = vmf->pgoff - ra_pages / 2;
1393			do_page_cache_readahead(mapping, file, start, ra_pages);
1394		}
1395		page = find_lock_page(mapping, vmf->pgoff);
1396		if (!page)
1397			goto no_cached_page;
1398	}
1399
1400	if (!did_readaround)
1401		ra->mmap_miss--;
1402
1403	/*
1404	 * We have a locked page in the page cache, now we need to check
1405	 * that it's up-to-date. If not, it is going to be due to an error.
1406	 */
1407	if (unlikely(!PageUptodate(page)))
1408		goto page_not_uptodate;
1409
1410	/* Must recheck i_size under page lock */
1411	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1412	if (unlikely(vmf->pgoff >= size)) {
1413		unlock_page(page);
1414		page_cache_release(page);
1415		return VM_FAULT_SIGBUS;
1416	}
1417
1418	/*
1419	 * Found the page and have a reference on it.
1420	 */
1421	mark_page_accessed(page);
1422	ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
1423	vmf->page = page;
1424	return ret | VM_FAULT_LOCKED;
1425
1426no_cached_page:
1427	/*
1428	 * We're only likely to ever get here if MADV_RANDOM is in
1429	 * effect.
1430	 */
1431	error = page_cache_read(file, vmf->pgoff);
1432
1433	/*
1434	 * The page we want has now been added to the page cache.
1435	 * In the unlikely event that someone removed it in the
1436	 * meantime, we'll just come back here and read it again.
1437	 */
1438	if (error >= 0)
1439		goto retry_find;
1440
1441	/*
1442	 * An error return from page_cache_read can result if the
1443	 * system is low on memory, or a problem occurs while trying
1444	 * to schedule I/O.
1445	 */
1446	if (error == -ENOMEM)
1447		return VM_FAULT_OOM;
1448	return VM_FAULT_SIGBUS;
1449
1450page_not_uptodate:
1451	/* IO error path */
1452	if (!did_readaround) {
1453		ret = VM_FAULT_MAJOR;
1454		count_vm_event(PGMAJFAULT);
1455	}
1456
1457	/*
1458	 * Umm, take care of errors if the page isn't up-to-date.
1459	 * Try to re-read it _once_. We do this synchronously,
1460	 * because there really aren't any performance issues here
1461	 * and we need to check for errors.
1462	 */
1463	ClearPageError(page);
1464	error = mapping->a_ops->readpage(file, page);
1465	if (!error) {
1466		wait_on_page_locked(page);
1467		if (!PageUptodate(page))
1468			error = -EIO;
1469	}
1470	page_cache_release(page);
1471
1472	if (!error || error == AOP_TRUNCATED_PAGE)
1473		goto retry_find;
1474
 1475	/* Things didn't work out. Return VM_FAULT_SIGBUS to tell the mm layer so. */
1476	shrink_readahead_size_eio(file, ra);
1477	return VM_FAULT_SIGBUS;
1478}
1479EXPORT_SYMBOL(filemap_fault);
1480
1481struct vm_operations_struct generic_file_vm_ops = {
1482	.fault		= filemap_fault,
1483};
1484
1485/* This is used for a general mmap of a disk file */
1486
1487int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1488{
1489	struct address_space *mapping = file->f_mapping;
1490
1491	if (!mapping->a_ops->readpage)
1492		return -ENOEXEC;
1493	file_accessed(file);
1494	vma->vm_ops = &generic_file_vm_ops;
1495	vma->vm_flags |= VM_CAN_NONLINEAR;
1496	return 0;
1497}
1498
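/*
 * Example (illustrative sketch, not part of the original file): a
 * filesystem typically wires this up in its file_operations; the name
 * example_file_operations is hypothetical:
 *
 *	static const struct file_operations example_file_operations = {
 *		.read		= do_sync_read,
 *		.aio_read	= generic_file_aio_read,
 *		.mmap		= generic_file_mmap,
 *	};
 */
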
1499/*
1500 * This is for filesystems which do not implement ->writepage.
1501 */
1502int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1503{
1504	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
1505		return -EINVAL;
1506	return generic_file_mmap(file, vma);
1507}
1508#else
1509int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1510{
1511	return -ENOSYS;
1512}
1513int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
1514{
1515	return -ENOSYS;
1516}
1517#endif /* CONFIG_MMU */
1518
1519EXPORT_SYMBOL(generic_file_mmap);
1520EXPORT_SYMBOL(generic_file_readonly_mmap);
1521
1522static struct page *__read_cache_page(struct address_space *mapping,
1523				pgoff_t index,
1524				int (*filler)(void *,struct page*),
1525				void *data)
1526{
1527	struct page *page;
1528	int err;
1529repeat:
1530	page = find_get_page(mapping, index);
1531	if (!page) {
1532		page = page_cache_alloc_cold(mapping);
1533		if (!page)
1534			return ERR_PTR(-ENOMEM);
1535		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
1536		if (unlikely(err)) {
1537			page_cache_release(page);
1538			if (err == -EEXIST)
1539				goto repeat;
1540			/* Presumably ENOMEM for radix tree node */
1541			return ERR_PTR(err);
1542		}
1543		err = filler(data, page);
1544		if (err < 0) {
1545			page_cache_release(page);
1546			page = ERR_PTR(err);
1547		}
1548	}
1549	return page;
1550}
1551
1552/**
1553 * read_cache_page_async - read into page cache, fill it if needed
1554 * @mapping:	the page's address_space
1555 * @index:	the page index
1556 * @filler:	function to perform the read
 1557 * @data:	first argument passed to @filler
1558 *
1559 * Same as read_cache_page, but don't wait for page to become unlocked
1560 * after submitting it to the filler.
1561 *
1562 * Read into the page cache. If a page already exists, and PageUptodate() is
1563 * not set, try to fill the page but don't wait for it to become unlocked.
1564 *
1565 * If the page does not get brought uptodate, return -EIO.
1566 */
1567struct page *read_cache_page_async(struct address_space *mapping,
1568				pgoff_t index,
1569				int (*filler)(void *,struct page*),
1570				void *data)
1571{
1572	struct page *page;
1573	int err;
1574
1575retry:
1576	page = __read_cache_page(mapping, index, filler, data);
1577	if (IS_ERR(page))
1578		return page;
1579	if (PageUptodate(page))
1580		goto out;
1581
1582	lock_page(page);
1583	if (!page->mapping) {
1584		unlock_page(page);
1585		page_cache_release(page);
1586		goto retry;
1587	}
1588	if (PageUptodate(page)) {
1589		unlock_page(page);
1590		goto out;
1591	}
1592	err = filler(data, page);
1593	if (err < 0) {
1594		page_cache_release(page);
1595		return ERR_PTR(err);
1596	}
1597out:
1598	mark_page_accessed(page);
1599	return page;
1600}
1601EXPORT_SYMBOL(read_cache_page_async);
1602
1603/**
1604 * read_cache_page - read into page cache, fill it if needed
1605 * @mapping:	the page's address_space
1606 * @index:	the page index
1607 * @filler:	function to perform the read
 1608 * @data:	first argument passed to @filler
1609 *
1610 * Read into the page cache. If a page already exists, and PageUptodate() is
1611 * not set, try to fill the page then wait for it to become unlocked.
1612 *
1613 * If the page does not get brought uptodate, return -EIO.
1614 */
1615struct page *read_cache_page(struct address_space *mapping,
1616				pgoff_t index,
1617				int (*filler)(void *,struct page*),
1618				void *data)
1619{
1620	struct page *page;
1621
1622	page = read_cache_page_async(mapping, index, filler, data);
1623	if (IS_ERR(page))
1624		goto out;
1625	wait_on_page_locked(page);
1626	if (!PageUptodate(page)) {
1627		page_cache_release(page);
1628		page = ERR_PTR(-EIO);
1629	}
1630 out:
1631	return page;
1632}
1633EXPORT_SYMBOL(read_cache_page);
1634
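/*
 * Example (illustrative, not part of the original file):
 * read_mapping_page() in linux/pagemap.h uses the mapping's own
 * ->readpage as the filler, roughly:
 *
 *	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
 *	page = read_cache_page(mapping, index, filler, data);
 */
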
1635/*
1636 * The logic we want is
1637 *
1638 *	if suid or (sgid and xgrp)
1639 *		remove privs
1640 */
1641int should_remove_suid(struct dentry *dentry)
1642{
1643	mode_t mode = dentry->d_inode->i_mode;
1644	int kill = 0;
1645
1646	/* suid always must be killed */
1647	if (unlikely(mode & S_ISUID))
1648		kill = ATTR_KILL_SUID;
1649
1650	/*
1651	 * sgid without any exec bits is just a mandatory locking mark; leave
1652	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
1653	 */
1654	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1655		kill |= ATTR_KILL_SGID;
1656
1657	if (unlikely(kill && !capable(CAP_FSETID)))
1658		return kill;
1659
1660	return 0;
1661}
1662EXPORT_SYMBOL(should_remove_suid);
1663
1664static int __remove_suid(struct dentry *dentry, int kill)
1665{
1666	struct iattr newattrs;
1667
1668	newattrs.ia_valid = ATTR_FORCE | kill;
1669	return notify_change(dentry, &newattrs);
1670}
1671
1672int remove_suid(struct dentry *dentry)
1673{
1674	int killsuid = should_remove_suid(dentry);
1675	int killpriv = security_inode_need_killpriv(dentry);
1676	int error = 0;
1677
1678	if (killpriv < 0)
1679		return killpriv;
1680	if (killpriv)
1681		error = security_inode_killpriv(dentry);
1682	if (!error && killsuid)
1683		error = __remove_suid(dentry, killsuid);
1684
1685	return error;
1686}
1687EXPORT_SYMBOL(remove_suid);
1688
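/*
 * Example (illustrative, not part of the original file): write paths
 * call this before modifying file data; the generic write path does,
 * in essence:
 *
 *	err = remove_suid(file->f_path.dentry);
 *	if (err)
 *		goto out;
 */
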
1689static size_t __iovec_copy_from_user_inatomic(char *vaddr,
1690			const struct iovec *iov, size_t base, size_t bytes)
1691{
1692	size_t copied = 0, left = 0;
1693
1694	while (bytes) {
1695		char __user *buf = iov->iov_base + base;
1696		int copy = min(bytes, iov->iov_len - base);
1697
1698		base = 0;
1699		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
1700		copied += copy;
1701		bytes -= copy;
1702		vaddr += copy;
1703		iov++;
1704
1705		if (unlikely(left))
1706			break;
1707	}
1708	return copied - left;
1709}
1710
1711/*
 1712 * Copy as much as we can into the page and return the number of bytes which
 1713 * were successfully copied.  If a fault is encountered then return the number
 1714 * of bytes which were copied before the fault.
1715 */
1716size_t iov_iter_copy_from_user_atomic(struct page *page,
1717		struct iov_iter *i, unsigned long offset, size_t bytes)
1718{
1719	char *kaddr;
1720	size_t copied;
1721
1722	BUG_ON(!in_atomic());
1723	kaddr = kmap_atomic(page, KM_USER0);
1724	if (likely(i->nr_segs == 1)) {
1725		int left;
1726		char __user *buf = i->iov->iov_base + i->iov_offset;
1727		left = __copy_from_user_inatomic_nocache(kaddr + offset,
1728							buf, bytes);
1729		copied = bytes - left;
1730	} else {
1731		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
1732						i->iov, i->iov_offset, bytes);
1733	}
1734	kunmap_atomic(kaddr, KM_USER0);
1735
1736	return copied;
1737}
1738EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
1739
1740/*
 1741 * This has the same side effects and return value as
1742 * iov_iter_copy_from_user_atomic().
1743 * The difference is that it attempts to resolve faults.
1744 * Page must not be locked.
1745 */
1746size_t iov_iter_copy_from_user(struct page *page,
1747		struct iov_iter *i, unsigned long offset, size_t bytes)
1748{
1749	char *kaddr;
1750	size_t copied;
1751
1752	kaddr = kmap(page);
1753	if (likely(i->nr_segs == 1)) {
1754		int left;
1755		char __user *buf = i->iov->iov_base + i->iov_offset;
1756		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
1757		copied = bytes - left;
1758	} else {
1759		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
1760						i->iov, i->iov_offset, bytes);
1761	}
1762	kunmap(page);
1763	return copied;
1764}
1765EXPORT_SYMBOL(iov_iter_copy_from_user);
1766
1767void iov_iter_advance(struct iov_iter *i, size_t bytes)
1768{
1769	BUG_ON(i->count < bytes);
1770
1771	if (likely(i->nr_segs == 1)) {
1772		i->iov_offset += bytes;
1773		i->count -= bytes;
1774	} else {
1775		const struct iovec *iov = i->iov;
1776		size_t base = i->iov_offset;
1777
1778		/*
1779		 * The !iov->iov_len check ensures we skip over unlikely
 1780 * zero-length segments (without overrunning the iovec).
1781		 */
1782		while (bytes || unlikely(!iov->iov_len && i->count)) {
1783			int copy;
1784
1785			copy = min(bytes, iov->iov_len - base);
1786			BUG_ON(!i->count || i->count < copy);
1787			i->count -= copy;
1788			bytes -= copy;
1789			base += copy;
1790			if (iov->iov_len == base) {
1791				iov++;
1792				base = 0;
1793			}
1794		}
1795		i->iov = iov;
1796		i->iov_offset = base;
1797	}
1798}
1799EXPORT_SYMBOL(iov_iter_advance);
1800
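/*
 * Example (illustrative sketch, not part of the original file): the
 * buffered-write path sets up and consumes an iterator roughly like
 * this (iov_iter_init() lives in linux/fs.h):
 *
 *	struct iov_iter i;
 *
 *	iov_iter_init(&i, iov, nr_segs, count, written);
 *	copied = iov_iter_copy_from_user_atomic(page, &i, offset, bytes);
 *	iov_iter_advance(&i, copied);
 */
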
1801/*
1802 * Fault in the first iovec of the given iov_iter, to a maximum length
1803 * of bytes. Returns 0 on success, or non-zero if the memory could not be
1804 * accessed (ie. because it is an invalid address).
1805 *
1806 * writev-intensive code may want this to prefault several iovecs -- that
1807 * would be possible (callers must not rely on the fact that _only_ the
1808 * first iovec will be faulted with the current implementation).
1809 */
1810int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
1811{
1812	char __user *buf = i->iov->iov_base + i->iov_offset;
1813	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
1814	return fault_in_pages_readable(buf, bytes);
1815}
1816EXPORT_SYMBOL(iov_iter_fault_in_readable);
1817
1818/*
1819 * Return the count of just the current iov_iter segment.
1820 */
1821size_t iov_iter_single_seg_count(struct iov_iter *i)
1822{
1823	const struct iovec *iov = i->iov;
1824	if (i->nr_segs == 1)
1825		return i->count;
1826	else
1827		return min(i->count, iov->iov_len - i->iov_offset);
1828}
1829EXPORT_SYMBOL(iov_iter_single_seg_count);
1830
1831/*
1832 * Performs necessary checks before doing a write
1833 *
1834 * Can adjust writing position or amount of bytes to write.
1835 * Returns appropriate error code that caller should return or
1836 * zero in case that write should be allowed.
1837 */
1838inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
1839{
1840	struct inode *inode = file->f_mapping->host;
1841	unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1842
 1843	if (unlikely(*pos < 0))
 1844		return -EINVAL;
1845
1846	if (!isblk) {
1847		/* FIXME: this is for backwards compatibility with 2.4 */
1848		if (file->f_flags & O_APPEND)
 1849			*pos = i_size_read(inode);
1850
1851		if (limit != RLIM_INFINITY) {
1852			if (*pos >= limit) {
1853				send_sig(SIGXFSZ, current, 0);
1854				return -EFBIG;
1855			}
1856			if (*count > limit - (typeof(limit))*pos) {
1857				*count = limit - (typeof(limit))*pos;
1858			}
1859		}
1860	}
1861
1862	/*
1863	 * LFS rule
1864	 */
1865	if (unlikely(*pos + *count > MAX_NON_LFS &&
1866				!(file->f_flags & O_LARGEFILE))) {
1867		if (*pos >= MAX_NON_LFS) {
1868			return -EFBIG;
1869		}
1870		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
1871			*count = MAX_NON_LFS - (unsigned long)*pos;
1872		}
1873	}
1874
1875	/*
1876	 * Are we about to exceed the fs block limit?
1877	 *
1878	 * If we have written data it becomes a short write.  If we have
1879	 * exceeded without writing data we send a signal and return EFBIG.
1880	 * Linus' frestrict idea will clean these up nicely.
1881	 */
1882	if (likely(!isblk)) {
1883		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
1884			if (*count || *pos > inode->i_sb->s_maxbytes) {
1885				return -EFBIG;
1886			}
1887			/* zero-length writes at ->s_maxbytes are OK */
1888		}
1889
1890		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
1891			*count = inode->i_sb->s_maxbytes - *pos;
1892	} else {
1893#ifdef CONFIG_BLOCK
1894		loff_t isize;
1895		if (bdev_read_only(I_BDEV(inode)))
1896			return -EPERM;
1897		isize = i_size_read(inode);
1898		if (*pos >= isize) {
1899			if (*count || *pos > isize)
1900				return -ENOSPC;
1901		}
1902
1903		if (*pos + *count > isize)
1904			*count = isize - *pos;
1905#else
1906		return -EPERM;
1907#endif
1908	}
1909	return 0;
1910}
1911EXPORT_SYMBOL(generic_write_checks);
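/*
 * Example only: a minimal sketch of a write path calling
 * generic_write_checks() before any data is copied.  The variables are
 * assumed to exist in the (hypothetical) caller; see
 * __generic_file_aio_write_nolock() below for the real user.
 */
#if 0
	err = generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (err)
		return err;
	if (count == 0)
		return 0;	/* clamped: nothing left to write */
	/* ... write at most 'count' bytes starting at 'pos' ... */
#endif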
1912
1913int pagecache_write_begin(struct file *file, struct address_space *mapping,
1914				loff_t pos, unsigned len, unsigned flags,
1915				struct page **pagep, void **fsdata)
1916{
1917	const struct address_space_operations *aops = mapping->a_ops;
1918
1919	if (aops->write_begin) {
1920		return aops->write_begin(file, mapping, pos, len, flags,
1921							pagep, fsdata);
1922	} else {
1923		int ret;
1924		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1925		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1926		struct inode *inode = mapping->host;
1927		struct page *page;
1928again:
1929		page = __grab_cache_page(mapping, index);
1930		*pagep = page;
1931		if (!page)
1932			return -ENOMEM;
1933
1934		if (flags & AOP_FLAG_UNINTERRUPTIBLE && !PageUptodate(page)) {
1935			/*
1936			 * There is no way to resolve a short write situation
1937			 * for a !Uptodate page (except by the double copying
1938			 * done in the caller by generic_perform_write_2copy).
1939			 *
1940			 * Instead, we have to bring it uptodate here.
1941			 */
1942			ret = aops->readpage(file, page);
1943			page_cache_release(page);
1944			if (ret) {
1945				if (ret == AOP_TRUNCATED_PAGE)
1946					goto again;
1947				return ret;
1948			}
1949			goto again;
1950		}
1951
1952		ret = aops->prepare_write(file, page, offset, offset+len);
1953		if (ret) {
1954			unlock_page(page);
1955			page_cache_release(page);
1956			if (pos + len > inode->i_size)
1957				vmtruncate(inode, inode->i_size);
1958		}
1959		return ret;
1960	}
1961}
1962EXPORT_SYMBOL(pagecache_write_begin);
1963
1964int pagecache_write_end(struct file *file, struct address_space *mapping,
1965				loff_t pos, unsigned len, unsigned copied,
1966				struct page *page, void *fsdata)
1967{
1968	const struct address_space_operations *aops = mapping->a_ops;
1969	int ret;
1970
1971	if (aops->write_end) {
1972		mark_page_accessed(page);
1973		ret = aops->write_end(file, mapping, pos, len, copied,
1974							page, fsdata);
1975	} else {
1976		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1977		struct inode *inode = mapping->host;
1978
1979		flush_dcache_page(page);
1980		ret = aops->commit_write(file, page, offset, offset+len);
1981		unlock_page(page);
1982		mark_page_accessed(page);
1983		page_cache_release(page);
1984
1985		if (ret < 0) {
1986			if (pos + len > inode->i_size)
1987				vmtruncate(inode, inode->i_size);
1988		} else if (ret > 0)
1989			ret = min_t(size_t, copied, ret);
1990		else
1991			ret = copied;
1992	}
1993
1994	return ret;
1995}
1996EXPORT_SYMBOL(pagecache_write_end);
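/*
 * Example only: the begin/copy/end cycle that the two wrappers above
 * implement for a single pagecache page.  Error handling and the actual
 * data copy are elided; compare generic_perform_write() below for the
 * full loop.
 */
#if 0
	status = pagecache_write_begin(file, mapping, pos, bytes,
					flags, &page, &fsdata);
	if (status)
		return status;
	/* ... copy 'bytes' of data into the locked 'page' ... */
	status = pagecache_write_end(file, mapping, pos, bytes, copied,
					page, fsdata);
#endif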
1997
1998ssize_t
1999generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2000		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
2001		size_t count, size_t ocount)
2002{
2003	struct file	*file = iocb->ki_filp;
2004	struct address_space *mapping = file->f_mapping;
2005	struct inode	*inode = mapping->host;
2006	ssize_t		written;
2007
2008	if (count != ocount)
2009		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
2010
2011	written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
2012	if (written > 0) {
2013		loff_t end = pos + written;
2014		if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
2015			i_size_write(inode, end);
2016			mark_inode_dirty(inode);
2017		}
2018		*ppos = end;
2019	}
2020
2021	/*
2022	 * Sync the fs metadata but not the minor inode changes, and of
2023	 * course not the data, as we did direct DMA for the I/O.
2024	 * i_mutex is held, which protects generic_osync_inode() from
2025	 * livelocking.  AIO O_DIRECT ops attempt to sync metadata here.
2026	 */
2027	if ((written >= 0 || written == -EIOCBQUEUED) &&
2028	    ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2029		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
2030		if (err < 0)
2031			written = err;
2032	}
2033	return written;
2034}
2035EXPORT_SYMBOL(generic_file_direct_write);
2036
2037/*
2038 * Find or create a page at the given pagecache position. Return the locked
2039 * page. This function is specifically for buffered writes.
2040 */
2041struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
2042{
2043	int status;
2044	struct page *page;
2045repeat:
2046	page = find_lock_page(mapping, index);
2047	if (likely(page))
2048		return page;
2049
2050	page = page_cache_alloc(mapping);
2051	if (!page)
2052		return NULL;
2053	status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
2054	if (unlikely(status)) {
2055		page_cache_release(page);
2056		if (status == -EEXIST)
2057			goto repeat;
2058		return NULL;
2059	}
2060	return page;
2061}
2062EXPORT_SYMBOL(__grab_cache_page);
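/*
 * Example only: the expected lifetime of a page returned by
 * __grab_cache_page().  It comes back locked with an elevated refcount,
 * so the caller must unlock and release it; both write paths below
 * follow this pattern.
 */
#if 0
	page = __grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;
	/* ... fill or modify the locked page ... */
	unlock_page(page);
	page_cache_release(page);
#endif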
2063
2064static ssize_t generic_perform_write_2copy(struct file *file,
2065				struct iov_iter *i, loff_t pos)
2066{
2067	struct address_space *mapping = file->f_mapping;
2068	const struct address_space_operations *a_ops = mapping->a_ops;
2069	struct inode *inode = mapping->host;
2070	long status = 0;
2071	ssize_t written = 0;
2072
2073	do {
2074		struct page *src_page;
2075		struct page *page;
2076		pgoff_t index;		/* Pagecache index for current page */
2077		unsigned long offset;	/* Offset into pagecache page */
2078		unsigned long bytes;	/* Bytes to write to page */
2079		size_t copied;		/* Bytes copied from user */
2080
2081		offset = (pos & (PAGE_CACHE_SIZE - 1));
2082		index = pos >> PAGE_CACHE_SHIFT;
2083		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2084						iov_iter_count(i));
2085
2086		/*
2087		 * a non-NULL src_page indicates that we're copying via a
2088		 * kernel bounce page rather than with atomic usercopies.
2089		 */
2090		src_page = NULL;
2091
2092		/*
2093		 * Bring in the user page that we will copy from _first_.
2094		 * Otherwise there's a nasty deadlock on copying from the
2095		 * same page as we're writing to, without it being marked
2096		 * up-to-date.
2097		 *
2098		 * Not only is this an optimisation, but it is also required
2099		 * to check that the address is actually valid, when atomic
2100		 * usercopies are used, below.
2101		 */
2102		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2103			status = -EFAULT;
2104			break;
2105		}
2106
2107		page = __grab_cache_page(mapping, index);
2108		if (!page) {
2109			status = -ENOMEM;
2110			break;
2111		}
2112
2113		/*
2114		 * non-uptodate pages cannot cope with short copies, and we
2115		 * cannot take a pagefault with the destination page locked.
2116		 * So copy the source data into a bounce page first.
2117		 */
2118		if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
2119			unlock_page(page);
2120
2121			src_page = alloc_page(GFP_KERNEL);
2122			if (!src_page) {
2123				page_cache_release(page);
2124				status = -ENOMEM;
2125				break;
2126			}
2127
2128			/*
2129			 * Cannot get_user_pages with a page locked for the
2130			 * same reason as we can't take a page fault with a
2131			 * page locked (as explained below).
2132			 */
2133			copied = iov_iter_copy_from_user(src_page, i,
2134								offset, bytes);
2135			if (unlikely(copied == 0)) {
2136				status = -EFAULT;
2137				page_cache_release(page);
2138				page_cache_release(src_page);
2139				break;
2140			}
2141			bytes = copied;
2142
2143			lock_page(page);
2144			/*
2145			 * Can't handle the page going uptodate here, because
2146			 * that means we would use non-atomic usercopies, which
2147			 * zero out the tail of the page, which can cause
2148			 * zeroes to become transiently visible. We could just
2149			 * use a non-zeroing copy, but the APIs aren't too
2150			 * consistent.
2151			 */
2152			if (unlikely(!page->mapping || PageUptodate(page))) {
2153				unlock_page(page);
2154				page_cache_release(page);
2155				page_cache_release(src_page);
2156				continue;
2157			}
2158		}
2159
2160		status = a_ops->prepare_write(file, page, offset, offset+bytes);
2161		if (unlikely(status))
2162			goto fs_write_aop_error;
2163
2164		if (!src_page) {
2165			/*
2166			 * Must not enter the pagefault handler here, because
2167			 * we hold the page lock, so we might recursively
2168			 * deadlock on the same lock, or get an ABBA deadlock
2169			 * against a different lock, or against the mmap_sem
2170			 * (which nests outside the page lock).  So increment
2171			 * preempt count, and use _atomic usercopies.
2172			 *
2173			 * The page is uptodate so we are OK to encounter a
2174			 * short copy: if unmodified parts of the page are
2175			 * marked dirty and written out to disk, it doesn't
2176			 * really matter.
2177			 */
2178			pagefault_disable();
2179			copied = iov_iter_copy_from_user_atomic(page, i,
2180								offset, bytes);
2181			pagefault_enable();
2182		} else {
2183			void *src, *dst;
2184			src = kmap_atomic(src_page, KM_USER0);
2185			dst = kmap_atomic(page, KM_USER1);
2186			memcpy(dst + offset, src + offset, bytes);
2187			kunmap_atomic(dst, KM_USER1);
2188			kunmap_atomic(src, KM_USER0);
2189			copied = bytes;
2190		}
2191		flush_dcache_page(page);
2192
2193		status = a_ops->commit_write(file, page, offset, offset+bytes);
2194		if (unlikely(status < 0))
2195			goto fs_write_aop_error;
2196		if (unlikely(status > 0)) /* filesystem did partial write */
2197			copied = min_t(size_t, copied, status);
2198
2199		unlock_page(page);
2200		mark_page_accessed(page);
2201		page_cache_release(page);
2202		if (src_page)
2203			page_cache_release(src_page);
2204
2205		iov_iter_advance(i, copied);
2206		pos += copied;
2207		written += copied;
2208
2209		balance_dirty_pages_ratelimited(mapping);
2210		cond_resched();
2211		continue;
2212
2213fs_write_aop_error:
2214		unlock_page(page);
2215		page_cache_release(page);
2216		if (src_page)
2217			page_cache_release(src_page);
2218
2219		/*
2220		 * prepare_write() may have instantiated a few blocks
2221		 * outside i_size.  Trim these off again. Don't need
2222		 * i_size_read because we hold i_mutex.
2223		 */
2224		if (pos + bytes > inode->i_size)
2225			vmtruncate(inode, inode->i_size);
2226		break;
2227	} while (iov_iter_count(i));
2228
2229	return written ? written : status;
2230}
2231
2232static ssize_t generic_perform_write(struct file *file,
2233				struct iov_iter *i, loff_t pos)
2234{
2235	struct address_space *mapping = file->f_mapping;
2236	const struct address_space_operations *a_ops = mapping->a_ops;
2237	long status = 0;
2238	ssize_t written = 0;
2239	unsigned int flags = 0;
2240
2241	/*
2242	 * Copies from kernel address space cannot fail (NFSD is a big user).
2243	 */
2244	if (segment_eq(get_fs(), KERNEL_DS))
2245		flags |= AOP_FLAG_UNINTERRUPTIBLE;
2246
2247	do {
2248		struct page *page;
2249		pgoff_t index;		/* Pagecache index for current page */
2250		unsigned long offset;	/* Offset into pagecache page */
2251		unsigned long bytes;	/* Bytes to write to page */
2252		size_t copied;		/* Bytes copied from user */
2253		void *fsdata;
2254
2255		offset = (pos & (PAGE_CACHE_SIZE - 1));
2256		index = pos >> PAGE_CACHE_SHIFT;
2257		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2258						iov_iter_count(i));
2259
2260again:
2261
2262		/*
2263		 * Bring in the user page that we will copy from _first_.
2264		 * Otherwise there's a nasty deadlock on copying from the
2265		 * same page as we're writing to, without it being marked
2266		 * up-to-date.
2267		 *
2268		 * Not only is this an optimisation, but it is also required
2269		 * to check that the address is actually valid, when atomic
2270		 * usercopies are used, below.
2271		 */
2272		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2273			status = -EFAULT;
2274			break;
2275		}
2276
2277		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2278						&page, &fsdata);
2279		if (unlikely(status))
2280			break;
2281
2282		pagefault_disable();
2283		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2284		pagefault_enable();
2285		flush_dcache_page(page);
2286
2287		status = a_ops->write_end(file, mapping, pos, bytes, copied,
2288						page, fsdata);
2289		if (unlikely(status < 0))
2290			break;
2291		copied = status;
2292
2293		cond_resched();
2294
2295		iov_iter_advance(i, copied);
2296		if (unlikely(copied == 0)) {
2297			/*
2298			 * If we were unable to copy any data at all, we must
2299			 * fall back to a single segment length write.
2300			 *
2301			 * If we didn't fall back here, we could livelock
2302			 * because not all segments in the iov can be copied at
2303			 * once without a pagefault.
2304			 */
2305			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2306						iov_iter_single_seg_count(i));
2307			goto again;
2308		}
2309		pos += copied;
2310		written += copied;
2311
2312		balance_dirty_pages_ratelimited(mapping);
2313
2314	} while (iov_iter_count(i));
2315
2316	return written ? written : status;
2317}
2318
2319ssize_t
2320generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2321		unsigned long nr_segs, loff_t pos, loff_t *ppos,
2322		size_t count, ssize_t written)
2323{
2324	struct file *file = iocb->ki_filp;
2325	struct address_space *mapping = file->f_mapping;
2326	const struct address_space_operations *a_ops = mapping->a_ops;
2327	struct inode *inode = mapping->host;
2328	ssize_t status;
2329	struct iov_iter i;
2330
2331	iov_iter_init(&i, iov, nr_segs, count, written);
2332	if (a_ops->write_begin)
2333		status = generic_perform_write(file, &i, pos);
2334	else
2335		status = generic_perform_write_2copy(file, &i, pos);
2336
2337	if (likely(status >= 0)) {
2338		written += status;
2339		*ppos = pos + status;
2340
2341		/*
2342		 * For now, when the user asks for O_SYNC, we'll actually give
2343		 * O_DSYNC
2344		 */
2345		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2346			if (!a_ops->writepage || !is_sync_kiocb(iocb))
2347				status = generic_osync_inode(inode, mapping,
2348						OSYNC_METADATA|OSYNC_DATA);
2349		}
2350	}
2351
2352	/*
2353	 * If we get here for O_DIRECT writes then we must have fallen through
2354	 * to buffered writes (block instantiation inside i_size).  So we sync
2355	 * the file data here, to try to honour O_DIRECT expectations.
2356	 */
2357	if (unlikely(file->f_flags & O_DIRECT) && written)
2358		status = filemap_write_and_wait(mapping);
2359
2360	return written ? written : status;
2361}
2362EXPORT_SYMBOL(generic_file_buffered_write);
2363
2364static ssize_t
2365__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
2366				unsigned long nr_segs, loff_t *ppos)
2367{
2368	struct file *file = iocb->ki_filp;
2369	struct address_space *mapping = file->f_mapping;
2370	size_t ocount;		/* original count */
2371	size_t count;		/* after file limit checks */
2372	struct inode	*inode = mapping->host;
2373	loff_t		pos;
2374	ssize_t		written;
2375	ssize_t		err;
2376
2377	ocount = 0;
2378	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
2379	if (err)
2380		return err;
2381
2382	count = ocount;
2383	pos = *ppos;
2384
2385	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2386
2387	/* We can write back this queue in page reclaim */
2388	current->backing_dev_info = mapping->backing_dev_info;
2389	written = 0;
2390
2391	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2392	if (err)
2393		goto out;
2394
2395	if (count == 0)
2396		goto out;
2397
2398	err = remove_suid(file->f_path.dentry);
2399	if (err)
2400		goto out;
2401
2402	file_update_time(file);
2403
2404	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2405	if (unlikely(file->f_flags & O_DIRECT)) {
2406		loff_t endbyte;
2407		ssize_t written_buffered;
2408
2409		written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
2410							ppos, count, ocount);
2411		if (written < 0 || written == count)
2412			goto out;
2413		/*
2414		 * direct-io write to a hole: fall through to buffered I/O
2415		 * for completing the rest of the request.
2416		 */
2417		pos += written;
2418		count -= written;
2419		written_buffered = generic_file_buffered_write(iocb, iov,
2420						nr_segs, pos, ppos, count,
2421						written);
2422		/*
2423		 * If generic_file_buffered_write() returned a synchronous error
2424		 * then we want to return the number of bytes which were
2425		 * direct-written, or the error code if that was zero.  Note
2426		 * that this differs from normal direct-io semantics, which
2427		 * will return -EFOO even if some bytes were written.
2428		 */
2429		if (written_buffered < 0) {
2430			err = written_buffered;
2431			goto out;
2432		}
2433
2434		/*
2435		 * We need to ensure that the page cache pages are written to
2436		 * disk and invalidated to preserve the expected O_DIRECT
2437		 * semantics.
2438		 */
2439		endbyte = pos + written_buffered - written - 1;
2440		err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
2441					    SYNC_FILE_RANGE_WAIT_BEFORE|
2442					    SYNC_FILE_RANGE_WRITE|
2443					    SYNC_FILE_RANGE_WAIT_AFTER);
2444		if (err == 0) {
2445			written = written_buffered;
2446			invalidate_mapping_pages(mapping,
2447						 pos >> PAGE_CACHE_SHIFT,
2448						 endbyte >> PAGE_CACHE_SHIFT);
2449		} else {
2450			/*
2451			 * We don't know how much we wrote, so just return
2452			 * the number of bytes which were direct-written
2453			 */
2454		}
2455	} else {
2456		written = generic_file_buffered_write(iocb, iov, nr_segs,
2457				pos, ppos, count, written);
2458	}
2459out:
2460	current->backing_dev_info = NULL;
2461	return written ? written : err;
2462}
2463
2464ssize_t generic_file_aio_write_nolock(struct kiocb *iocb,
2465		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
2466{
2467	struct file *file = iocb->ki_filp;
2468	struct address_space *mapping = file->f_mapping;
2469	struct inode *inode = mapping->host;
2470	ssize_t ret;
2471
2472	BUG_ON(iocb->ki_pos != pos);
2473
2474	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
2475			&iocb->ki_pos);
2476
2477	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2478		ssize_t err;
2479
2480		err = sync_page_range_nolock(inode, mapping, pos, ret);
2481		if (err < 0)
2482			ret = err;
2483	}
2484	return ret;
2485}
2486EXPORT_SYMBOL(generic_file_aio_write_nolock);
2487
2488ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2489		unsigned long nr_segs, loff_t pos)
2490{
2491	struct file *file = iocb->ki_filp;
2492	struct address_space *mapping = file->f_mapping;
2493	struct inode *inode = mapping->host;
2494	ssize_t ret;
2495
2496	BUG_ON(iocb->ki_pos != pos);
2497
2498	mutex_lock(&inode->i_mutex);
2499	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
2500			&iocb->ki_pos);
2501	mutex_unlock(&inode->i_mutex);
2502
2503	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2504		ssize_t err;
2505
2506		err = sync_page_range(inode, mapping, pos, ret);
2507		if (err < 0)
2508			ret = err;
2509	}
2510	return ret;
2511}
2512EXPORT_SYMBOL(generic_file_aio_write);
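/*
 * Example only: how a filesystem typically plugs the generic write path
 * into its file_operations.  The table below is illustrative (modelled
 * on ext2-style wiring), not part of this file.
 */
#if 0
const struct file_operations example_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
};
#endif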
2513
2514/*
2515 * Called under i_mutex for writes to S_ISREG files.  Returns -EIO if something
2516 * went wrong during pagecache shootdown.
2517 */
2518static ssize_t
2519generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2520	loff_t offset, unsigned long nr_segs)
2521{
2522	struct file *file = iocb->ki_filp;
2523	struct address_space *mapping = file->f_mapping;
2524	ssize_t retval;
2525	size_t write_len;
2526	pgoff_t end = 0; /* silence gcc */
2527
2528	/*
2529	 * If it's a write, unmap all mmappings of the file up-front.  This
2530	 * will cause any pte dirty bits to be propagated into the pageframes
2531	 * for the subsequent filemap_write_and_wait().
2532	 */
2533	if (rw == WRITE) {
2534		write_len = iov_length(iov, nr_segs);
2535		end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
2536		if (mapping_mapped(mapping))
2537			unmap_mapping_range(mapping, offset, write_len, 0);
2538	}
2539
2540	retval = filemap_write_and_wait(mapping);
2541	if (retval)
2542		goto out;
2543
2544	/*
2545	 * After a write we want buffered reads to be sure to go to disk to get
2546	 * the new data.  We invalidate clean cached pages from the region we're
2547	 * about to write.  We do this *before* the write so that we can return
2548	 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
2549	 */
2550	if (rw == WRITE && mapping->nrpages) {
2551		retval = invalidate_inode_pages2_range(mapping,
2552					offset >> PAGE_CACHE_SHIFT, end);
2553		if (retval)
2554			goto out;
2555	}
2556
2557	retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
2558
2559	/*
2560	 * Finally, try again to invalidate clean pages which might have been
2561	 * cached by non-direct readahead, or faulted in by get_user_pages()
2562	 * if the source of the write was an mmap'ed region of the file
2563	 * we're writing.  Either one is a pretty crazy thing to do,
2564	 * so we don't support it 100%.  If this invalidation
2565	 * fails, tough, the write still worked...
2566	 */
2567	if (rw == WRITE && mapping->nrpages) {
2568		invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
2569	}
2570out:
2571	return retval;
2572}
2573
2574/**
2575 * try_to_release_page() - release old fs-specific metadata on a page
2576 *
2577 * @page: the page which the kernel is trying to free
2578 * @gfp_mask: memory allocation flags (and I/O mode)
2579 *
2580 * The address_space is asked to try to release any data held against the
2581 * page (presumably at page->private).  If the release was successful,
2582 * return `1'.  Otherwise return zero.
2583 *
2584 * The @gfp_mask argument specifies whether I/O may be performed to release
2585 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
2586 *
2587 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
2588 */
2589int try_to_release_page(struct page *page, gfp_t gfp_mask)
2590{
2591	struct address_space * const mapping = page->mapping;
2592
2593	BUG_ON(!PageLocked(page));
2594	if (PageWriteback(page))
2595		return 0;
2596
2597	if (mapping && mapping->a_ops->releasepage)
2598		return mapping->a_ops->releasepage(page, gfp_mask);
2599	return try_to_free_buffers(page);
2600}
2601
2602EXPORT_SYMBOL(try_to_release_page);
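/*
 * Example only: a caller must hold the page lock and must be prepared
 * for failure while the page has busy private data.  This fragment is a
 * sketch of the pattern used by shrink_page_list() in mm/vmscan.c; the
 * keep_locked label is assumed to exist in the caller.
 */
#if 0
	BUG_ON(!PageLocked(page));
	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		goto keep_locked;	/* fs metadata still pinned */
	/* page->private is gone; the page may now be freed */
#endif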
2603