filemap.c revision d44ed4f86892e350f4b16a3489b7e7c1a9bb7ead
1/*
2 *	linux/mm/filemap.c
3 *
4 * Copyright (C) 1994-1999  Linus Torvalds
5 */
6
7/*
8 * This file handles the generic file mmap semantics used by
9 * most "normal" filesystems (but you don't /have/ to use this:
10 * the NFS filesystem used to do this differently, for example)
11 */
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/compiler.h>
16#include <linux/fs.h>
17#include <linux/aio.h>
18#include <linux/kernel_stat.h>
19#include <linux/mm.h>
20#include <linux/swap.h>
21#include <linux/mman.h>
22#include <linux/pagemap.h>
23#include <linux/file.h>
24#include <linux/uio.h>
25#include <linux/hash.h>
26#include <linux/writeback.h>
27#include <linux/pagevec.h>
28#include <linux/blkdev.h>
29#include <linux/security.h>
30#include <linux/syscalls.h>
31#include "filemap.h"
32/*
33 * FIXME: remove all knowledge of the buffer layer from the core VM
34 */
35#include <linux/buffer_head.h> /* for generic_osync_inode */
36
37#include <asm/uaccess.h>
38#include <asm/mman.h>
39
40/*
41 * Shared mappings implemented 30.11.1994. It's not fully working yet,
42 * though.
43 *
44 * Shared mappings now work. 15.8.1995  Bruno.
45 *
46 * finished 'unifying' the page and buffer cache and SMP-threaded the
47 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
48 *
49 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
50 */
51
52/*
53 * Lock ordering:
54 *
55 *  ->i_mmap_lock		(vmtruncate)
56 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
57 *      ->swap_lock		(exclusive_swap_page, others)
58 *        ->mapping->tree_lock
59 *
60 *  ->i_sem
61 *    ->i_mmap_lock		(truncate->unmap_mapping_range)
62 *
63 *  ->mmap_sem
64 *    ->i_mmap_lock
65 *      ->page_table_lock	(various places, mainly in mmap.c)
66 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
67 *
68 *  ->mmap_sem
69 *    ->lock_page		(access_process_vm)
70 *
71 *  ->mmap_sem
72 *    ->i_sem			(msync)
73 *
74 *  ->i_sem
75 *    ->i_alloc_sem             (various)
76 *
77 *  ->inode_lock
78 *    ->sb_lock			(fs/fs-writeback.c)
79 *    ->mapping->tree_lock	(__sync_single_inode)
80 *
81 *  ->i_mmap_lock
82 *    ->anon_vma.lock		(vma_adjust)
83 *
84 *  ->anon_vma.lock
85 *    ->page_table_lock		(anon_vma_prepare and various)
86 *
87 *  ->page_table_lock
88 *    ->swap_lock		(try_to_unmap_one)
89 *    ->private_lock		(try_to_unmap_one)
90 *    ->tree_lock		(try_to_unmap_one)
91 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
92 *    ->private_lock		(page_remove_rmap->set_page_dirty)
93 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
94 *    ->inode_lock		(page_remove_rmap->set_page_dirty)
95 *    ->inode_lock		(zap_pte_range->set_page_dirty)
96 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
97 *
98 *  ->task->proc_lock
99 *    ->dcache_lock		(proc_pid_lookup)
100 */
101
102/*
103 * Remove a page from the page cache and free it. Caller has to make
104 * sure the page is locked and that nobody else uses it - or that usage
105 * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
106 */
107void __remove_from_page_cache(struct page *page)
108{
109	struct address_space *mapping = page->mapping;
110
111	radix_tree_delete(&mapping->page_tree, page->index);
112	page->mapping = NULL;
113	mapping->nrpages--;
114	pagecache_acct(-1);
115}
116
117void remove_from_page_cache(struct page *page)
118{
119	struct address_space *mapping = page->mapping;
120
121	BUG_ON(!PageLocked(page));
122
123	write_lock_irq(&mapping->tree_lock);
124	__remove_from_page_cache(page);
125	write_unlock_irq(&mapping->tree_lock);
126}
127
128static int sync_page(void *word)
129{
130	struct address_space *mapping;
131	struct page *page;
132
133	page = container_of((page_flags_t *)word, struct page, flags);
134
135	/*
136	 * page_mapping() is being called without PG_locked held.
137	 * Some knowledge of the state and use of the page is used to
138	 * reduce the requirements down to a memory barrier.
139	 * The danger here is of a stale page_mapping() return value
140	 * indicating a struct address_space different from the one it's
141	 * associated with when it is associated with one.
142	 * After smp_mb(), it's either the correct page_mapping() for
143	 * the page, or an old page_mapping() and the page's own
144	 * page_mapping() has gone NULL.
145	 * The ->sync_page() address_space operation must tolerate
146	 * page_mapping() going NULL. By an amazing coincidence,
147	 * this comes about because none of the users of the page
148	 * in the ->sync_page() methods make essential use of the
149	 * page_mapping(), merely passing the page down to the backing
150	 * device's unplug functions when it's non-NULL, which in turn
151	 * ignore it for all cases but swap, where only page->private is
152	 * of interest. When page_mapping() does go NULL, the entire
153	 * call stack gracefully ignores the page and returns.
154	 * -- wli
155	 */
156	smp_mb();
157	mapping = page_mapping(page);
158	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
159		mapping->a_ops->sync_page(page);
160	io_schedule();
161	return 0;
162}
163
164/**
165 * filemap_fdatawrite_range - start writeback against all of a mapping's
166 * dirty pages that lie within the byte offsets <start, end>
167 * @mapping:	address space structure to write
168 * @start:	offset in bytes where the range starts
169 * @end:	offset in bytes where the range ends
170 * @sync_mode:	enable synchronous operation
171 *
172 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
173 * opposed to a regular memory cleansing writeback.  The difference between
174 * these two operations is that if a dirty page/buffer is encountered, it must
175 * be waited upon, and not just skipped over.
176 */
177static int __filemap_fdatawrite_range(struct address_space *mapping,
178	loff_t start, loff_t end, int sync_mode)
179{
180	int ret;
181	struct writeback_control wbc = {
182		.sync_mode = sync_mode,
183		.nr_to_write = mapping->nrpages * 2,
184		.start = start,
185		.end = end,
186	};
187
188	if (!mapping_cap_writeback_dirty(mapping))
189		return 0;
190
191	ret = do_writepages(mapping, &wbc);
192	return ret;
193}
194
195static inline int __filemap_fdatawrite(struct address_space *mapping,
196	int sync_mode)
197{
198	return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
199}
200
201int filemap_fdatawrite(struct address_space *mapping)
202{
203	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
204}
205EXPORT_SYMBOL(filemap_fdatawrite);
206
207static int filemap_fdatawrite_range(struct address_space *mapping,
208	loff_t start, loff_t end)
209{
210	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
211}
212
213/*
214 * This is a mostly non-blocking flush.  Not suitable for data-integrity
215 * purposes - I/O may not be started against all dirty pages.
216 */
217int filemap_flush(struct address_space *mapping)
218{
219	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
220}
221EXPORT_SYMBOL(filemap_flush);
222
223/*
224 * Wait for writeback to complete against pages indexed by start->end
225 * inclusive
226 */
227static int wait_on_page_writeback_range(struct address_space *mapping,
228				pgoff_t start, pgoff_t end)
229{
230	struct pagevec pvec;
231	int nr_pages;
232	int ret = 0;
233	pgoff_t index;
234
235	if (end < start)
236		return 0;
237
238	pagevec_init(&pvec, 0);
239	index = start;
240	while ((index <= end) &&
241			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
242			PAGECACHE_TAG_WRITEBACK,
243			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
244		unsigned i;
245
246		for (i = 0; i < nr_pages; i++) {
247			struct page *page = pvec.pages[i];
248
249			/* until radix tree lookup accepts end_index */
250			if (page->index > end)
251				continue;
252
253			wait_on_page_writeback(page);
254			if (PageError(page))
255				ret = -EIO;
256		}
257		pagevec_release(&pvec);
258		cond_resched();
259	}
260
261	/* Check for outstanding write errors */
262	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
263		ret = -ENOSPC;
264	if (test_and_clear_bit(AS_EIO, &mapping->flags))
265		ret = -EIO;
266
267	return ret;
268}
269
270/*
271 * Write and wait upon all the pages in the passed range.  This is a "data
272 * integrity" operation.  It waits upon in-flight writeout before starting and
273 * waiting upon new writeout.  If there was an IO error, return it.
274 *
275 * We need to re-take i_sem during the generic_osync_inode list walk because
276 * it is otherwise livelockable.
277 */
278int sync_page_range(struct inode *inode, struct address_space *mapping,
279			loff_t pos, size_t count)
280{
281	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
282	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
283	int ret;
284
285	if (!mapping_cap_writeback_dirty(mapping) || !count)
286		return 0;
287	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
288	if (ret == 0) {
289		down(&inode->i_sem);
290		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
291		up(&inode->i_sem);
292	}
293	if (ret == 0)
294		ret = wait_on_page_writeback_range(mapping, start, end);
295	return ret;
296}
297EXPORT_SYMBOL(sync_page_range);
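
/*
 * A minimal sketch (not in-tree code) of the O_SYNC tail described above:
 * after a buffered write has dirtied pagecache, flush and wait upon just the
 * byte range that was written.  The function name is illustrative; the
 * generic write paths further down use essentially this sequence.
 */
static ssize_t demo_osync_write_tail(struct file *file, loff_t pos,
				     ssize_t written)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;

	if (written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		/* writes out and waits upon only the pages we dirtied */
		err = sync_page_range(inode, mapping, pos, written);
		if (err < 0)
			written = err;
	}
	return written;
}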
298
299/*
300 * Note: Holding i_sem across sync_page_range_nolock is not a good idea
301 * as it forces O_SYNC writers to different parts of the same file
302 * to be serialised right until io completion.
303 */
304int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
305			loff_t pos, size_t count)
306{
307	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
308	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
309	int ret;
310
311	if (!mapping_cap_writeback_dirty(mapping) || !count)
312		return 0;
313	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
314	if (ret == 0)
315		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
316	if (ret == 0)
317		ret = wait_on_page_writeback_range(mapping, start, end);
318	return ret;
319}
320EXPORT_SYMBOL(sync_page_range_nolock);
321
322/**
323 * filemap_fdatawait - walk the list of under-writeback pages of the given
324 *     address space and wait for all of them.
325 *
326 * @mapping: address space structure to wait for
327 */
328int filemap_fdatawait(struct address_space *mapping)
329{
330	loff_t i_size = i_size_read(mapping->host);
331
332	if (i_size == 0)
333		return 0;
334
335	return wait_on_page_writeback_range(mapping, 0,
336				(i_size - 1) >> PAGE_CACHE_SHIFT);
337}
338EXPORT_SYMBOL(filemap_fdatawait);
339
340int filemap_write_and_wait(struct address_space *mapping)
341{
342	int retval = 0;
343
344	if (mapping->nrpages) {
345		retval = filemap_fdatawrite(mapping);
346		if (retval == 0)
347			retval = filemap_fdatawait(mapping);
348	}
349	return retval;
350}
351
352int filemap_write_and_wait_range(struct address_space *mapping,
353				 loff_t lstart, loff_t lend)
354{
355	int retval = 0;
356
357	if (mapping->nrpages) {
358		retval = __filemap_fdatawrite_range(mapping, lstart, lend,
359						    WB_SYNC_ALL);
360		if (retval == 0)
361			retval = wait_on_page_writeback_range(mapping,
362						    lstart >> PAGE_CACHE_SHIFT,
363						    lend >> PAGE_CACHE_SHIFT);
364	}
365	return retval;
366}
367
368/*
369 * This function is used to add newly allocated pagecache pages:
370 * the page is new, so we can just run SetPageLocked() against it.
371 * The other page state flags were set by rmqueue().
372 *
373 * This function does not add the page to the LRU.  The caller must do that.
374 */
375int add_to_page_cache(struct page *page, struct address_space *mapping,
376		pgoff_t offset, int gfp_mask)
377{
378	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
379
380	if (error == 0) {
381		write_lock_irq(&mapping->tree_lock);
382		error = radix_tree_insert(&mapping->page_tree, offset, page);
383		if (!error) {
384			page_cache_get(page);
385			SetPageLocked(page);
386			page->mapping = mapping;
387			page->index = offset;
388			mapping->nrpages++;
389			pagecache_acct(1);
390		}
391		write_unlock_irq(&mapping->tree_lock);
392		radix_tree_preload_end();
393	}
394	return error;
395}
396
397EXPORT_SYMBOL(add_to_page_cache);
398
399int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
400				pgoff_t offset, int gfp_mask)
401{
402	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
403	if (ret == 0)
404		lru_cache_add(page);
405	return ret;
406}
407
408/*
409 * In order to wait for pages to become available there must be
410 * waitqueues associated with pages. By using a hash table of
411 * waitqueues where the bucket discipline is to maintain all
412 * waiters on the same queue and wake all when any of the pages
413 * become available, with the woken contexts then checking that the
414 * page they care about really did become available, this saves space
415 * at a cost of "thundering herd" phenomena during rare hash
416 * collisions.
417 */
418static wait_queue_head_t *page_waitqueue(struct page *page)
419{
420	const struct zone *zone = page_zone(page);
421
422	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
423}
424
425static inline void wake_up_page(struct page *page, int bit)
426{
427	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
428}
429
430void fastcall wait_on_page_bit(struct page *page, int bit_nr)
431{
432	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
433
434	if (test_bit(bit_nr, &page->flags))
435		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
436							TASK_UNINTERRUPTIBLE);
437}
438EXPORT_SYMBOL(wait_on_page_bit);
439
440/**
441 * unlock_page() - unlock a locked page
442 *
443 * @page: the page
444 *
445 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
446 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
447 * mechanism between PageLocked pages and PageWriteback pages is shared.
448 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
449 *
450 * The first mb is necessary to safely close the critical section opened by the
451 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
452 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
453 * parallel wait_on_page_locked()).
454 */
455void fastcall unlock_page(struct page *page)
456{
457	smp_mb__before_clear_bit();
458	if (!TestClearPageLocked(page))
459		BUG();
460	smp_mb__after_clear_bit();
461	wake_up_page(page, PG_locked);
462}
463EXPORT_SYMBOL(unlock_page);
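
/*
 * A minimal sketch of the lock/recheck pattern that pairs with unlock_page()
 * throughout this file: lock_page() may sleep in __lock_page() below, and by
 * the time it returns the page may have been truncated, so page->mapping has
 * to be re-verified before the page is used.  The function name is
 * illustrative only.
 */
static int demo_lock_and_check(struct page *page, struct address_space *mapping)
{
	lock_page(page);
	if (page->mapping != mapping) {
		/* truncated while we slept - caller should retry the lookup */
		unlock_page(page);
		return -EAGAIN;
	}
	/* ... operate on the locked, still-attached page here ... */
	unlock_page(page);
	return 0;
}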
464
465/*
466 * End writeback against a page.
467 */
468void end_page_writeback(struct page *page)
469{
470	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
471		if (!test_clear_page_writeback(page))
472			BUG();
473	}
474	smp_mb__after_clear_bit();
475	wake_up_page(page, PG_writeback);
476}
477EXPORT_SYMBOL(end_page_writeback);
478
479/*
480 * Get a lock on the page, assuming we need to sleep to get it.
481 *
482 * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
483 * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
484 * chances are that on the second loop, the block layer's plug list is empty,
485 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
486 */
487void fastcall __lock_page(struct page *page)
488{
489	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
490
491	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
492							TASK_UNINTERRUPTIBLE);
493}
494EXPORT_SYMBOL(__lock_page);
495
496/*
497 * A rather lightweight function, finding and getting a reference to a
498 * pagecache page atomically.
499 */
500struct page * find_get_page(struct address_space *mapping, unsigned long offset)
501{
502	struct page *page;
503
504	read_lock_irq(&mapping->tree_lock);
505	page = radix_tree_lookup(&mapping->page_tree, offset);
506	if (page)
507		page_cache_get(page);
508	read_unlock_irq(&mapping->tree_lock);
509	return page;
510}
511
512EXPORT_SYMBOL(find_get_page);
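
/*
 * Sketch of a typical find_get_page() caller (illustrative name): the
 * returned reference pins the page, but nothing prevents it from being
 * truncated afterwards, so callers either tolerate that - as
 * do_generic_mapping_read() below does - or lock the page and re-check
 * page->mapping.
 */
static int demo_page_is_cached_uptodate(struct address_space *mapping,
					unsigned long index)
{
	struct page *page = find_get_page(mapping, index);
	int uptodate = 0;

	if (page) {
		uptodate = PageUptodate(page);
		page_cache_release(page);	/* drop the reference we took */
	}
	return uptodate;
}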
513
514/*
515 * Same as above, but trylock it instead of incrementing the count.
516 */
517struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
518{
519	struct page *page;
520
521	read_lock_irq(&mapping->tree_lock);
522	page = radix_tree_lookup(&mapping->page_tree, offset);
523	if (page && TestSetPageLocked(page))
524		page = NULL;
525	read_unlock_irq(&mapping->tree_lock);
526	return page;
527}
528
529EXPORT_SYMBOL(find_trylock_page);
530
531/**
532 * find_lock_page - locate, pin and lock a pagecache page
533 *
534 * @mapping: the address_space to search
535 * @offset: the page index
536 *
537 * Locates the desired pagecache page, locks it, increments its reference
538 * count and returns its address.
539 *
540 * Returns NULL if the page was not present. find_lock_page() may sleep.
541 */
542struct page *find_lock_page(struct address_space *mapping,
543				unsigned long offset)
544{
545	struct page *page;
546
547	read_lock_irq(&mapping->tree_lock);
548repeat:
549	page = radix_tree_lookup(&mapping->page_tree, offset);
550	if (page) {
551		page_cache_get(page);
552		if (TestSetPageLocked(page)) {
553			read_unlock_irq(&mapping->tree_lock);
554			lock_page(page);
555			read_lock_irq(&mapping->tree_lock);
556
557			/* Has the page been truncated while we slept? */
558			if (page->mapping != mapping || page->index != offset) {
559				unlock_page(page);
560				page_cache_release(page);
561				goto repeat;
562			}
563		}
564	}
565	read_unlock_irq(&mapping->tree_lock);
566	return page;
567}
568
569EXPORT_SYMBOL(find_lock_page);
570
571/**
572 * find_or_create_page - locate or add a pagecache page
573 *
574 * @mapping: the page's address_space
575 * @index: the page's index into the mapping
576 * @gfp_mask: page allocation mode
577 *
578 * Locates a page in the pagecache.  If the page is not present, a new page
579 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
580 * LRU list.  The returned page is locked and has its reference count
581 * incremented.
582 *
583 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
584 * allocation!
585 *
586 * find_or_create_page() returns the desired page's address, or NULL on
587 * memory exhaustion.
588 */
589struct page *find_or_create_page(struct address_space *mapping,
590		unsigned long index, unsigned int gfp_mask)
591{
592	struct page *page, *cached_page = NULL;
593	int err;
594repeat:
595	page = find_lock_page(mapping, index);
596	if (!page) {
597		if (!cached_page) {
598			cached_page = alloc_page(gfp_mask);
599			if (!cached_page)
600				return NULL;
601		}
602		err = add_to_page_cache_lru(cached_page, mapping,
603					index, gfp_mask);
604		if (!err) {
605			page = cached_page;
606			cached_page = NULL;
607		} else if (err == -EEXIST)
608			goto repeat;
609	}
610	if (cached_page)
611		page_cache_release(cached_page);
612	return page;
613}
614
615EXPORT_SYMBOL(find_or_create_page);
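
/*
 * Sketch of the grab-and-fill pattern find_or_create_page() enables: the
 * page comes back locked and referenced whether it was found or freshly
 * added, so the caller may initialise it, mark it uptodate and unlock it.
 * This example just zero-fills; a real filesystem would read from disk.
 * The function name is illustrative.
 */
static struct page *demo_grab_zeroed_page(struct address_space *mapping,
					  unsigned long index)
{
	struct page *page;
	void *kaddr;

	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
	if (!page)
		return NULL;
	if (!PageUptodate(page)) {
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr, 0, PAGE_CACHE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	unlock_page(page);
	return page;	/* caller drops the reference with page_cache_release() */
}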
616
617/**
618 * find_get_pages - gang pagecache lookup
619 * @mapping:	The address_space to search
620 * @start:	The starting page index
621 * @nr_pages:	The maximum number of pages
622 * @pages:	Where the resulting pages are placed
623 *
624 * find_get_pages() will search for and return a group of up to
625 * @nr_pages pages in the mapping.  The pages are placed at @pages.
626 * find_get_pages() takes a reference against the returned pages.
627 *
628 * The search returns a group of mapping-contiguous pages with ascending
629 * indexes.  There may be holes in the indices due to not-present pages.
630 *
631 * find_get_pages() returns the number of pages which were found.
632 */
633unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
634			    unsigned int nr_pages, struct page **pages)
635{
636	unsigned int i;
637	unsigned int ret;
638
639	read_lock_irq(&mapping->tree_lock);
640	ret = radix_tree_gang_lookup(&mapping->page_tree,
641				(void **)pages, start, nr_pages);
642	for (i = 0; i < ret; i++)
643		page_cache_get(pages[i]);
644	read_unlock_irq(&mapping->tree_lock);
645	return ret;
646}
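
/*
 * Sketch of a gang-lookup loop over find_get_pages(): process each batch,
 * drop the references, and continue after the last index returned.  The
 * pagevec_lookup() helper wraps this same pattern; the function name here
 * is illustrative.
 */
static unsigned long demo_count_cached_pages(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t start = 0;
	unsigned long total = 0;
	unsigned int nr;

	while ((nr = find_get_pages(mapping, start, 16, pages)) != 0) {
		unsigned int i;

		total += nr;
		start = pages[nr - 1]->index + 1;
		for (i = 0; i < nr; i++)
			page_cache_release(pages[i]);
	}
	return total;
}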
647
648/*
649 * Like find_get_pages, except we only return pages which are tagged with
650 * `tag'.   We update *index to index the next page for the traversal.
651 */
652unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
653			int tag, unsigned int nr_pages, struct page **pages)
654{
655	unsigned int i;
656	unsigned int ret;
657
658	read_lock_irq(&mapping->tree_lock);
659	ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
660				(void **)pages, *index, nr_pages, tag);
661	for (i = 0; i < ret; i++)
662		page_cache_get(pages[i]);
663	if (ret)
664		*index = pages[ret - 1]->index + 1;
665	read_unlock_irq(&mapping->tree_lock);
666	return ret;
667}
668
669/*
670 * Same as grab_cache_page, but do not wait if the page is unavailable.
671 * This is intended for speculative data generators, where the data can
672 * be regenerated if the page couldn't be grabbed.  This routine should
673 * be safe to call while holding the lock for another page.
674 *
675 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
676 * and deadlock against the caller's locked page.
677 */
678struct page *
679grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
680{
681	struct page *page = find_get_page(mapping, index);
682	unsigned int gfp_mask;
683
684	if (page) {
685		if (!TestSetPageLocked(page))
686			return page;
687		page_cache_release(page);
688		return NULL;
689	}
690	gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
691	page = alloc_pages(gfp_mask, 0);
692	if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
693		page_cache_release(page);
694		page = NULL;
695	}
696	return page;
697}
698
699EXPORT_SYMBOL(grab_cache_page_nowait);
700
701/*
702 * This is a generic file read routine, and uses the
703 * mapping->a_ops->readpage() function for the actual low-level
704 * stuff.
705 *
706 * This is really ugly. But the goto's actually try to clarify some
707 * of the logic when it comes to error handling etc.
708 *
709 * Note the struct file* is only passed for the use of readpage.  It may be
710 * NULL.
711 */
712void do_generic_mapping_read(struct address_space *mapping,
713			     struct file_ra_state *_ra,
714			     struct file *filp,
715			     loff_t *ppos,
716			     read_descriptor_t *desc,
717			     read_actor_t actor)
718{
719	struct inode *inode = mapping->host;
720	unsigned long index;
721	unsigned long end_index;
722	unsigned long offset;
723	unsigned long last_index;
724	unsigned long next_index;
725	unsigned long prev_index;
726	loff_t isize;
727	struct page *cached_page;
728	int error;
729	struct file_ra_state ra = *_ra;
730
731	cached_page = NULL;
732	index = *ppos >> PAGE_CACHE_SHIFT;
733	next_index = index;
734	prev_index = ra.prev_page;
735	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
736	offset = *ppos & ~PAGE_CACHE_MASK;
737
738	isize = i_size_read(inode);
739	if (!isize)
740		goto out;
741
742	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
743	for (;;) {
744		struct page *page;
745		unsigned long nr, ret;
746
747		/* nr is the maximum number of bytes to copy from this page */
748		nr = PAGE_CACHE_SIZE;
749		if (index >= end_index) {
750			if (index > end_index)
751				goto out;
752			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
753			if (nr <= offset) {
754				goto out;
755			}
756		}
757		nr = nr - offset;
758
759		cond_resched();
760		if (index == next_index)
761			next_index = page_cache_readahead(mapping, &ra, filp,
762					index, last_index - index);
763
764find_page:
765		page = find_get_page(mapping, index);
766		if (unlikely(page == NULL)) {
767			handle_ra_miss(mapping, &ra, index);
768			goto no_cached_page;
769		}
770		if (!PageUptodate(page))
771			goto page_not_up_to_date;
772page_ok:
773
774		/* If users can be writing to this page using arbitrary
775		 * virtual addresses, take care about potential aliasing
776		 * before reading the page on the kernel side.
777		 */
778		if (mapping_writably_mapped(mapping))
779			flush_dcache_page(page);
780
781		/*
782		 * When (part of) the same page is read multiple times
783		 * in succession, only mark it as accessed the first time.
784		 */
785		if (prev_index != index)
786			mark_page_accessed(page);
787		prev_index = index;
788
789		/*
790		 * Ok, we have the page, and it's up-to-date, so
791		 * now we can copy it to user space...
792		 *
793		 * The actor routine returns how many bytes were actually used..
794		 * NOTE! This may not be the same as how much of a user buffer
795		 * we filled up (we may be padding etc), so we can only update
796		 * "pos" here (the actor routine has to update the user buffer
797		 * pointers and the remaining count).
798		 */
799		ret = actor(desc, page, offset, nr);
800		offset += ret;
801		index += offset >> PAGE_CACHE_SHIFT;
802		offset &= ~PAGE_CACHE_MASK;
803
804		page_cache_release(page);
805		if (ret == nr && desc->count)
806			continue;
807		goto out;
808
809page_not_up_to_date:
810		/* Get exclusive access to the page ... */
811		lock_page(page);
812
813		/* Did it get truncated before we got the lock? */
814		if (!page->mapping) {
815			unlock_page(page);
816			page_cache_release(page);
817			continue;
818		}
819
820		/* Did somebody else fill it already? */
821		if (PageUptodate(page)) {
822			unlock_page(page);
823			goto page_ok;
824		}
825
826readpage:
827		/* Start the actual read. The read will unlock the page. */
828		error = mapping->a_ops->readpage(filp, page);
829
830		if (unlikely(error))
831			goto readpage_error;
832
833		if (!PageUptodate(page)) {
834			lock_page(page);
835			if (!PageUptodate(page)) {
836				if (page->mapping == NULL) {
837					/*
838					 * invalidate_inode_pages got it
839					 */
840					unlock_page(page);
841					page_cache_release(page);
842					goto find_page;
843				}
844				unlock_page(page);
845				error = -EIO;
846				goto readpage_error;
847			}
848			unlock_page(page);
849		}
850
851		/*
852		 * i_size must be checked after we have done ->readpage.
853		 *
854		 * Checking i_size after the readpage allows us to calculate
855		 * the correct value for "nr", which means the zero-filled
856		 * part of the page is not copied back to userspace (unless
857		 * another truncate extends the file - this is desired though).
858		 */
859		isize = i_size_read(inode);
860		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
861		if (unlikely(!isize || index > end_index)) {
862			page_cache_release(page);
863			goto out;
864		}
865
866		/* nr is the maximum number of bytes to copy from this page */
867		nr = PAGE_CACHE_SIZE;
868		if (index == end_index) {
869			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
870			if (nr <= offset) {
871				page_cache_release(page);
872				goto out;
873			}
874		}
875		nr = nr - offset;
876		goto page_ok;
877
878readpage_error:
879		/* UHHUH! A synchronous read error occurred. Report it */
880		desc->error = error;
881		page_cache_release(page);
882		goto out;
883
884no_cached_page:
885		/*
886		 * Ok, it wasn't cached, so we need to create a new
887		 * page..
888		 */
889		if (!cached_page) {
890			cached_page = page_cache_alloc_cold(mapping);
891			if (!cached_page) {
892				desc->error = -ENOMEM;
893				goto out;
894			}
895		}
896		error = add_to_page_cache_lru(cached_page, mapping,
897						index, GFP_KERNEL);
898		if (error) {
899			if (error == -EEXIST)
900				goto find_page;
901			desc->error = error;
902			goto out;
903		}
904		page = cached_page;
905		cached_page = NULL;
906		goto readpage;
907	}
908
909out:
910	*_ra = ra;
911
912	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
913	if (cached_page)
914		page_cache_release(cached_page);
915	if (filp)
916		file_accessed(filp);
917}
918
919EXPORT_SYMBOL(do_generic_mapping_read);
920
921int file_read_actor(read_descriptor_t *desc, struct page *page,
922			unsigned long offset, unsigned long size)
923{
924	char *kaddr;
925	unsigned long left, count = desc->count;
926
927	if (size > count)
928		size = count;
929
930	/*
931	 * Faults on the destination of a read are common, so do it before
932	 * taking the kmap.
933	 */
934	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
935		kaddr = kmap_atomic(page, KM_USER0);
936		left = __copy_to_user_inatomic(desc->arg.buf,
937						kaddr + offset, size);
938		kunmap_atomic(kaddr, KM_USER0);
939		if (left == 0)
940			goto success;
941	}
942
943	/* Do it the slow way */
944	kaddr = kmap(page);
945	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
946	kunmap(page);
947
948	if (left) {
949		size -= left;
950		desc->error = -EFAULT;
951	}
952success:
953	desc->count = count - size;
954	desc->written += size;
955	desc->arg.buf += size;
956	return size;
957}
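
/*
 * The actor contract used by do_generic_mapping_read() and
 * generic_file_sendfile(): consume up to `size' bytes of `page' starting at
 * `offset', update desc->count and desc->written, and return how many bytes
 * were actually consumed (returning less than `size' ends the read loop).
 * Below is a hypothetical actor that simply discards the data - useful only
 * as an illustration of the interface, e.g. for warming the page cache.
 */
static int demo_discard_actor(read_descriptor_t *desc, struct page *page,
			      unsigned long offset, unsigned long size)
{
	unsigned long count = desc->count;

	if (size > count)
		size = count;

	/* nothing is copied anywhere; only the accounting is updated */
	desc->count = count - size;
	desc->written += size;
	return size;
}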
958
959/*
960 * This is the "read()" routine for all filesystems
961 * that can use the page cache directly.
962 */
963ssize_t
964__generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
965		unsigned long nr_segs, loff_t *ppos)
966{
967	struct file *filp = iocb->ki_filp;
968	ssize_t retval;
969	unsigned long seg;
970	size_t count;
971
972	count = 0;
973	for (seg = 0; seg < nr_segs; seg++) {
974		const struct iovec *iv = &iov[seg];
975
976		/*
977		 * If any segment has a negative length, or the cumulative
978		 * length ever wraps negative then return -EINVAL.
979		 */
980		count += iv->iov_len;
981		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
982			return -EINVAL;
983		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
984			continue;
985		if (seg == 0)
986			return -EFAULT;
987		nr_segs = seg;
988		count -= iv->iov_len;	/* This segment is no good */
989		break;
990	}
991
992	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
993	if (filp->f_flags & O_DIRECT) {
994		loff_t pos = *ppos, size;
995		struct address_space *mapping;
996		struct inode *inode;
997
998		mapping = filp->f_mapping;
999		inode = mapping->host;
1000		retval = 0;
1001		if (!count)
1002			goto out; /* skip atime */
1003		size = i_size_read(inode);
1004		if (pos < size) {
1005			retval = generic_file_direct_IO(READ, iocb,
1006						iov, pos, nr_segs);
1007			if (retval > 0 && !is_sync_kiocb(iocb))
1008				retval = -EIOCBQUEUED;
1009			if (retval > 0)
1010				*ppos = pos + retval;
1011		}
1012		file_accessed(filp);
1013		goto out;
1014	}
1015
1016	retval = 0;
1017	if (count) {
1018		for (seg = 0; seg < nr_segs; seg++) {
1019			read_descriptor_t desc;
1020
1021			desc.written = 0;
1022			desc.arg.buf = iov[seg].iov_base;
1023			desc.count = iov[seg].iov_len;
1024			if (desc.count == 0)
1025				continue;
1026			desc.error = 0;
1027			do_generic_file_read(filp,ppos,&desc,file_read_actor);
1028			retval += desc.written;
1029			if (!retval) {
1030				retval = desc.error;
1031				break;
1032			}
1033		}
1034	}
1035out:
1036	return retval;
1037}
1038
1039EXPORT_SYMBOL(__generic_file_aio_read);
1040
1041ssize_t
1042generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
1043{
1044	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
1045
1046	BUG_ON(iocb->ki_pos != pos);
1047	return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
1048}
1049
1050EXPORT_SYMBOL(generic_file_aio_read);
1051
1052ssize_t
1053generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
1054{
1055	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
1056	struct kiocb kiocb;
1057	ssize_t ret;
1058
1059	init_sync_kiocb(&kiocb, filp);
1060	ret = __generic_file_aio_read(&kiocb, &local_iov, 1, ppos);
1061	if (-EIOCBQUEUED == ret)
1062		ret = wait_on_sync_kiocb(&kiocb);
1063	return ret;
1064}
1065
1066EXPORT_SYMBOL(generic_file_read);
1067
1068int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
1069{
1070	ssize_t written;
1071	unsigned long count = desc->count;
1072	struct file *file = desc->arg.data;
1073
1074	if (size > count)
1075		size = count;
1076
1077	written = file->f_op->sendpage(file, page, offset,
1078				       size, &file->f_pos, size<count);
1079	if (written < 0) {
1080		desc->error = written;
1081		written = 0;
1082	}
1083	desc->count = count - written;
1084	desc->written += written;
1085	return written;
1086}
1087
1088ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
1089			 size_t count, read_actor_t actor, void *target)
1090{
1091	read_descriptor_t desc;
1092
1093	if (!count)
1094		return 0;
1095
1096	desc.written = 0;
1097	desc.count = count;
1098	desc.arg.data = target;
1099	desc.error = 0;
1100
1101	do_generic_file_read(in_file, ppos, &desc, actor);
1102	if (desc.written)
1103		return desc.written;
1104	return desc.error;
1105}
1106
1107EXPORT_SYMBOL(generic_file_sendfile);
1108
1109static ssize_t
1110do_readahead(struct address_space *mapping, struct file *filp,
1111	     unsigned long index, unsigned long nr)
1112{
1113	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
1114		return -EINVAL;
1115
1116	force_page_cache_readahead(mapping, filp, index,
1117					max_sane_readahead(nr));
1118	return 0;
1119}
1120
1121asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
1122{
1123	ssize_t ret;
1124	struct file *file;
1125
1126	ret = -EBADF;
1127	file = fget(fd);
1128	if (file) {
1129		if (file->f_mode & FMODE_READ) {
1130			struct address_space *mapping = file->f_mapping;
1131			unsigned long start = offset >> PAGE_CACHE_SHIFT;
1132			unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
1133			unsigned long len = end - start + 1;
1134			ret = do_readahead(mapping, file, start, len);
1135		}
1136		fput(file);
1137	}
1138	return ret;
1139}
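
#if 0	/* userspace illustration only - not kernel code */
/*
 * How the syscall above is typically driven from userspace (a sketch,
 * assuming the glibc readahead() wrapper): hint that the first megabyte of
 * a file will be needed soon, so the kernel can start populating the page
 * cache before the reads arrive.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int prefetch_first_meg(const char *path)
{
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	/* best-effort hint; failure does not affect later reads */
	readahead(fd, 0, 1024 * 1024);
	close(fd);
	return 0;
}
#endif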
1140
1141#ifdef CONFIG_MMU
1142/*
1143 * This adds the requested page to the page cache if it isn't already there,
1144 * and schedules an I/O to read in its contents from disk.
1145 */
1146static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
1147static int fastcall page_cache_read(struct file * file, unsigned long offset)
1148{
1149	struct address_space *mapping = file->f_mapping;
1150	struct page *page;
1151	int error;
1152
1153	page = page_cache_alloc_cold(mapping);
1154	if (!page)
1155		return -ENOMEM;
1156
1157	error = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1158	if (!error) {
1159		error = mapping->a_ops->readpage(file, page);
1160		page_cache_release(page);
1161		return error;
1162	}
1163
1164	/*
1165	 * We arrive here in the unlikely event that someone
1166	 * raced with us and added our page to the cache first
1167	 * or we are out of memory for radix-tree nodes.
1168	 */
1169	page_cache_release(page);
1170	return error == -EEXIST ? 0 : error;
1171}
1172
1173#define MMAP_LOTSAMISS  (100)
1174
1175/*
1176 * filemap_nopage() is invoked via the vma operations vector for a
1177 * mapped memory region to read in file data during a page fault.
1178 *
1179 * The goto's are kind of ugly, but this streamlines the normal case of having
1180 * it in the page cache, and handles the special cases reasonably without
1181 * having a lot of duplicated code.
1182 */
1183struct page *filemap_nopage(struct vm_area_struct *area,
1184				unsigned long address, int *type)
1185{
1186	int error;
1187	struct file *file = area->vm_file;
1188	struct address_space *mapping = file->f_mapping;
1189	struct file_ra_state *ra = &file->f_ra;
1190	struct inode *inode = mapping->host;
1191	struct page *page;
1192	unsigned long size, pgoff;
1193	int did_readaround = 0, majmin = VM_FAULT_MINOR;
1194
1195	pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
1196
1197retry_all:
1198	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1199	if (pgoff >= size)
1200		goto outside_data_content;
1201
1202	/* If we don't want any read-ahead, don't bother */
1203	if (VM_RandomReadHint(area))
1204		goto no_cached_page;
1205
1206	/*
1207	 * The readahead code wants to be told about each and every page
1208	 * so it can build and shrink its windows appropriately
1209	 *
1210	 * For sequential accesses, we use the generic readahead logic.
1211	 */
1212	if (VM_SequentialReadHint(area))
1213		page_cache_readahead(mapping, ra, file, pgoff, 1);
1214
1215	/*
1216	 * Do we have something in the page cache already?
1217	 */
1218retry_find:
1219	page = find_get_page(mapping, pgoff);
1220	if (!page) {
1221		unsigned long ra_pages;
1222
1223		if (VM_SequentialReadHint(area)) {
1224			handle_ra_miss(mapping, ra, pgoff);
1225			goto no_cached_page;
1226		}
1227		ra->mmap_miss++;
1228
1229		/*
1230		 * Do we miss much more than hit in this file? If so,
1231		 * stop bothering with read-ahead. It will only hurt.
1232		 */
1233		if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS)
1234			goto no_cached_page;
1235
1236		/*
1237		 * To keep the pgmajfault counter straight, we need to
1238		 * check did_readaround, as this is an inner loop.
1239		 */
1240		if (!did_readaround) {
1241			majmin = VM_FAULT_MAJOR;
1242			inc_page_state(pgmajfault);
1243		}
1244		did_readaround = 1;
1245		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
1246		if (ra_pages) {
1247			pgoff_t start = 0;
1248
1249			if (pgoff > ra_pages / 2)
1250				start = pgoff - ra_pages / 2;
1251			do_page_cache_readahead(mapping, file, start, ra_pages);
1252		}
1253		page = find_get_page(mapping, pgoff);
1254		if (!page)
1255			goto no_cached_page;
1256	}
1257
1258	if (!did_readaround)
1259		ra->mmap_hit++;
1260
1261	/*
1262	 * Ok, found a page in the page cache, now we need to check
1263	 * that it's up-to-date.
1264	 */
1265	if (!PageUptodate(page))
1266		goto page_not_uptodate;
1267
1268success:
1269	/*
1270	 * Found the page and have a reference on it.
1271	 */
1272	mark_page_accessed(page);
1273	if (type)
1274		*type = majmin;
1275	return page;
1276
1277outside_data_content:
1278	/*
1279	 * An external ptracer can access pages that normally aren't
1280	 * accessible..
1281	 */
1282	if (area->vm_mm == current->mm)
1283		return NULL;
1284	/* Fall through to the non-read-ahead case */
1285no_cached_page:
1286	/*
1287	 * We're only likely to ever get here if MADV_RANDOM is in
1288	 * effect.
1289	 */
1290	error = page_cache_read(file, pgoff);
1291	grab_swap_token();
1292
1293	/*
1294	 * The page we want has now been added to the page cache.
1295	 * In the unlikely event that someone removed it in the
1296	 * meantime, we'll just come back here and read it again.
1297	 */
1298	if (error >= 0)
1299		goto retry_find;
1300
1301	/*
1302	 * An error return from page_cache_read can result if the
1303	 * system is low on memory, or a problem occurs while trying
1304	 * to schedule I/O.
1305	 */
1306	if (error == -ENOMEM)
1307		return NOPAGE_OOM;
1308	return NULL;
1309
1310page_not_uptodate:
1311	if (!did_readaround) {
1312		majmin = VM_FAULT_MAJOR;
1313		inc_page_state(pgmajfault);
1314	}
1315	lock_page(page);
1316
1317	/* Did it get truncated while we waited for it? */
1318	if (!page->mapping) {
1319		unlock_page(page);
1320		page_cache_release(page);
1321		goto retry_all;
1322	}
1323
1324	/* Did somebody else get it up-to-date? */
1325	if (PageUptodate(page)) {
1326		unlock_page(page);
1327		goto success;
1328	}
1329
1330	if (!mapping->a_ops->readpage(file, page)) {
1331		wait_on_page_locked(page);
1332		if (PageUptodate(page))
1333			goto success;
1334	}
1335
1336	/*
1337	 * Umm, take care of errors if the page isn't up-to-date.
1338	 * Try to re-read it _once_. We do this synchronously,
1339	 * because there really aren't any performance issues here
1340	 * and we need to check for errors.
1341	 */
1342	lock_page(page);
1343
1344	/* Somebody truncated the page on us? */
1345	if (!page->mapping) {
1346		unlock_page(page);
1347		page_cache_release(page);
1348		goto retry_all;
1349	}
1350
1351	/* Somebody else successfully read it in? */
1352	if (PageUptodate(page)) {
1353		unlock_page(page);
1354		goto success;
1355	}
1356	ClearPageError(page);
1357	if (!mapping->a_ops->readpage(file, page)) {
1358		wait_on_page_locked(page);
1359		if (PageUptodate(page))
1360			goto success;
1361	}
1362
1363	/*
1364	 * Things didn't work out. Return NULL to tell the
1365	 * mm layer so, possibly freeing the page cache page first.
1366	 */
1367	page_cache_release(page);
1368	return NULL;
1369}
1370
1371EXPORT_SYMBOL(filemap_nopage);
1372
1373static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
1374					int nonblock)
1375{
1376	struct address_space *mapping = file->f_mapping;
1377	struct page *page;
1378	int error;
1379
1380	/*
1381	 * Do we have something in the page cache already?
1382	 */
1383retry_find:
1384	page = find_get_page(mapping, pgoff);
1385	if (!page) {
1386		if (nonblock)
1387			return NULL;
1388		goto no_cached_page;
1389	}
1390
1391	/*
1392	 * Ok, found a page in the page cache, now we need to check
1393	 * that it's up-to-date.
1394	 */
1395	if (!PageUptodate(page)) {
1396		if (nonblock) {
1397			page_cache_release(page);
1398			return NULL;
1399		}
1400		goto page_not_uptodate;
1401	}
1402
1403success:
1404	/*
1405	 * Found the page and have a reference on it.
1406	 */
1407	mark_page_accessed(page);
1408	return page;
1409
1410no_cached_page:
1411	error = page_cache_read(file, pgoff);
1412
1413	/*
1414	 * The page we want has now been added to the page cache.
1415	 * In the unlikely event that someone removed it in the
1416	 * meantime, we'll just come back here and read it again.
1417	 */
1418	if (error >= 0)
1419		goto retry_find;
1420
1421	/*
1422	 * An error return from page_cache_read can result if the
1423	 * system is low on memory, or a problem occurs while trying
1424	 * to schedule I/O.
1425	 */
1426	return NULL;
1427
1428page_not_uptodate:
1429	lock_page(page);
1430
1431	/* Did it get truncated while we waited for it? */
1432	if (!page->mapping) {
1433		unlock_page(page);
1434		goto err;
1435	}
1436
1437	/* Did somebody else get it up-to-date? */
1438	if (PageUptodate(page)) {
1439		unlock_page(page);
1440		goto success;
1441	}
1442
1443	if (!mapping->a_ops->readpage(file, page)) {
1444		wait_on_page_locked(page);
1445		if (PageUptodate(page))
1446			goto success;
1447	}
1448
1449	/*
1450	 * Umm, take care of errors if the page isn't up-to-date.
1451	 * Try to re-read it _once_. We do this synchronously,
1452	 * because there really aren't any performance issues here
1453	 * and we need to check for errors.
1454	 */
1455	lock_page(page);
1456
1457	/* Somebody truncated the page on us? */
1458	if (!page->mapping) {
1459		unlock_page(page);
1460		goto err;
1461	}
1462	/* Somebody else successfully read it in? */
1463	if (PageUptodate(page)) {
1464		unlock_page(page);
1465		goto success;
1466	}
1467
1468	ClearPageError(page);
1469	if (!mapping->a_ops->readpage(file, page)) {
1470		wait_on_page_locked(page);
1471		if (PageUptodate(page))
1472			goto success;
1473	}
1474
1475	/*
1476	 * Things didn't work out. Return NULL to tell the
1477	 * mm layer so, possibly freeing the page cache page first.
1478	 */
1479err:
1480	page_cache_release(page);
1481
1482	return NULL;
1483}
1484
1485int filemap_populate(struct vm_area_struct *vma, unsigned long addr,
1486		unsigned long len, pgprot_t prot, unsigned long pgoff,
1487		int nonblock)
1488{
1489	struct file *file = vma->vm_file;
1490	struct address_space *mapping = file->f_mapping;
1491	struct inode *inode = mapping->host;
1492	unsigned long size;
1493	struct mm_struct *mm = vma->vm_mm;
1494	struct page *page;
1495	int err;
1496
1497	if (!nonblock)
1498		force_page_cache_readahead(mapping, vma->vm_file,
1499					pgoff, len >> PAGE_CACHE_SHIFT);
1500
1501repeat:
1502	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1503	if (pgoff + (len >> PAGE_CACHE_SHIFT) > size)
1504		return -EINVAL;
1505
1506	page = filemap_getpage(file, pgoff, nonblock);
1507
1508	/* XXX: This is wrong, a filesystem I/O error may have happened. Fix that as
1509	 * done in shmem_populate calling shmem_getpage */
1510	if (!page && !nonblock)
1511		return -ENOMEM;
1512
1513	if (page) {
1514		err = install_page(mm, vma, addr, page, prot);
1515		if (err) {
1516			page_cache_release(page);
1517			return err;
1518		}
1519	} else {
1520		/* No page was found just because we can't read it in now (being
1521		 * here implies nonblock != 0), but the page may exist, so set
1522		 * the PTE to fault it in later. */
1523		err = install_file_pte(mm, vma, addr, pgoff, prot);
1524		if (err)
1525			return err;
1526	}
1527
1528	len -= PAGE_SIZE;
1529	addr += PAGE_SIZE;
1530	pgoff++;
1531	if (len)
1532		goto repeat;
1533
1534	return 0;
1535}
1536
1537struct vm_operations_struct generic_file_vm_ops = {
1538	.nopage		= filemap_nopage,
1539	.populate	= filemap_populate,
1540};
1541
1542/* This is used for a general mmap of a disk file */
1543
1544int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1545{
1546	struct address_space *mapping = file->f_mapping;
1547
1548	if (!mapping->a_ops->readpage)
1549		return -ENOEXEC;
1550	file_accessed(file);
1551	vma->vm_ops = &generic_file_vm_ops;
1552	return 0;
1553}
1554EXPORT_SYMBOL(filemap_populate);
1555
1556/*
1557 * This is for filesystems which do not implement ->writepage.
1558 */
1559int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1560{
1561	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
1562		return -EINVAL;
1563	return generic_file_mmap(file, vma);
1564}
1565#else
1566int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1567{
1568	return -ENOSYS;
1569}
1570int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
1571{
1572	return -ENOSYS;
1573}
1574#endif /* CONFIG_MMU */
1575
1576EXPORT_SYMBOL(generic_file_mmap);
1577EXPORT_SYMBOL(generic_file_readonly_mmap);
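
/*
 * Sketch of how a simple filesystem wires the generic helpers exported in
 * this file into its file_operations (structure name illustrative).  A
 * filesystem without ->writepage would use generic_file_readonly_mmap for
 * .mmap instead.
 */
static struct file_operations demo_generic_file_ops = {
	.read		= generic_file_read,
	.aio_read	= generic_file_aio_read,
	.mmap		= generic_file_mmap,
	.sendfile	= generic_file_sendfile,
};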
1578
1579static inline struct page *__read_cache_page(struct address_space *mapping,
1580				unsigned long index,
1581				int (*filler)(void *,struct page*),
1582				void *data)
1583{
1584	struct page *page, *cached_page = NULL;
1585	int err;
1586repeat:
1587	page = find_get_page(mapping, index);
1588	if (!page) {
1589		if (!cached_page) {
1590			cached_page = page_cache_alloc_cold(mapping);
1591			if (!cached_page)
1592				return ERR_PTR(-ENOMEM);
1593		}
1594		err = add_to_page_cache_lru(cached_page, mapping,
1595					index, GFP_KERNEL);
1596		if (err == -EEXIST)
1597			goto repeat;
1598		if (err < 0) {
1599			/* Presumably ENOMEM for radix tree node */
1600			page_cache_release(cached_page);
1601			return ERR_PTR(err);
1602		}
1603		page = cached_page;
1604		cached_page = NULL;
1605		err = filler(data, page);
1606		if (err < 0) {
1607			page_cache_release(page);
1608			page = ERR_PTR(err);
1609		}
1610	}
1611	if (cached_page)
1612		page_cache_release(cached_page);
1613	return page;
1614}
1615
1616/*
1617 * Read into the page cache. If a page already exists,
1618 * and PageUptodate() is not set, try to fill the page.
1619 */
1620struct page *read_cache_page(struct address_space *mapping,
1621				unsigned long index,
1622				int (*filler)(void *,struct page*),
1623				void *data)
1624{
1625	struct page *page;
1626	int err;
1627
1628retry:
1629	page = __read_cache_page(mapping, index, filler, data);
1630	if (IS_ERR(page))
1631		goto out;
1632	mark_page_accessed(page);
1633	if (PageUptodate(page))
1634		goto out;
1635
1636	lock_page(page);
1637	if (!page->mapping) {
1638		unlock_page(page);
1639		page_cache_release(page);
1640		goto retry;
1641	}
1642	if (PageUptodate(page)) {
1643		unlock_page(page);
1644		goto out;
1645	}
1646	err = filler(data, page);
1647	if (err < 0) {
1648		page_cache_release(page);
1649		page = ERR_PTR(err);
1650	}
1651 out:
1652	return page;
1653}
1654
1655EXPORT_SYMBOL(read_cache_page);
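
/*
 * Sketch of the usual read_cache_page() calling convention: the filler is
 * anything that can bring one page uptodate, frequently just the mapping's
 * own ->readpage with the opaque data pointer carrying the struct file.
 * Since ->readpage unlocks the page when the read completes (possibly
 * asynchronously), the caller still waits and re-checks PageUptodate().
 * Names are illustrative.
 */
static int demo_filler(void *data, struct page *page)
{
	struct file *file = data;

	return file->f_mapping->a_ops->readpage(file, page);
}

static struct page *demo_read_one_page(struct file *file, unsigned long index)
{
	struct page *page;

	page = read_cache_page(file->f_mapping, index, demo_filler, file);
	if (IS_ERR(page))
		return page;
	wait_on_page_locked(page);
	if (!PageUptodate(page)) {
		page_cache_release(page);
		return ERR_PTR(-EIO);
	}
	return page;	/* caller drops the reference when done */
}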
1656
1657/*
1658 * If the page was newly created, increment its refcount and add it to the
1659 * caller's lru-buffering pagevec.  This function is specifically for
1660 * generic_file_write().
1661 */
1662static inline struct page *
1663__grab_cache_page(struct address_space *mapping, unsigned long index,
1664			struct page **cached_page, struct pagevec *lru_pvec)
1665{
1666	int err;
1667	struct page *page;
1668repeat:
1669	page = find_lock_page(mapping, index);
1670	if (!page) {
1671		if (!*cached_page) {
1672			*cached_page = page_cache_alloc(mapping);
1673			if (!*cached_page)
1674				return NULL;
1675		}
1676		err = add_to_page_cache(*cached_page, mapping,
1677					index, GFP_KERNEL);
1678		if (err == -EEXIST)
1679			goto repeat;
1680		if (err == 0) {
1681			page = *cached_page;
1682			page_cache_get(page);
1683			if (!pagevec_add(lru_pvec, page))
1684				__pagevec_lru_add(lru_pvec);
1685			*cached_page = NULL;
1686		}
1687	}
1688	return page;
1689}
1690
1691/*
1692 * The logic we want is
1693 *
1694 *	if suid or (sgid and xgrp)
1695 *		remove privs
1696 */
1697int remove_suid(struct dentry *dentry)
1698{
1699	mode_t mode = dentry->d_inode->i_mode;
1700	int kill = 0;
1701	int result = 0;
1702
1703	/* suid always must be killed */
1704	if (unlikely(mode & S_ISUID))
1705		kill = ATTR_KILL_SUID;
1706
1707	/*
1708	 * sgid without any exec bits is just a mandatory locking mark; leave
1709	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
1710	 */
1711	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1712		kill |= ATTR_KILL_SGID;
1713
1714	if (unlikely(kill && !capable(CAP_FSETID))) {
1715		struct iattr newattrs;
1716
1717		newattrs.ia_valid = ATTR_FORCE | kill;
1718		result = notify_change(dentry, &newattrs);
1719	}
1720	return result;
1721}
1722EXPORT_SYMBOL(remove_suid);
1723
1724size_t
1725__filemap_copy_from_user_iovec(char *vaddr,
1726			const struct iovec *iov, size_t base, size_t bytes)
1727{
1728	size_t copied = 0, left = 0;
1729
1730	while (bytes) {
1731		char __user *buf = iov->iov_base + base;
1732		int copy = min(bytes, iov->iov_len - base);
1733
1734		base = 0;
1735		left = __copy_from_user_inatomic(vaddr, buf, copy);
1736		copied += copy;
1737		bytes -= copy;
1738		vaddr += copy;
1739		iov++;
1740
1741		if (unlikely(left)) {
1742			/* zero the rest of the target like __copy_from_user */
1743			if (bytes)
1744				memset(vaddr, 0, bytes);
1745			break;
1746		}
1747	}
1748	return copied - left;
1749}
1750
1751/*
1752 * Performs necessary checks before doing a write
1753 *
1754 * Can adjust the write position or the number of bytes to write.
1755 * Returns the appropriate error code that the caller should return,
1756 * or zero if the write should be allowed.
1757 */
1758inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
1759{
1760	struct inode *inode = file->f_mapping->host;
1761	unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1762
1763	if (unlikely(*pos < 0))
1764		return -EINVAL;
1765
1766	if (!isblk) {
1767		/* FIXME: this is for backwards compatibility with 2.4 */
1768		if (file->f_flags & O_APPEND)
1769			*pos = i_size_read(inode);
1770
1771		if (limit != RLIM_INFINITY) {
1772			if (*pos >= limit) {
1773				send_sig(SIGXFSZ, current, 0);
1774				return -EFBIG;
1775			}
1776			if (*count > limit - (typeof(limit))*pos) {
1777				*count = limit - (typeof(limit))*pos;
1778			}
1779		}
1780	}
1781
1782	/*
1783	 * LFS rule
1784	 */
1785	if (unlikely(*pos + *count > MAX_NON_LFS &&
1786				!(file->f_flags & O_LARGEFILE))) {
1787		if (*pos >= MAX_NON_LFS) {
1788			send_sig(SIGXFSZ, current, 0);
1789			return -EFBIG;
1790		}
1791		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
1792			*count = MAX_NON_LFS - (unsigned long)*pos;
1793		}
1794	}
1795
1796	/*
1797	 * Are we about to exceed the fs block limit ?
1798	 *
1799	 * If we have written data it becomes a short write.  If we have
1800	 * exceeded without writing data we send a signal and return EFBIG.
1801 * Linus' frestrict idea will clean these up nicely.
1802	 */
1803	if (likely(!isblk)) {
1804		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
1805			if (*count || *pos > inode->i_sb->s_maxbytes) {
1806				send_sig(SIGXFSZ, current, 0);
1807				return -EFBIG;
1808			}
1809			/* zero-length writes at ->s_maxbytes are OK */
1810		}
1811
1812		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
1813			*count = inode->i_sb->s_maxbytes - *pos;
1814	} else {
1815		loff_t isize;
1816		if (bdev_read_only(I_BDEV(inode)))
1817			return -EPERM;
1818		isize = i_size_read(inode);
1819		if (*pos >= isize) {
1820			if (*count || *pos > isize)
1821				return -ENOSPC;
1822		}
1823
1824		if (*pos + *count > isize)
1825			*count = isize - *pos;
1826	}
1827	return 0;
1828}
1829EXPORT_SYMBOL(generic_write_checks);
1830
1831ssize_t
1832generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
1833		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
1834		size_t count, size_t ocount)
1835{
1836	struct file	*file = iocb->ki_filp;
1837	struct address_space *mapping = file->f_mapping;
1838	struct inode	*inode = mapping->host;
1839	ssize_t		written;
1840
1841	if (count != ocount)
1842		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
1843
1844	written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
1845	if (written > 0) {
1846		loff_t end = pos + written;
1847		if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
1848			i_size_write(inode,  end);
1849			mark_inode_dirty(inode);
1850		}
1851		*ppos = end;
1852	}
1853
1854	/*
1855	 * Sync the fs metadata but not the minor inode changes and
1856	 * of course not the data as we did direct DMA for the IO.
1857	 * i_sem is held, which protects generic_osync_inode() from
1858	 * livelocking.
1859	 */
1860	if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
1861		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
1862		if (err < 0)
1863			written = err;
1864	}
1865	if (written == count && !is_sync_kiocb(iocb))
1866		written = -EIOCBQUEUED;
1867	return written;
1868}
1869EXPORT_SYMBOL(generic_file_direct_write);
1870
1871ssize_t
1872generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
1873		unsigned long nr_segs, loff_t pos, loff_t *ppos,
1874		size_t count, ssize_t written)
1875{
1876	struct file *file = iocb->ki_filp;
1877	struct address_space * mapping = file->f_mapping;
1878	struct address_space_operations *a_ops = mapping->a_ops;
1879	struct inode 	*inode = mapping->host;
1880	long		status = 0;
1881	struct page	*page;
1882	struct page	*cached_page = NULL;
1883	size_t		bytes;
1884	struct pagevec	lru_pvec;
1885	const struct iovec *cur_iov = iov; /* current iovec */
1886	size_t		iov_base = 0;	   /* offset in the current iovec */
1887	char __user	*buf;
1888
1889	pagevec_init(&lru_pvec, 0);
1890
1891	/*
1892	 * handle partial DIO write.  Adjust cur_iov if needed.
1893	 */
1894	if (likely(nr_segs == 1))
1895		buf = iov->iov_base + written;
1896	else {
1897		filemap_set_next_iovec(&cur_iov, &iov_base, written);
1898		buf = cur_iov->iov_base + iov_base;
1899	}
1900
1901	do {
1902		unsigned long index;
1903		unsigned long offset;
1904		unsigned long maxlen;
1905		size_t copied;
1906
1907		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
1908		index = pos >> PAGE_CACHE_SHIFT;
1909		bytes = PAGE_CACHE_SIZE - offset;
1910		if (bytes > count)
1911			bytes = count;
1912
1913		/*
1914		 * Bring in the user page that we will copy from _first_.
1915		 * Otherwise there's a nasty deadlock on copying from the
1916		 * same page as we're writing to, without it being marked
1917		 * up-to-date.
1918		 */
1919		maxlen = cur_iov->iov_len - iov_base;
1920		if (maxlen > bytes)
1921			maxlen = bytes;
1922		fault_in_pages_readable(buf, maxlen);
1923
1924		page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
1925		if (!page) {
1926			status = -ENOMEM;
1927			break;
1928		}
1929
1930		status = a_ops->prepare_write(file, page, offset, offset+bytes);
1931		if (unlikely(status)) {
1932			loff_t isize = i_size_read(inode);
1933			/*
1934			 * prepare_write() may have instantiated a few blocks
1935			 * outside i_size.  Trim these off again.
1936			 */
1937			unlock_page(page);
1938			page_cache_release(page);
1939			if (pos + bytes > isize)
1940				vmtruncate(inode, isize);
1941			break;
1942		}
1943		if (likely(nr_segs == 1))
1944			copied = filemap_copy_from_user(page, offset,
1945							buf, bytes);
1946		else
1947			copied = filemap_copy_from_user_iovec(page, offset,
1948						cur_iov, iov_base, bytes);
1949		flush_dcache_page(page);
1950		status = a_ops->commit_write(file, page, offset, offset+bytes);
1951		if (likely(copied > 0)) {
1952			if (!status)
1953				status = copied;
1954
1955			if (status >= 0) {
1956				written += status;
1957				count -= status;
1958				pos += status;
1959				buf += status;
1960				if (unlikely(nr_segs > 1)) {
1961					filemap_set_next_iovec(&cur_iov,
1962							&iov_base, status);
1963					if (count)
1964						buf = cur_iov->iov_base +
1965							iov_base;
1966				} else {
1967					iov_base += status;
1968				}
1969			}
1970		}
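		/*
		 * A short copy means the copy from the user buffer faulted
		 * part-way through; report -EFAULT unless an earlier error
		 * takes precedence.
		 */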
1971		if (unlikely(copied != bytes))
1972			if (status >= 0)
1973				status = -EFAULT;
1974		unlock_page(page);
1975		mark_page_accessed(page);
1976		page_cache_release(page);
1977		if (status < 0)
1978			break;
1979		balance_dirty_pages_ratelimited(mapping);
1980		cond_resched();
1981	} while (count);
1982	*ppos = pos;
1983
1984	if (cached_page)
1985		page_cache_release(cached_page);
1986
1987	/*
1988	 * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC
1989	 */
1990	if (likely(status >= 0)) {
1991		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
1992			if (!a_ops->writepage || !is_sync_kiocb(iocb))
1993				status = generic_osync_inode(inode, mapping,
1994						OSYNC_METADATA|OSYNC_DATA);
1995		}
1996	}
1997
1998	/*
1999	 * If we get here for O_DIRECT writes then we must have fallen through
2000	 * to buffered writes (block instantiation inside i_size).  So we sync
2001	 * the file data here, to try to honour O_DIRECT expectations.
2002	 */
2003	if (unlikely(file->f_flags & O_DIRECT) && written)
2004		status = filemap_write_and_wait(mapping);
2005
2006	pagevec_lru_add(&lru_pvec);
2007	return written ? written : status;
2008}
2009EXPORT_SYMBOL(generic_file_buffered_write);
2010
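/*
 * Core of the generic write path: validate the iovec, apply the usual
 * write checks (size limits, suid/sgid removal, timestamp update), then
 * hand the request to the direct-IO or buffered writer as appropriate.
 * Callers such as generic_file_aio_write() take i_sem around this.
 */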
2011ssize_t
2012__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
2013				unsigned long nr_segs, loff_t *ppos)
2014{
2015	struct file *file = iocb->ki_filp;
2016	struct address_space * mapping = file->f_mapping;
2017	size_t ocount;		/* original count */
2018	size_t count;		/* after file limit checks */
2019	struct inode 	*inode = mapping->host;
2020	unsigned long	seg;
2021	loff_t		pos;
2022	ssize_t		written;
2023	ssize_t		err;
2024
2025	ocount = 0;
2026	for (seg = 0; seg < nr_segs; seg++) {
2027		const struct iovec *iv = &iov[seg];
2028
2029		/*
2030		 * If any segment has a negative length, or the cumulative
2031		 * length ever wraps negative, then return -EINVAL.
2032		 */
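		/* A single sign test on (ocount | iov_len) catches both cases. */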
2033		ocount += iv->iov_len;
2034		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
2035			return -EINVAL;
2036		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
2037			continue;
2038		if (seg == 0)
2039			return -EFAULT;
2040		nr_segs = seg;
2041		ocount -= iv->iov_len;	/* This segment is no good */
2042		break;
2043	}
2044
2045	count = ocount;
2046	pos = *ppos;
2047
2048	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2049
2050	/* We can write back this queue in page reclaim */
2051	current->backing_dev_info = mapping->backing_dev_info;
2052	written = 0;
2053
2054	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2055	if (err)
2056		goto out;
2057
2058	if (count == 0)
2059		goto out;
2060
2061	err = remove_suid(file->f_dentry);
2062	if (err)
2063		goto out;
2064
2065	inode_update_time(inode, 1);
2066
2067	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2068	if (unlikely(file->f_flags & O_DIRECT)) {
2069		written = generic_file_direct_write(iocb, iov,
2070				&nr_segs, pos, ppos, count, ocount);
2071		if (written < 0 || written == count)
2072			goto out;
2073		/*
2074		 * direct-io write to a hole: fall through to buffered I/O
2075		 * to complete the rest of the request.
2076		 */
2077		pos += written;
2078		count -= written;
2079	}
2080
2081	written = generic_file_buffered_write(iocb, iov, nr_segs,
2082			pos, ppos, count, written);
2083out:
2084	current->backing_dev_info = NULL;
2085	return written ? written : err;
2086}
2087EXPORT_SYMBOL(generic_file_aio_write_nolock);
2088
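/*
 * Like __generic_file_aio_write_nolock(), but for O_SYNC files (or
 * sync-mandated inodes) it also pushes the dirtied page range out to
 * disk before returning.
 */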
2089ssize_t
2090generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
2091				unsigned long nr_segs, loff_t *ppos)
2092{
2093	struct file *file = iocb->ki_filp;
2094	struct address_space *mapping = file->f_mapping;
2095	struct inode *inode = mapping->host;
2096	ssize_t ret;
2097	loff_t pos = *ppos;
2098
2099	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs, ppos);
2100
2101	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2102		int err;
2103
2104		err = sync_page_range_nolock(inode, mapping, pos, ret);
2105		if (err < 0)
2106			ret = err;
2107	}
2108	return ret;
2109}
2110
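/*
 * Synchronous (non-AIO) wrappers: run the aio write path with an
 * on-stack kiocb and, if the I/O got queued, wait for it to complete.
 */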
2111ssize_t
2112__generic_file_write_nolock(struct file *file, const struct iovec *iov,
2113				unsigned long nr_segs, loff_t *ppos)
2114{
2115	struct kiocb kiocb;
2116	ssize_t ret;
2117
2118	init_sync_kiocb(&kiocb, file);
2119	ret = __generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
2120	if (ret == -EIOCBQUEUED)
2121		ret = wait_on_sync_kiocb(&kiocb);
2122	return ret;
2123}
2124
2125ssize_t
2126generic_file_write_nolock(struct file *file, const struct iovec *iov,
2127				unsigned long nr_segs, loff_t *ppos)
2128{
2129	struct kiocb kiocb;
2130	ssize_t ret;
2131
2132	init_sync_kiocb(&kiocb, file);
2133	ret = generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
2134	if (-EIOCBQUEUED == ret)
2135		ret = wait_on_sync_kiocb(&kiocb);
2136	return ret;
2137}
2138EXPORT_SYMBOL(generic_file_write_nolock);
2139
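/*
 * generic_file_aio_write() takes i_sem itself, writes the single user
 * buffer through the pagecache, and for O_SYNC files (or sync-mandated
 * inodes) syncs the written range before returning.  iocb->ki_pos must
 * match the passed-in pos.
 */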
2140ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf,
2141			       size_t count, loff_t pos)
2142{
2143	struct file *file = iocb->ki_filp;
2144	struct address_space *mapping = file->f_mapping;
2145	struct inode *inode = mapping->host;
2146	ssize_t ret;
2147	struct iovec local_iov = { .iov_base = (void __user *)buf,
2148					.iov_len = count };
2149
2150	BUG_ON(iocb->ki_pos != pos);
2151
2152	down(&inode->i_sem);
2153	ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1,
2154						&iocb->ki_pos);
2155	up(&inode->i_sem);
2156
2157	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2158		ssize_t err;
2159
2160		err = sync_page_range(inode, mapping, pos, ret);
2161		if (err < 0)
2162			ret = err;
2163	}
2164	return ret;
2165}
2166EXPORT_SYMBOL(generic_file_aio_write);
2167
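/*
 * generic_file_write() is the plain write(2) entry point for filesystems
 * that rely on the generic pagecache helpers; simple filesystems (ext2,
 * for instance) typically point file_operations->write straight at it.
 */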
2168ssize_t generic_file_write(struct file *file, const char __user *buf,
2169			   size_t count, loff_t *ppos)
2170{
2171	struct address_space *mapping = file->f_mapping;
2172	struct inode *inode = mapping->host;
2173	ssize_t	ret;
2174	struct iovec local_iov = { .iov_base = (void __user *)buf,
2175					.iov_len = count };
2176
2177	down(&inode->i_sem);
2178	ret = __generic_file_write_nolock(file, &local_iov, 1, ppos);
2179	up(&inode->i_sem);
2180
2181	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2182		ssize_t err;
2183
2184		err = sync_page_range(inode, mapping, *ppos - ret, ret);
2185		if (err < 0)
2186			ret = err;
2187	}
2188	return ret;
2189}
2190EXPORT_SYMBOL(generic_file_write);
2191
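/*
 * Vectored read/write entry points (readv(2)/writev(2)): readv is a
 * synchronous wrapper around __generic_file_aio_read(); writev takes
 * i_sem, reuses the nolock write path and, for O_SYNC files, syncs the
 * written range afterwards.
 */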
2192ssize_t generic_file_readv(struct file *filp, const struct iovec *iov,
2193			unsigned long nr_segs, loff_t *ppos)
2194{
2195	struct kiocb kiocb;
2196	ssize_t ret;
2197
2198	init_sync_kiocb(&kiocb, filp);
2199	ret = __generic_file_aio_read(&kiocb, iov, nr_segs, ppos);
2200	if (-EIOCBQUEUED == ret)
2201		ret = wait_on_sync_kiocb(&kiocb);
2202	return ret;
2203}
2204EXPORT_SYMBOL(generic_file_readv);
2205
2206ssize_t generic_file_writev(struct file *file, const struct iovec *iov,
2207			unsigned long nr_segs, loff_t *ppos)
2208{
2209	struct address_space *mapping = file->f_mapping;
2210	struct inode *inode = mapping->host;
2211	ssize_t ret;
2212
2213	down(&inode->i_sem);
2214	ret = __generic_file_write_nolock(file, iov, nr_segs, ppos);
2215	up(&inode->i_sem);
2216
2217	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2218		int err;
2219
2220		err = sync_page_range(inode, mapping, *ppos - ret, ret);
2221		if (err < 0)
2222			ret = err;
2223	}
2224	return ret;
2225}
2226EXPORT_SYMBOL(generic_file_writev);
2227
2228/*
2229 * Called under i_sem for writes to S_ISREG files.   Returns -EIO if something
2230 * went wrong during pagecache shootdown.
2231 */
2232ssize_t
2233generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2234	loff_t offset, unsigned long nr_segs)
2235{
2236	struct file *file = iocb->ki_filp;
2237	struct address_space *mapping = file->f_mapping;
2238	ssize_t retval;
2239	size_t write_len = 0;
2240
2241	/*
2242	 * If it's a write, unmap all memory mappings of the file up-front.  This
2243	 * will cause any pte dirty bits to be propagated into the pageframes
2244	 * for the subsequent filemap_write_and_wait().
2245	 */
2246	if (rw == WRITE) {
2247		write_len = iov_length(iov, nr_segs);
2248		if (mapping_mapped(mapping))
2249			unmap_mapping_range(mapping, offset, write_len, 0);
2250	}
2251
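	/*
	 * Flush and wait on any dirty pagecache so the device sees current
	 * data, then, after a direct write, drop the cached pages covering
	 * the written range so later buffered reads don't see stale data.
	 */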
2252	retval = filemap_write_and_wait(mapping);
2253	if (retval == 0) {
2254		retval = mapping->a_ops->direct_IO(rw, iocb, iov,
2255						offset, nr_segs);
2256		if (rw == WRITE && mapping->nrpages) {
2257			pgoff_t end = (offset + write_len - 1)
2258						>> PAGE_CACHE_SHIFT;
2259			int err = invalidate_inode_pages2_range(mapping,
2260					offset >> PAGE_CACHE_SHIFT, end);
2261			if (err)
2262				retval = err;
2263		}
2264	}
2265	return retval;
2266}
2267EXPORT_SYMBOL_GPL(generic_file_direct_IO);
2268