filemap.c revision 124d3b7041f9a0ca7c43a6293e1cae4576c32fd5
1/*
2 *	linux/mm/filemap.c
3 *
4 * Copyright (C) 1994-1999  Linus Torvalds
5 */
6
7/*
8 * This file handles the generic file mmap semantics used by
9 * most "normal" filesystems (but you don't /have/ to use this:
10 * the NFS filesystem used to do this differently, for example)
11 */
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/compiler.h>
15#include <linux/fs.h>
16#include <linux/uaccess.h>
17#include <linux/aio.h>
18#include <linux/capability.h>
19#include <linux/kernel_stat.h>
20#include <linux/mm.h>
21#include <linux/swap.h>
22#include <linux/mman.h>
23#include <linux/pagemap.h>
24#include <linux/file.h>
25#include <linux/uio.h>
26#include <linux/hash.h>
27#include <linux/writeback.h>
28#include <linux/backing-dev.h>
29#include <linux/pagevec.h>
30#include <linux/blkdev.h>
32#include <linux/security.h>
33#include <linux/syscalls.h>
34#include <linux/cpuset.h>
35#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
36#include "internal.h"
37
38/*
39 * FIXME: remove all knowledge of the buffer layer from the core VM
40 */
41#include <linux/buffer_head.h> /* for generic_osync_inode */
42
43#include <asm/mman.h>
44
45static ssize_t
46generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
47	loff_t offset, unsigned long nr_segs);
48
49/*
50 * Shared mappings implemented 30.11.1994. It's not fully working yet,
51 * though.
52 *
53 * Shared mappings now work. 15.8.1995  Bruno.
54 *
55 * finished 'unifying' the page and buffer cache and SMP-threaded the
56 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
57 *
58 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
59 */
60
61/*
62 * Lock ordering:
63 *
64 *  ->i_mmap_lock		(vmtruncate)
65 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
66 *      ->swap_lock		(exclusive_swap_page, others)
67 *        ->mapping->tree_lock
68 *          ->zone.lock
69 *
70 *  ->i_mutex
71 *    ->i_mmap_lock		(truncate->unmap_mapping_range)
72 *
73 *  ->mmap_sem
74 *    ->i_mmap_lock
75 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
76 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
77 *
78 *  ->mmap_sem
79 *    ->lock_page		(access_process_vm)
80 *
81 *  ->i_mutex			(generic_file_buffered_write)
82 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
83 *
84 *  ->i_mutex
85 *    ->i_alloc_sem             (various)
86 *
87 *  ->inode_lock
88 *    ->sb_lock			(fs/fs-writeback.c)
89 *    ->mapping->tree_lock	(__sync_single_inode)
90 *
91 *  ->i_mmap_lock
92 *    ->anon_vma.lock		(vma_adjust)
93 *
94 *  ->anon_vma.lock
95 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
96 *
97 *  ->page_table_lock or pte_lock
98 *    ->swap_lock		(try_to_unmap_one)
99 *    ->private_lock		(try_to_unmap_one)
100 *    ->tree_lock		(try_to_unmap_one)
101 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
102 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
103 *    ->private_lock		(page_remove_rmap->set_page_dirty)
104 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
105 *    ->inode_lock		(page_remove_rmap->set_page_dirty)
106 *    ->inode_lock		(zap_pte_range->set_page_dirty)
107 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
108 *
109 *  ->task->proc_lock
110 *    ->dcache_lock		(proc_pid_lookup)
111 */
112
113/*
114 * Remove a page from the page cache and free it. Caller has to make
115 * sure the page is locked and that nobody else uses it - or that usage
116 * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
117 */
118void __remove_from_page_cache(struct page *page)
119{
120	struct address_space *mapping = page->mapping;
121
122	radix_tree_delete(&mapping->page_tree, page->index);
123	page->mapping = NULL;
124	mapping->nrpages--;
125	__dec_zone_page_state(page, NR_FILE_PAGES);
126	BUG_ON(page_mapped(page));
127
128	/*
129	 * Some filesystems seem to re-dirty the page even after
130	 * the VM has canceled the dirty bit (eg ext3 journaling).
131	 *
132	 * Fix it up by doing a final dirty accounting check after
133	 * having removed the page entirely.
134	 */
135	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
136		dec_zone_page_state(page, NR_FILE_DIRTY);
137		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
138	}
139}
140
141void remove_from_page_cache(struct page *page)
142{
143	struct address_space *mapping = page->mapping;
144
145	BUG_ON(!PageLocked(page));
146
147	write_lock_irq(&mapping->tree_lock);
148	__remove_from_page_cache(page);
149	write_unlock_irq(&mapping->tree_lock);
150}
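/*
 * Example (an illustrative sketch, not part of the original code): a
 * truncate-style caller uses remove_from_page_cache() with the page locked
 * and drops the pagecache reference itself:
 *
 *	lock_page(page);
 *	if (page->mapping == mapping)
 *		remove_from_page_cache(page);
 *	unlock_page(page);
 *	page_cache_release(page);	(drop the pagecache reference)
 */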
151
152static int sync_page(void *word)
153{
154	struct address_space *mapping;
155	struct page *page;
156
157	page = container_of((unsigned long *)word, struct page, flags);
158
159	/*
160	 * page_mapping() is being called without PG_locked held.
161	 * Some knowledge of the state and use of the page is used to
162	 * reduce the requirements down to a memory barrier.
163	 * The danger here is of a stale page_mapping() return value
164	 * indicating a struct address_space different from the one it's
165	 * associated with when it is associated with one.
166	 * After smp_mb(), it's either the correct page_mapping() for
167	 * the page, or an old page_mapping() and the page's own
168	 * page_mapping() has gone NULL.
169	 * The ->sync_page() address_space operation must tolerate
170	 * page_mapping() going NULL. By an amazing coincidence,
171	 * this comes about because none of the users of the page
172	 * in the ->sync_page() methods make essential use of the
173	 * page_mapping(), merely passing the page down to the backing
174	 * device's unplug functions when it's non-NULL, which in turn
175	 * ignore it for all cases but swap, where only page_private(page) is
176	 * of interest. When page_mapping() does go NULL, the entire
177	 * call stack gracefully ignores the page and returns.
178	 * -- wli
179	 */
180	smp_mb();
181	mapping = page_mapping(page);
182	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
183		mapping->a_ops->sync_page(page);
184	io_schedule();
185	return 0;
186}
187
188static int sync_page_killable(void *word)
189{
190	sync_page(word);
191	return fatal_signal_pending(current) ? -EINTR : 0;
192}
193
194/**
195 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
196 * @mapping:	address space structure to write
197 * @start:	offset in bytes where the range starts
198 * @end:	offset in bytes where the range ends (inclusive)
199 * @sync_mode:	enable synchronous operation
200 *
201 * Start writeback against all of a mapping's dirty pages that lie
202 * within the byte offsets <start, end> inclusive.
203 *
204 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
205 * opposed to a regular memory cleansing writeback.  The difference between
206 * these two operations is that if a dirty page/buffer is encountered, it must
207 * be waited upon, and not just skipped over.
208 */
209int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
210				loff_t end, int sync_mode)
211{
212	int ret;
213	struct writeback_control wbc = {
214		.sync_mode = sync_mode,
215		.nr_to_write = mapping->nrpages * 2,
216		.range_start = start,
217		.range_end = end,
218	};
219
220	if (!mapping_cap_writeback_dirty(mapping))
221		return 0;
222
223	ret = do_writepages(mapping, &wbc);
224	return ret;
225}
226
227static inline int __filemap_fdatawrite(struct address_space *mapping,
228	int sync_mode)
229{
230	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
231}
232
233int filemap_fdatawrite(struct address_space *mapping)
234{
235	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
236}
237EXPORT_SYMBOL(filemap_fdatawrite);
238
239static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
240				loff_t end)
241{
242	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
243}
244
245/**
246 * filemap_flush - mostly a non-blocking flush
247 * @mapping:	target address_space
248 *
249 * This is a mostly non-blocking flush.  Not suitable for data-integrity
250 * purposes - I/O may not be started against all dirty pages.
251 */
252int filemap_flush(struct address_space *mapping)
253{
254	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
255}
256EXPORT_SYMBOL(filemap_flush);
257
258/**
259 * wait_on_page_writeback_range - wait for writeback to complete
260 * @mapping:	target address_space
261 * @start:	beginning page index
262 * @end:	ending page index
263 *
264 * Wait for writeback to complete against pages indexed by start->end
265 * inclusive
266 */
267int wait_on_page_writeback_range(struct address_space *mapping,
268				pgoff_t start, pgoff_t end)
269{
270	struct pagevec pvec;
271	int nr_pages;
272	int ret = 0;
273	pgoff_t index;
274
275	if (end < start)
276		return 0;
277
278	pagevec_init(&pvec, 0);
279	index = start;
280	while ((index <= end) &&
281			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
282			PAGECACHE_TAG_WRITEBACK,
283			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
284		unsigned i;
285
286		for (i = 0; i < nr_pages; i++) {
287			struct page *page = pvec.pages[i];
288
289			/* until radix tree lookup accepts end_index */
290			if (page->index > end)
291				continue;
292
293			wait_on_page_writeback(page);
294			if (PageError(page))
295				ret = -EIO;
296		}
297		pagevec_release(&pvec);
298		cond_resched();
299	}
300
301	/* Check for outstanding write errors */
302	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
303		ret = -ENOSPC;
304	if (test_and_clear_bit(AS_EIO, &mapping->flags))
305		ret = -EIO;
306
307	return ret;
308}
309
310/**
311 * sync_page_range - write and wait on all pages in the passed range
312 * @inode:	target inode
313 * @mapping:	target address_space
 314 * @pos:	beginning offset in bytes to write
315 * @count:	number of bytes to write
316 *
317 * Write and wait upon all the pages in the passed range.  This is a "data
318 * integrity" operation.  It waits upon in-flight writeout before starting and
319 * waiting upon new writeout.  If there was an IO error, return it.
320 *
321 * We need to re-take i_mutex during the generic_osync_inode list walk because
322 * it is otherwise livelockable.
323 */
324int sync_page_range(struct inode *inode, struct address_space *mapping,
325			loff_t pos, loff_t count)
326{
327	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
328	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
329	int ret;
330
331	if (!mapping_cap_writeback_dirty(mapping) || !count)
332		return 0;
333	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
334	if (ret == 0) {
335		mutex_lock(&inode->i_mutex);
336		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
337		mutex_unlock(&inode->i_mutex);
338	}
339	if (ret == 0)
340		ret = wait_on_page_writeback_range(mapping, start, end);
341	return ret;
342}
343EXPORT_SYMBOL(sync_page_range);
344
345/**
346 * sync_page_range_nolock
347 * @inode:	target inode
348 * @mapping:	target address_space
 349 * @pos:	beginning offset in bytes to write
350 * @count:	number of bytes to write
351 *
352 * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
353 * as it forces O_SYNC writers to different parts of the same file
354 * to be serialised right until io completion.
355 */
356int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
357			   loff_t pos, loff_t count)
358{
359	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
360	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
361	int ret;
362
363	if (!mapping_cap_writeback_dirty(mapping) || !count)
364		return 0;
365	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
366	if (ret == 0)
367		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
368	if (ret == 0)
369		ret = wait_on_page_writeback_range(mapping, start, end);
370	return ret;
371}
372EXPORT_SYMBOL(sync_page_range_nolock);
373
374/**
375 * filemap_fdatawait - wait for all under-writeback pages to complete
376 * @mapping: address space structure to wait for
377 *
378 * Walk the list of under-writeback pages of the given address space
379 * and wait for all of them.
380 */
381int filemap_fdatawait(struct address_space *mapping)
382{
383	loff_t i_size = i_size_read(mapping->host);
384
385	if (i_size == 0)
386		return 0;
387
388	return wait_on_page_writeback_range(mapping, 0,
389				(i_size - 1) >> PAGE_CACHE_SHIFT);
390}
391EXPORT_SYMBOL(filemap_fdatawait);
392
393int filemap_write_and_wait(struct address_space *mapping)
394{
395	int err = 0;
396
397	if (mapping->nrpages) {
398		err = filemap_fdatawrite(mapping);
399		/*
400		 * Even if the above returned error, the pages may be
401		 * written partially (e.g. -ENOSPC), so we wait for it.
 402		 * But -EIO is a special case: it may indicate that the worst
 403		 * thing (e.g. a bug) happened, so we avoid waiting for it.
404		 */
405		if (err != -EIO) {
406			int err2 = filemap_fdatawait(mapping);
407			if (!err)
408				err = err2;
409		}
410	}
411	return err;
412}
413EXPORT_SYMBOL(filemap_write_and_wait);
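/*
 * Example (illustrative sketch): an fsync-style "flush everything and wait"
 * sequence reduces to
 *
 *	err = filemap_write_and_wait(inode->i_mapping);
 *
 * which starts WB_SYNC_ALL writeback and then waits on all pages under
 * writeback, picking up any AS_EIO/AS_ENOSPC errors recorded on the mapping.
 */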
414
415/**
416 * filemap_write_and_wait_range - write out & wait on a file range
417 * @mapping:	the address_space for the pages
418 * @lstart:	offset in bytes where the range starts
419 * @lend:	offset in bytes where the range ends (inclusive)
420 *
421 * Write out and wait upon file offsets lstart->lend, inclusive.
422 *
423 * Note that `lend' is inclusive (describes the last byte to be written) so
 424 * that this function can be used to write to the very end-of-file (lend = -1).
425 */
426int filemap_write_and_wait_range(struct address_space *mapping,
427				 loff_t lstart, loff_t lend)
428{
429	int err = 0;
430
431	if (mapping->nrpages) {
432		err = __filemap_fdatawrite_range(mapping, lstart, lend,
433						 WB_SYNC_ALL);
434		/* See comment of filemap_write_and_wait() */
435		if (err != -EIO) {
436			int err2 = wait_on_page_writeback_range(mapping,
437						lstart >> PAGE_CACHE_SHIFT,
438						lend >> PAGE_CACHE_SHIFT);
439			if (!err)
440				err = err2;
441		}
442	}
443	return err;
444}
445
446/**
447 * add_to_page_cache - add newly allocated pagecache pages
448 * @page:	page to add
449 * @mapping:	the page's address_space
450 * @offset:	page index
451 * @gfp_mask:	page allocation mode
452 *
453 * This function is used to add newly allocated pagecache pages;
454 * the page is new, so we can just run SetPageLocked() against it.
455 * The other page state flags were set by rmqueue().
456 *
457 * This function does not add the page to the LRU.  The caller must do that.
458 */
459int add_to_page_cache(struct page *page, struct address_space *mapping,
460		pgoff_t offset, gfp_t gfp_mask)
461{
462	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
463
464	if (error == 0) {
465		write_lock_irq(&mapping->tree_lock);
466		error = radix_tree_insert(&mapping->page_tree, offset, page);
467		if (!error) {
468			page_cache_get(page);
469			SetPageLocked(page);
470			page->mapping = mapping;
471			page->index = offset;
472			mapping->nrpages++;
473			__inc_zone_page_state(page, NR_FILE_PAGES);
474		}
475		write_unlock_irq(&mapping->tree_lock);
476		radix_tree_preload_end();
477	}
478	return error;
479}
480EXPORT_SYMBOL(add_to_page_cache);
481
482int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
483				pgoff_t offset, gfp_t gfp_mask)
484{
485	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
486	if (ret == 0)
487		lru_cache_add(page);
488	return ret;
489}
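/*
 * Example (illustrative sketch): the usual "allocate, insert, read" pattern
 * built on add_to_page_cache_lru(); page_cache_read() below does essentially
 * this, including the -EEXIST handling that is elided here:
 *
 *	page = page_cache_alloc_cold(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
 *	if (ret == 0)
 *		ret = mapping->a_ops->readpage(file, page);
 *	page_cache_release(page);
 */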
490
491#ifdef CONFIG_NUMA
492struct page *__page_cache_alloc(gfp_t gfp)
493{
494	if (cpuset_do_page_mem_spread()) {
495		int n = cpuset_mem_spread_node();
496		return alloc_pages_node(n, gfp, 0);
497	}
498	return alloc_pages(gfp, 0);
499}
500EXPORT_SYMBOL(__page_cache_alloc);
501#endif
502
503static int __sleep_on_page_lock(void *word)
504{
505	io_schedule();
506	return 0;
507}
508
509/*
510 * In order to wait for pages to become available there must be
511 * waitqueues associated with pages. By using a hash table of
512 * waitqueues where the bucket discipline is to maintain all
513 * waiters on the same queue and wake all when any of the pages
 514 * become available, and by having the woken contexts check that
 515 * the appropriate page actually became available, this saves space
 516 * at the cost of "thundering herd" phenomena during rare hash
517 * collisions.
518 */
519static wait_queue_head_t *page_waitqueue(struct page *page)
520{
521	const struct zone *zone = page_zone(page);
522
523	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
524}
525
526static inline void wake_up_page(struct page *page, int bit)
527{
528	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
529}
530
531void fastcall wait_on_page_bit(struct page *page, int bit_nr)
532{
533	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
534
535	if (test_bit(bit_nr, &page->flags))
536		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
537							TASK_UNINTERRUPTIBLE);
538}
539EXPORT_SYMBOL(wait_on_page_bit);
540
541/**
542 * unlock_page - unlock a locked page
543 * @page: the page
544 *
545 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
546 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 547 * mechanism between PageLocked pages and PageWriteback pages is shared.
548 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
549 *
550 * The first mb is necessary to safely close the critical section opened by the
551 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
552 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
553 * parallel wait_on_page_locked()).
554 */
555void fastcall unlock_page(struct page *page)
556{
557	smp_mb__before_clear_bit();
558	if (!TestClearPageLocked(page))
559		BUG();
560	smp_mb__after_clear_bit();
561	wake_up_page(page, PG_locked);
562}
563EXPORT_SYMBOL(unlock_page);
564
565/**
566 * end_page_writeback - end writeback against a page
567 * @page: the page
568 */
569void end_page_writeback(struct page *page)
570{
571	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
572		if (!test_clear_page_writeback(page))
573			BUG();
574	}
575	smp_mb__after_clear_bit();
576	wake_up_page(page, PG_writeback);
577}
578EXPORT_SYMBOL(end_page_writeback);
579
580/**
581 * __lock_page - get a lock on the page, assuming we need to sleep to get it
582 * @page: the page to lock
583 *
584 * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
585 * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
586 * chances are that on the second loop, the block layer's plug list is empty,
587 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
588 */
589void fastcall __lock_page(struct page *page)
590{
591	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
592
593	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
594							TASK_UNINTERRUPTIBLE);
595}
596EXPORT_SYMBOL(__lock_page);
597
598int fastcall __lock_page_killable(struct page *page)
599{
600	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
601
602	return __wait_on_bit_lock(page_waitqueue(page), &wait,
603					sync_page_killable, TASK_KILLABLE);
604}
605
606/*
607 * Variant of lock_page that does not require the caller to hold a reference
608 * on the page's mapping.
609 */
610void fastcall __lock_page_nosync(struct page *page)
611{
612	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
613	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
614							TASK_UNINTERRUPTIBLE);
615}
616
617/**
618 * find_get_page - find and get a page reference
619 * @mapping: the address_space to search
620 * @offset: the page index
621 *
622 * Is there a pagecache struct page at the given (mapping, offset) tuple?
623 * If yes, increment its refcount and return it; if no, return NULL.
624 */
625struct page * find_get_page(struct address_space *mapping, pgoff_t offset)
626{
627	struct page *page;
628
629	read_lock_irq(&mapping->tree_lock);
630	page = radix_tree_lookup(&mapping->page_tree, offset);
631	if (page)
632		page_cache_get(page);
633	read_unlock_irq(&mapping->tree_lock);
634	return page;
635}
636EXPORT_SYMBOL(find_get_page);
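/*
 * Example (illustrative sketch): a lookup that only needs a stable reference,
 * not the page lock:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		... inspect or copy from the page ...
 *		page_cache_release(page);
 *	}
 */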
637
638/**
639 * find_lock_page - locate, pin and lock a pagecache page
640 * @mapping: the address_space to search
641 * @offset: the page index
642 *
643 * Locates the desired pagecache page, locks it, increments its reference
644 * count and returns its address.
645 *
 646 * Returns NULL if the page was not present. find_lock_page() may sleep.
647 */
648struct page *find_lock_page(struct address_space *mapping,
649				pgoff_t offset)
650{
651	struct page *page;
652
653repeat:
654	read_lock_irq(&mapping->tree_lock);
655	page = radix_tree_lookup(&mapping->page_tree, offset);
656	if (page) {
657		page_cache_get(page);
658		if (TestSetPageLocked(page)) {
659			read_unlock_irq(&mapping->tree_lock);
660			__lock_page(page);
661
662			/* Has the page been truncated while we slept? */
663			if (unlikely(page->mapping != mapping)) {
664				unlock_page(page);
665				page_cache_release(page);
666				goto repeat;
667			}
668			VM_BUG_ON(page->index != offset);
669			goto out;
670		}
671	}
672	read_unlock_irq(&mapping->tree_lock);
673out:
674	return page;
675}
676EXPORT_SYMBOL(find_lock_page);
677
678/**
679 * find_or_create_page - locate or add a pagecache page
680 * @mapping: the page's address_space
681 * @index: the page's index into the mapping
682 * @gfp_mask: page allocation mode
683 *
684 * Locates a page in the pagecache.  If the page is not present, a new page
685 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
686 * LRU list.  The returned page is locked and has its reference count
687 * incremented.
688 *
 689 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 690 * allocation!
 691 *
 692 * find_or_create_page() returns the desired page's address, or NULL on
 693 * memory exhaustion.
694 */
695struct page *find_or_create_page(struct address_space *mapping,
696		pgoff_t index, gfp_t gfp_mask)
697{
698	struct page *page;
699	int err;
700repeat:
701	page = find_lock_page(mapping, index);
702	if (!page) {
703		page = __page_cache_alloc(gfp_mask);
704		if (!page)
705			return NULL;
706		err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
707		if (unlikely(err)) {
708			page_cache_release(page);
709			page = NULL;
710			if (err == -EEXIST)
711				goto repeat;
712		}
713	}
714	return page;
715}
716EXPORT_SYMBOL(find_or_create_page);
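/*
 * Example (illustrative sketch): grab a locked page, fill or modify it, then
 * drop both the lock and the reference:
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	... bring the page uptodate or dirty it ...
 *	unlock_page(page);
 *	page_cache_release(page);
 */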
717
718/**
719 * find_get_pages - gang pagecache lookup
720 * @mapping:	The address_space to search
721 * @start:	The starting page index
722 * @nr_pages:	The maximum number of pages
723 * @pages:	Where the resulting pages are placed
724 *
725 * find_get_pages() will search for and return a group of up to
726 * @nr_pages pages in the mapping.  The pages are placed at @pages.
727 * find_get_pages() takes a reference against the returned pages.
728 *
729 * The search returns a group of mapping-contiguous pages with ascending
730 * indexes.  There may be holes in the indices due to not-present pages.
731 *
732 * find_get_pages() returns the number of pages which were found.
733 */
734unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
735			    unsigned int nr_pages, struct page **pages)
736{
737	unsigned int i;
738	unsigned int ret;
739
740	read_lock_irq(&mapping->tree_lock);
741	ret = radix_tree_gang_lookup(&mapping->page_tree,
742				(void **)pages, start, nr_pages);
743	for (i = 0; i < ret; i++)
744		page_cache_get(pages[i]);
745	read_unlock_irq(&mapping->tree_lock);
746	return ret;
747}
748
749/**
750 * find_get_pages_contig - gang contiguous pagecache lookup
751 * @mapping:	The address_space to search
752 * @index:	The starting page index
753 * @nr_pages:	The maximum number of pages
754 * @pages:	Where the resulting pages are placed
755 *
756 * find_get_pages_contig() works exactly like find_get_pages(), except
 757 * that the returned pages are guaranteed to be contiguous.
758 *
759 * find_get_pages_contig() returns the number of pages which were found.
760 */
761unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
762			       unsigned int nr_pages, struct page **pages)
763{
764	unsigned int i;
765	unsigned int ret;
766
767	read_lock_irq(&mapping->tree_lock);
768	ret = radix_tree_gang_lookup(&mapping->page_tree,
769				(void **)pages, index, nr_pages);
770	for (i = 0; i < ret; i++) {
771		if (pages[i]->mapping == NULL || pages[i]->index != index)
772			break;
773
774		page_cache_get(pages[i]);
775		index++;
776	}
777	read_unlock_irq(&mapping->tree_lock);
778	return i;
779}
780EXPORT_SYMBOL(find_get_pages_contig);
781
782/**
783 * find_get_pages_tag - find and return pages that match @tag
784 * @mapping:	the address_space to search
785 * @index:	the starting page index
786 * @tag:	the tag index
787 * @nr_pages:	the maximum number of pages
788 * @pages:	where the resulting pages are placed
789 *
790 * Like find_get_pages, except we only return pages which are tagged with
791 * @tag.   We update @index to index the next page for the traversal.
792 */
793unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
794			int tag, unsigned int nr_pages, struct page **pages)
795{
796	unsigned int i;
797	unsigned int ret;
798
799	read_lock_irq(&mapping->tree_lock);
800	ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
801				(void **)pages, *index, nr_pages, tag);
802	for (i = 0; i < ret; i++)
803		page_cache_get(pages[i]);
804	if (ret)
805		*index = pages[ret - 1]->index + 1;
806	read_unlock_irq(&mapping->tree_lock);
807	return ret;
808}
809EXPORT_SYMBOL(find_get_pages_tag);
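/*
 * Example (illustrative sketch): tag lookups are normally driven through a
 * pagevec, as wait_on_page_writeback_range() above does.  Walking all dirty
 * pages of a mapping looks roughly like:
 *
 *	pgoff_t index = 0;
 *	struct pagevec pvec;
 *	unsigned nr, i;
 *
 *	pagevec_init(&pvec, 0);
 *	while ((nr = pagevec_lookup_tag(&pvec, mapping, &index,
 *				PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
 *		for (i = 0; i < nr; i++)
 *			... process pvec.pages[i] ...
 *		pagevec_release(&pvec);
 *		cond_resched();
 *	}
 */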
810
811/**
812 * grab_cache_page_nowait - returns locked page at given index in given cache
813 * @mapping: target address_space
814 * @index: the page index
815 *
816 * Same as grab_cache_page(), but do not wait if the page is unavailable.
817 * This is intended for speculative data generators, where the data can
818 * be regenerated if the page couldn't be grabbed.  This routine should
819 * be safe to call while holding the lock for another page.
820 *
821 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
822 * and deadlock against the caller's locked page.
823 */
824struct page *
825grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
826{
827	struct page *page = find_get_page(mapping, index);
828
829	if (page) {
830		if (!TestSetPageLocked(page))
831			return page;
832		page_cache_release(page);
833		return NULL;
834	}
835	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
836	if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
837		page_cache_release(page);
838		page = NULL;
839	}
840	return page;
841}
842EXPORT_SYMBOL(grab_cache_page_nowait);
843
844/*
845 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
846 * a _large_ part of the i/o request. Imagine the worst scenario:
847 *
848 *      ---R__________________________________________B__________
849 *         ^ reading here                             ^ bad block(assume 4k)
850 *
851 * read(R) => miss => readahead(R...B) => media error => frustrating retries
852 * => failing the whole request => read(R) => read(R+1) =>
853 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
854 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
855 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
856 *
857 * It is going insane. Fix it by quickly scaling down the readahead size.
858 */
859static void shrink_readahead_size_eio(struct file *filp,
860					struct file_ra_state *ra)
861{
862	if (!ra->ra_pages)
863		return;
864
865	ra->ra_pages /= 4;
866}
867
868/**
869 * do_generic_mapping_read - generic file read routine
870 * @mapping:	address_space to be read
871 * @ra:		file's readahead state
872 * @filp:	the file to read
873 * @ppos:	current file position
874 * @desc:	read_descriptor
875 * @actor:	read method
876 *
877 * This is a generic file read routine, and uses the
878 * mapping->a_ops->readpage() function for the actual low-level stuff.
879 *
880 * This is really ugly. But the goto's actually try to clarify some
881 * of the logic when it comes to error handling etc.
882 *
883 * Note the struct file* is only passed for the use of readpage.
884 * It may be NULL.
885 */
886void do_generic_mapping_read(struct address_space *mapping,
887			     struct file_ra_state *ra,
888			     struct file *filp,
889			     loff_t *ppos,
890			     read_descriptor_t *desc,
891			     read_actor_t actor)
892{
893	struct inode *inode = mapping->host;
894	pgoff_t index;
895	pgoff_t last_index;
896	pgoff_t prev_index;
897	unsigned long offset;      /* offset into pagecache page */
898	unsigned int prev_offset;
899	int error;
900
901	index = *ppos >> PAGE_CACHE_SHIFT;
902	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
903	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
904	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
905	offset = *ppos & ~PAGE_CACHE_MASK;
906
907	for (;;) {
908		struct page *page;
909		pgoff_t end_index;
910		loff_t isize;
911		unsigned long nr, ret;
912
913		cond_resched();
914find_page:
915		page = find_get_page(mapping, index);
916		if (!page) {
917			page_cache_sync_readahead(mapping,
918					ra, filp,
919					index, last_index - index);
920			page = find_get_page(mapping, index);
921			if (unlikely(page == NULL))
922				goto no_cached_page;
923		}
924		if (PageReadahead(page)) {
925			page_cache_async_readahead(mapping,
926					ra, filp, page,
927					index, last_index - index);
928		}
929		if (!PageUptodate(page))
930			goto page_not_up_to_date;
931page_ok:
932		/*
933		 * i_size must be checked after we know the page is Uptodate.
934		 *
 935		 * Checking i_size after the uptodate check allows us to calculate
936		 * the correct value for "nr", which means the zero-filled
937		 * part of the page is not copied back to userspace (unless
938		 * another truncate extends the file - this is desired though).
939		 */
940
941		isize = i_size_read(inode);
942		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
943		if (unlikely(!isize || index > end_index)) {
944			page_cache_release(page);
945			goto out;
946		}
947
948		/* nr is the maximum number of bytes to copy from this page */
949		nr = PAGE_CACHE_SIZE;
950		if (index == end_index) {
951			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
952			if (nr <= offset) {
953				page_cache_release(page);
954				goto out;
955			}
956		}
957		nr = nr - offset;
958
959		/* If users can be writing to this page using arbitrary
960		 * virtual addresses, take care about potential aliasing
961		 * before reading the page on the kernel side.
962		 */
963		if (mapping_writably_mapped(mapping))
964			flush_dcache_page(page);
965
966		/*
967		 * When a sequential read accesses a page several times,
968		 * only mark it as accessed the first time.
969		 */
970		if (prev_index != index || offset != prev_offset)
971			mark_page_accessed(page);
972		prev_index = index;
973
974		/*
975		 * Ok, we have the page, and it's up-to-date, so
976		 * now we can copy it to user space...
977		 *
978		 * The actor routine returns how many bytes were actually used..
979		 * NOTE! This may not be the same as how much of a user buffer
980		 * we filled up (we may be padding etc), so we can only update
981		 * "pos" here (the actor routine has to update the user buffer
982		 * pointers and the remaining count).
983		 */
984		ret = actor(desc, page, offset, nr);
985		offset += ret;
986		index += offset >> PAGE_CACHE_SHIFT;
987		offset &= ~PAGE_CACHE_MASK;
988		prev_offset = offset;
989
990		page_cache_release(page);
991		if (ret == nr && desc->count)
992			continue;
993		goto out;
994
995page_not_up_to_date:
996		/* Get exclusive access to the page ... */
997		if (lock_page_killable(page))
998			goto readpage_eio;
999
1000		/* Did it get truncated before we got the lock? */
1001		if (!page->mapping) {
1002			unlock_page(page);
1003			page_cache_release(page);
1004			continue;
1005		}
1006
1007		/* Did somebody else fill it already? */
1008		if (PageUptodate(page)) {
1009			unlock_page(page);
1010			goto page_ok;
1011		}
1012
1013readpage:
1014		/* Start the actual read. The read will unlock the page. */
1015		error = mapping->a_ops->readpage(filp, page);
1016
1017		if (unlikely(error)) {
1018			if (error == AOP_TRUNCATED_PAGE) {
1019				page_cache_release(page);
1020				goto find_page;
1021			}
1022			goto readpage_error;
1023		}
1024
1025		if (!PageUptodate(page)) {
1026			if (lock_page_killable(page))
1027				goto readpage_eio;
1028			if (!PageUptodate(page)) {
1029				if (page->mapping == NULL) {
1030					/*
1031					 * invalidate_inode_pages got it
1032					 */
1033					unlock_page(page);
1034					page_cache_release(page);
1035					goto find_page;
1036				}
1037				unlock_page(page);
1038				shrink_readahead_size_eio(filp, ra);
1039				goto readpage_eio;
1040			}
1041			unlock_page(page);
1042		}
1043
1044		goto page_ok;
1045
1046readpage_eio:
1047		error = -EIO;
1048readpage_error:
1049		/* UHHUH! A synchronous read error occurred. Report it */
1050		desc->error = error;
1051		page_cache_release(page);
1052		goto out;
1053
1054no_cached_page:
1055		/*
1056		 * Ok, it wasn't cached, so we need to create a new
1057		 * page..
1058		 */
1059		page = page_cache_alloc_cold(mapping);
1060		if (!page) {
1061			desc->error = -ENOMEM;
1062			goto out;
1063		}
1064		error = add_to_page_cache_lru(page, mapping,
1065						index, GFP_KERNEL);
1066		if (error) {
1067			page_cache_release(page);
1068			if (error == -EEXIST)
1069				goto find_page;
1070			desc->error = error;
1071			goto out;
1072		}
1073		goto readpage;
1074	}
1075
1076out:
1077	ra->prev_pos = prev_index;
1078	ra->prev_pos <<= PAGE_CACHE_SHIFT;
1079	ra->prev_pos |= prev_offset;
1080
1081	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
1082	if (filp)
1083		file_accessed(filp);
1084}
1085EXPORT_SYMBOL(do_generic_mapping_read);
1086
1087int file_read_actor(read_descriptor_t *desc, struct page *page,
1088			unsigned long offset, unsigned long size)
1089{
1090	char *kaddr;
1091	unsigned long left, count = desc->count;
1092
1093	if (size > count)
1094		size = count;
1095
1096	/*
1097	 * Faults on the destination of a read are common, so do it before
1098	 * taking the kmap.
1099	 */
1100	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
1101		kaddr = kmap_atomic(page, KM_USER0);
1102		left = __copy_to_user_inatomic(desc->arg.buf,
1103						kaddr + offset, size);
1104		kunmap_atomic(kaddr, KM_USER0);
1105		if (left == 0)
1106			goto success;
1107	}
1108
1109	/* Do it the slow way */
1110	kaddr = kmap(page);
1111	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
1112	kunmap(page);
1113
1114	if (left) {
1115		size -= left;
1116		desc->error = -EFAULT;
1117	}
1118success:
1119	desc->count = count - size;
1120	desc->written += size;
1121	desc->arg.buf += size;
1122	return size;
1123}
1124
1125/*
 1126 * Performs necessary checks on the iovec array before doing a read or write
 1127 * @iov:	io vector request
 1128 * @nr_segs:	number of segments in the iovec
 1129 * @count:	number of bytes to access
 1130 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
 1131 *
 1132 * Adjust the number of segments and amount of bytes to access (nr_segs should
 1133 * be properly initialized first). Returns the appropriate error code that the
 1134 * caller should return, or zero if the access should be allowed.
1135 */
1136int generic_segment_checks(const struct iovec *iov,
1137			unsigned long *nr_segs, size_t *count, int access_flags)
1138{
1139	unsigned long   seg;
1140	size_t cnt = 0;
1141	for (seg = 0; seg < *nr_segs; seg++) {
1142		const struct iovec *iv = &iov[seg];
1143
1144		/*
1145		 * If any segment has a negative length, or the cumulative
1146		 * length ever wraps negative then return -EINVAL.
1147		 */
1148		cnt += iv->iov_len;
1149		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
1150			return -EINVAL;
1151		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
1152			continue;
1153		if (seg == 0)
1154			return -EFAULT;
1155		*nr_segs = seg;
1156		cnt -= iv->iov_len;	/* This segment is no good */
1157		break;
1158	}
1159	*count = cnt;
1160	return 0;
1161}
1162EXPORT_SYMBOL(generic_segment_checks);
1163
1164/**
1165 * generic_file_aio_read - generic filesystem read routine
1166 * @iocb:	kernel I/O control block
1167 * @iov:	io vector request
1168 * @nr_segs:	number of segments in the iovec
1169 * @pos:	current file position
1170 *
1171 * This is the "read()" routine for all filesystems
1172 * that can use the page cache directly.
1173 */
1174ssize_t
1175generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1176		unsigned long nr_segs, loff_t pos)
1177{
1178	struct file *filp = iocb->ki_filp;
1179	ssize_t retval;
1180	unsigned long seg;
1181	size_t count;
1182	loff_t *ppos = &iocb->ki_pos;
1183
1184	count = 0;
1185	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1186	if (retval)
1187		return retval;
1188
1189	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
1190	if (filp->f_flags & O_DIRECT) {
1191		loff_t size;
1192		struct address_space *mapping;
1193		struct inode *inode;
1194
1195		mapping = filp->f_mapping;
1196		inode = mapping->host;
1197		retval = 0;
1198		if (!count)
1199			goto out; /* skip atime */
1200		size = i_size_read(inode);
1201		if (pos < size) {
1202			retval = generic_file_direct_IO(READ, iocb,
1203						iov, pos, nr_segs);
1204			if (retval > 0)
1205				*ppos = pos + retval;
1206		}
1207		if (likely(retval != 0)) {
1208			file_accessed(filp);
1209			goto out;
1210		}
1211	}
1212
1213	retval = 0;
1214	if (count) {
1215		for (seg = 0; seg < nr_segs; seg++) {
1216			read_descriptor_t desc;
1217
1218			desc.written = 0;
1219			desc.arg.buf = iov[seg].iov_base;
1220			desc.count = iov[seg].iov_len;
1221			if (desc.count == 0)
1222				continue;
1223			desc.error = 0;
1224			do_generic_file_read(filp,ppos,&desc,file_read_actor);
1225			retval += desc.written;
1226			if (desc.error) {
1227				retval = retval ?: desc.error;
1228				break;
1229			}
1230			if (desc.count > 0)
1231				break;
1232		}
1233	}
1234out:
1235	return retval;
1236}
1237EXPORT_SYMBOL(generic_file_aio_read);
1238
1239static ssize_t
1240do_readahead(struct address_space *mapping, struct file *filp,
1241	     pgoff_t index, unsigned long nr)
1242{
1243	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
1244		return -EINVAL;
1245
1246	force_page_cache_readahead(mapping, filp, index,
1247					max_sane_readahead(nr));
1248	return 0;
1249}
1250
1251asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
1252{
1253	ssize_t ret;
1254	struct file *file;
1255
1256	ret = -EBADF;
1257	file = fget(fd);
1258	if (file) {
1259		if (file->f_mode & FMODE_READ) {
1260			struct address_space *mapping = file->f_mapping;
1261			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
1262			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
1263			unsigned long len = end - start + 1;
1264			ret = do_readahead(mapping, file, start, len);
1265		}
1266		fput(file);
1267	}
1268	return ret;
1269}
1270
1271#ifdef CONFIG_MMU
1272/**
1273 * page_cache_read - adds requested page to the page cache if not already there
1274 * @file:	file to read
1275 * @offset:	page index
1276 *
1277 * This adds the requested page to the page cache if it isn't already there,
1278 * and schedules an I/O to read in its contents from disk.
1279 */
1280static int fastcall page_cache_read(struct file * file, pgoff_t offset)
1281{
1282	struct address_space *mapping = file->f_mapping;
1283	struct page *page;
1284	int ret;
1285
1286	do {
1287		page = page_cache_alloc_cold(mapping);
1288		if (!page)
1289			return -ENOMEM;
1290
1291		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1292		if (ret == 0)
1293			ret = mapping->a_ops->readpage(file, page);
1294		else if (ret == -EEXIST)
1295			ret = 0; /* losing race to add is OK */
1296
1297		page_cache_release(page);
1298
1299	} while (ret == AOP_TRUNCATED_PAGE);
1300
1301	return ret;
1302}
1303
1304#define MMAP_LOTSAMISS  (100)
1305
1306/**
1307 * filemap_fault - read in file data for page fault handling
1308 * @vma:	vma in which the fault was taken
1309 * @vmf:	struct vm_fault containing details of the fault
1310 *
1311 * filemap_fault() is invoked via the vma operations vector for a
1312 * mapped memory region to read in file data during a page fault.
1313 *
1314 * The goto's are kind of ugly, but this streamlines the normal case of having
1315 * it in the page cache, and handles the special cases reasonably without
1316 * having a lot of duplicated code.
1317 */
1318int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1319{
1320	int error;
1321	struct file *file = vma->vm_file;
1322	struct address_space *mapping = file->f_mapping;
1323	struct file_ra_state *ra = &file->f_ra;
1324	struct inode *inode = mapping->host;
1325	struct page *page;
1326	unsigned long size;
1327	int did_readaround = 0;
1328	int ret = 0;
1329
1330	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1331	if (vmf->pgoff >= size)
1332		return VM_FAULT_SIGBUS;
1333
1334	/* If we don't want any read-ahead, don't bother */
1335	if (VM_RandomReadHint(vma))
1336		goto no_cached_page;
1337
1338	/*
1339	 * Do we have something in the page cache already?
1340	 */
1341retry_find:
1342	page = find_lock_page(mapping, vmf->pgoff);
1343	/*
1344	 * For sequential accesses, we use the generic readahead logic.
1345	 */
1346	if (VM_SequentialReadHint(vma)) {
1347		if (!page) {
1348			page_cache_sync_readahead(mapping, ra, file,
1349							   vmf->pgoff, 1);
1350			page = find_lock_page(mapping, vmf->pgoff);
1351			if (!page)
1352				goto no_cached_page;
1353		}
1354		if (PageReadahead(page)) {
1355			page_cache_async_readahead(mapping, ra, file, page,
1356							   vmf->pgoff, 1);
1357		}
1358	}
1359
1360	if (!page) {
1361		unsigned long ra_pages;
1362
1363		ra->mmap_miss++;
1364
1365		/*
1366		 * Do we miss much more than hit in this file? If so,
1367		 * stop bothering with read-ahead. It will only hurt.
1368		 */
1369		if (ra->mmap_miss > MMAP_LOTSAMISS)
1370			goto no_cached_page;
1371
1372		/*
1373		 * To keep the pgmajfault counter straight, we need to
1374		 * check did_readaround, as this is an inner loop.
1375		 */
1376		if (!did_readaround) {
1377			ret = VM_FAULT_MAJOR;
1378			count_vm_event(PGMAJFAULT);
1379		}
1380		did_readaround = 1;
1381		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
1382		if (ra_pages) {
1383			pgoff_t start = 0;
1384
1385			if (vmf->pgoff > ra_pages / 2)
1386				start = vmf->pgoff - ra_pages / 2;
1387			do_page_cache_readahead(mapping, file, start, ra_pages);
1388		}
1389		page = find_lock_page(mapping, vmf->pgoff);
1390		if (!page)
1391			goto no_cached_page;
1392	}
1393
1394	if (!did_readaround)
1395		ra->mmap_miss--;
1396
1397	/*
1398	 * We have a locked page in the page cache, now we need to check
1399	 * that it's up-to-date. If not, it is going to be due to an error.
1400	 */
1401	if (unlikely(!PageUptodate(page)))
1402		goto page_not_uptodate;
1403
1404	/* Must recheck i_size under page lock */
1405	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1406	if (unlikely(vmf->pgoff >= size)) {
1407		unlock_page(page);
1408		page_cache_release(page);
1409		return VM_FAULT_SIGBUS;
1410	}
1411
1412	/*
1413	 * Found the page and have a reference on it.
1414	 */
1415	mark_page_accessed(page);
1416	ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
1417	vmf->page = page;
1418	return ret | VM_FAULT_LOCKED;
1419
1420no_cached_page:
1421	/*
1422	 * We're only likely to ever get here if MADV_RANDOM is in
1423	 * effect.
1424	 */
1425	error = page_cache_read(file, vmf->pgoff);
1426
1427	/*
1428	 * The page we want has now been added to the page cache.
1429	 * In the unlikely event that someone removed it in the
1430	 * meantime, we'll just come back here and read it again.
1431	 */
1432	if (error >= 0)
1433		goto retry_find;
1434
1435	/*
1436	 * An error return from page_cache_read can result if the
1437	 * system is low on memory, or a problem occurs while trying
1438	 * to schedule I/O.
1439	 */
1440	if (error == -ENOMEM)
1441		return VM_FAULT_OOM;
1442	return VM_FAULT_SIGBUS;
1443
1444page_not_uptodate:
1445	/* IO error path */
1446	if (!did_readaround) {
1447		ret = VM_FAULT_MAJOR;
1448		count_vm_event(PGMAJFAULT);
1449	}
1450
1451	/*
1452	 * Umm, take care of errors if the page isn't up-to-date.
1453	 * Try to re-read it _once_. We do this synchronously,
1454	 * because there really aren't any performance issues here
1455	 * and we need to check for errors.
1456	 */
1457	ClearPageError(page);
1458	error = mapping->a_ops->readpage(file, page);
1459	page_cache_release(page);
1460
1461	if (!error || error == AOP_TRUNCATED_PAGE)
1462		goto retry_find;
1463
 1464	/* Things didn't work out. Return SIGBUS to tell the mm layer so. */
1465	shrink_readahead_size_eio(file, ra);
1466	return VM_FAULT_SIGBUS;
1467}
1468EXPORT_SYMBOL(filemap_fault);
1469
1470struct vm_operations_struct generic_file_vm_ops = {
1471	.fault		= filemap_fault,
1472};
1473
1474/* This is used for a general mmap of a disk file */
1475
1476int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1477{
1478	struct address_space *mapping = file->f_mapping;
1479
1480	if (!mapping->a_ops->readpage)
1481		return -ENOEXEC;
1482	file_accessed(file);
1483	vma->vm_ops = &generic_file_vm_ops;
1484	vma->vm_flags |= VM_CAN_NONLINEAR;
1485	return 0;
1486}
1487
1488/*
1489 * This is for filesystems which do not implement ->writepage.
1490 */
1491int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1492{
1493	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
1494		return -EINVAL;
1495	return generic_file_mmap(file, vma);
1496}
1497#else
1498int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1499{
1500	return -ENOSYS;
1501}
1502int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
1503{
1504	return -ENOSYS;
1505}
1506#endif /* CONFIG_MMU */
1507
1508EXPORT_SYMBOL(generic_file_mmap);
1509EXPORT_SYMBOL(generic_file_readonly_mmap);
1510
1511static struct page *__read_cache_page(struct address_space *mapping,
1512				pgoff_t index,
1513				int (*filler)(void *,struct page*),
1514				void *data)
1515{
1516	struct page *page;
1517	int err;
1518repeat:
1519	page = find_get_page(mapping, index);
1520	if (!page) {
1521		page = page_cache_alloc_cold(mapping);
1522		if (!page)
1523			return ERR_PTR(-ENOMEM);
1524		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
1525		if (unlikely(err)) {
1526			page_cache_release(page);
1527			if (err == -EEXIST)
1528				goto repeat;
1529			/* Presumably ENOMEM for radix tree node */
1530			return ERR_PTR(err);
1531		}
1532		err = filler(data, page);
1533		if (err < 0) {
1534			page_cache_release(page);
1535			page = ERR_PTR(err);
1536		}
1537	}
1538	return page;
1539}
1540
1541/*
1542 * Same as read_cache_page, but don't wait for page to become unlocked
1543 * after submitting it to the filler.
1544 */
1545struct page *read_cache_page_async(struct address_space *mapping,
1546				pgoff_t index,
1547				int (*filler)(void *,struct page*),
1548				void *data)
1549{
1550	struct page *page;
1551	int err;
1552
1553retry:
1554	page = __read_cache_page(mapping, index, filler, data);
1555	if (IS_ERR(page))
1556		return page;
1557	if (PageUptodate(page))
1558		goto out;
1559
1560	lock_page(page);
1561	if (!page->mapping) {
1562		unlock_page(page);
1563		page_cache_release(page);
1564		goto retry;
1565	}
1566	if (PageUptodate(page)) {
1567		unlock_page(page);
1568		goto out;
1569	}
1570	err = filler(data, page);
1571	if (err < 0) {
1572		page_cache_release(page);
1573		return ERR_PTR(err);
1574	}
1575out:
1576	mark_page_accessed(page);
1577	return page;
1578}
1579EXPORT_SYMBOL(read_cache_page_async);
1580
1581/**
1582 * read_cache_page - read into page cache, fill it if needed
1583 * @mapping:	the page's address_space
1584 * @index:	the page index
1585 * @filler:	function to perform the read
 1586 * @data:	first argument passed to @filler
1587 *
1588 * Read into the page cache. If a page already exists, and PageUptodate() is
1589 * not set, try to fill the page then wait for it to become unlocked.
1590 *
1591 * If the page does not get brought uptodate, return -EIO.
1592 */
1593struct page *read_cache_page(struct address_space *mapping,
1594				pgoff_t index,
1595				int (*filler)(void *,struct page*),
1596				void *data)
1597{
1598	struct page *page;
1599
1600	page = read_cache_page_async(mapping, index, filler, data);
1601	if (IS_ERR(page))
1602		goto out;
1603	wait_on_page_locked(page);
1604	if (!PageUptodate(page)) {
1605		page_cache_release(page);
1606		page = ERR_PTR(-EIO);
1607	}
1608 out:
1609	return page;
1610}
1611EXPORT_SYMBOL(read_cache_page);
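/*
 * Example (illustrative sketch): filesystems commonly read a metadata page
 * by passing their own ->readpage as the filler, much as read_mapping_page()
 * does:
 *
 *	page = read_cache_page(mapping, index,
 *			(filler_t *)mapping->a_ops->readpage, file);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the page contents ...
 *	page_cache_release(page);
 */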
1612
1613/*
1614 * The logic we want is
1615 *
1616 *	if suid or (sgid and xgrp)
1617 *		remove privs
1618 */
1619int should_remove_suid(struct dentry *dentry)
1620{
1621	mode_t mode = dentry->d_inode->i_mode;
1622	int kill = 0;
1623
1624	/* suid always must be killed */
1625	if (unlikely(mode & S_ISUID))
1626		kill = ATTR_KILL_SUID;
1627
1628	/*
1629	 * sgid without any exec bits is just a mandatory locking mark; leave
1630	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
1631	 */
1632	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1633		kill |= ATTR_KILL_SGID;
1634
1635	if (unlikely(kill && !capable(CAP_FSETID)))
1636		return kill;
1637
1638	return 0;
1639}
1640EXPORT_SYMBOL(should_remove_suid);
1641
1642int __remove_suid(struct dentry *dentry, int kill)
1643{
1644	struct iattr newattrs;
1645
1646	newattrs.ia_valid = ATTR_FORCE | kill;
1647	return notify_change(dentry, &newattrs);
1648}
1649
1650int remove_suid(struct dentry *dentry)
1651{
1652	int killsuid = should_remove_suid(dentry);
1653	int killpriv = security_inode_need_killpriv(dentry);
1654	int error = 0;
1655
1656	if (killpriv < 0)
1657		return killpriv;
1658	if (killpriv)
1659		error = security_inode_killpriv(dentry);
1660	if (!error && killsuid)
1661		error = __remove_suid(dentry, killsuid);
1662
1663	return error;
1664}
1665EXPORT_SYMBOL(remove_suid);
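/*
 * Example (illustrative sketch): a write path drops setuid/setgid bits under
 * i_mutex before copying any data, roughly:
 *
 *	err = remove_suid(file->f_path.dentry);
 *	if (err)
 *		goto out;
 *
 * as the generic write path later in this file does.
 */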
1666
1667static size_t __iovec_copy_from_user_inatomic(char *vaddr,
1668			const struct iovec *iov, size_t base, size_t bytes)
1669{
1670	size_t copied = 0, left = 0;
1671
1672	while (bytes) {
1673		char __user *buf = iov->iov_base + base;
1674		int copy = min(bytes, iov->iov_len - base);
1675
1676		base = 0;
1677		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
1678		copied += copy;
1679		bytes -= copy;
1680		vaddr += copy;
1681		iov++;
1682
1683		if (unlikely(left))
1684			break;
1685	}
1686	return copied - left;
1687}
1688
1689/*
1690 * Copy as much as we can into the page and return the number of bytes which
 1691 * were successfully copied.  If a fault is encountered then return the number
 1692 * of bytes which were copied before the fault.
1693 */
1694size_t iov_iter_copy_from_user_atomic(struct page *page,
1695		struct iov_iter *i, unsigned long offset, size_t bytes)
1696{
1697	char *kaddr;
1698	size_t copied;
1699
1700	BUG_ON(!in_atomic());
1701	kaddr = kmap_atomic(page, KM_USER0);
1702	if (likely(i->nr_segs == 1)) {
1703		int left;
1704		char __user *buf = i->iov->iov_base + i->iov_offset;
1705		left = __copy_from_user_inatomic_nocache(kaddr + offset,
1706							buf, bytes);
1707		copied = bytes - left;
1708	} else {
1709		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
1710						i->iov, i->iov_offset, bytes);
1711	}
1712	kunmap_atomic(kaddr, KM_USER0);
1713
1714	return copied;
1715}
1716EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
1717
1718/*
 1719 * This has the same side effects and return value as
1720 * iov_iter_copy_from_user_atomic().
1721 * The difference is that it attempts to resolve faults.
1722 * Page must not be locked.
1723 */
1724size_t iov_iter_copy_from_user(struct page *page,
1725		struct iov_iter *i, unsigned long offset, size_t bytes)
1726{
1727	char *kaddr;
1728	size_t copied;
1729
1730	kaddr = kmap(page);
1731	if (likely(i->nr_segs == 1)) {
1732		int left;
1733		char __user *buf = i->iov->iov_base + i->iov_offset;
1734		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
1735		copied = bytes - left;
1736	} else {
1737		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
1738						i->iov, i->iov_offset, bytes);
1739	}
1740	kunmap(page);
1741	return copied;
1742}
1743EXPORT_SYMBOL(iov_iter_copy_from_user);
1744
1745static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
1746{
1747	if (likely(i->nr_segs == 1)) {
1748		i->iov_offset += bytes;
1749	} else {
1750		const struct iovec *iov = i->iov;
1751		size_t base = i->iov_offset;
1752
1753		/*
1754		 * The !iov->iov_len check ensures we skip over unlikely
1755		 * zero-length segments.
1756		 */
1757		while (bytes || !iov->iov_len) {
1758			int copy = min(bytes, iov->iov_len - base);
1759
1760			bytes -= copy;
1761			base += copy;
1762			if (iov->iov_len == base) {
1763				iov++;
1764				base = 0;
1765			}
1766		}
1767		i->iov = iov;
1768		i->iov_offset = base;
1769	}
1770}
1771
1772void iov_iter_advance(struct iov_iter *i, size_t bytes)
1773{
1774	BUG_ON(i->count < bytes);
1775
1776	__iov_iter_advance_iov(i, bytes);
1777	i->count -= bytes;
1778}
1779EXPORT_SYMBOL(iov_iter_advance);
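/*
 * Example (illustrative sketch): the buffered write loop later in this file
 * consumes an iov_iter by alternating copy and advance:
 *
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	...
 *	iov_iter_advance(i, copied);
 *
 * so that i->count and the segment cursor always reflect what has actually
 * been copied so far.
 */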
1780
1781/*
1782 * Fault in the first iovec of the given iov_iter, to a maximum length
1783 * of bytes. Returns 0 on success, or non-zero if the memory could not be
1784 * accessed (ie. because it is an invalid address).
1785 *
1786 * writev-intensive code may want this to prefault several iovecs -- that
1787 * would be possible (callers must not rely on the fact that _only_ the
1788 * first iovec will be faulted with the current implementation).
1789 */
1790int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
1791{
1792	char __user *buf = i->iov->iov_base + i->iov_offset;
1793	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
1794	return fault_in_pages_readable(buf, bytes);
1795}
1796EXPORT_SYMBOL(iov_iter_fault_in_readable);
1797
1798/*
1799 * Return the count of just the current iov_iter segment.
1800 */
1801size_t iov_iter_single_seg_count(struct iov_iter *i)
1802{
1803	const struct iovec *iov = i->iov;
1804	if (i->nr_segs == 1)
1805		return i->count;
1806	else
1807		return min(i->count, iov->iov_len - i->iov_offset);
1808}
1809EXPORT_SYMBOL(iov_iter_single_seg_count);
1810
1811/*
1812 * Performs necessary checks before doing a write
1813 *
1814 * Can adjust writing position or amount of bytes to write.
1815 * Returns appropriate error code that caller should return or
1816 * zero in case that write should be allowed.
1817 */
1818inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
1819{
1820	struct inode *inode = file->f_mapping->host;
1821	unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1822
 1823	if (unlikely(*pos < 0))
 1824		return -EINVAL;
1825
1826	if (!isblk) {
1827		/* FIXME: this is for backwards compatibility with 2.4 */
1828		if (file->f_flags & O_APPEND)
 1829			*pos = i_size_read(inode);
1830
1831		if (limit != RLIM_INFINITY) {
1832			if (*pos >= limit) {
1833				send_sig(SIGXFSZ, current, 0);
1834				return -EFBIG;
1835			}
1836			if (*count > limit - (typeof(limit))*pos) {
1837				*count = limit - (typeof(limit))*pos;
1838			}
1839		}
1840	}
1841
1842	/*
1843	 * LFS rule
1844	 */
1845	if (unlikely(*pos + *count > MAX_NON_LFS &&
1846				!(file->f_flags & O_LARGEFILE))) {
1847		if (*pos >= MAX_NON_LFS) {
1848			return -EFBIG;
1849		}
1850		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
1851			*count = MAX_NON_LFS - (unsigned long)*pos;
1852		}
1853	}
1854
1855	/*
1856	 * Are we about to exceed the fs block limit ?
1857	 *
1858	 * If we have written data it becomes a short write.  If we have
1859	 * exceeded without writing data we send a signal and return EFBIG.
1860	 * Linus frestrict idea will clean these up nicely..
1861	 */
1862	if (likely(!isblk)) {
1863		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
1864			if (*count || *pos > inode->i_sb->s_maxbytes) {
1865				return -EFBIG;
1866			}
1867			/* zero-length writes at ->s_maxbytes are OK */
1868		}
1869
1870		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
1871			*count = inode->i_sb->s_maxbytes - *pos;
1872	} else {
1873#ifdef CONFIG_BLOCK
1874		loff_t isize;
1875		if (bdev_read_only(I_BDEV(inode)))
1876			return -EPERM;
1877		isize = i_size_read(inode);
1878		if (*pos >= isize) {
1879			if (*count || *pos > isize)
1880				return -ENOSPC;
1881		}
1882
1883		if (*pos + *count > isize)
1884			*count = isize - *pos;
1885#else
1886		return -EPERM;
1887#endif
1888	}
1889	return 0;
1890}
1891EXPORT_SYMBOL(generic_write_checks);
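
/*
 * A minimal illustrative sketch of a caller, assuming a regular file
 * (isblk == 0).  The function below is hypothetical and not part of this
 * file; the point is that both *pos and *count may be adjusted (O_APPEND,
 * RLIMIT_FSIZE, LFS and s_maxbytes limits) before any data is written.
 */
static ssize_t example_checked_write(struct file *file, const char __user *buf,
					size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	int err;

	err = generic_write_checks(file, &pos, &count, 0);
	if (err)
		return err;
	if (count == 0)
		return 0;

	/* ... write at most 'count' bytes of 'buf' at offset 'pos' ... */

	*ppos = pos + count;
	return count;
}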
1892
1893int pagecache_write_begin(struct file *file, struct address_space *mapping,
1894				loff_t pos, unsigned len, unsigned flags,
1895				struct page **pagep, void **fsdata)
1896{
1897	const struct address_space_operations *aops = mapping->a_ops;
1898
1899	if (aops->write_begin) {
1900		return aops->write_begin(file, mapping, pos, len, flags,
1901							pagep, fsdata);
1902	} else {
1903		int ret;
1904		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1905		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1906		struct inode *inode = mapping->host;
1907		struct page *page;
1908again:
1909		page = __grab_cache_page(mapping, index);
1910		*pagep = page;
1911		if (!page)
1912			return -ENOMEM;
1913
1914		if (flags & AOP_FLAG_UNINTERRUPTIBLE && !PageUptodate(page)) {
1915			/*
1916			 * There is no way to resolve a short write situation
1917			 * for a !Uptodate page (except by double copying in
1918			 * the caller, as done by generic_perform_write_2copy).
1919			 *
1920			 * Instead, we have to bring it uptodate here.
1921			 */
1922			ret = aops->readpage(file, page);
1923			page_cache_release(page);
1924			if (ret) {
1925				if (ret == AOP_TRUNCATED_PAGE)
1926					goto again;
1927				return ret;
1928			}
1929			goto again;
1930		}
1931
1932		ret = aops->prepare_write(file, page, offset, offset+len);
1933		if (ret) {
1934			unlock_page(page);
1935			page_cache_release(page);
1936			if (pos + len > inode->i_size)
1937				vmtruncate(inode, inode->i_size);
1938		}
1939		return ret;
1940	}
1941}
1942EXPORT_SYMBOL(pagecache_write_begin);
1943
1944int pagecache_write_end(struct file *file, struct address_space *mapping,
1945				loff_t pos, unsigned len, unsigned copied,
1946				struct page *page, void *fsdata)
1947{
1948	const struct address_space_operations *aops = mapping->a_ops;
1949	int ret;
1950
1951	if (aops->write_end) {
1952		mark_page_accessed(page);
1953		ret = aops->write_end(file, mapping, pos, len, copied,
1954							page, fsdata);
1955	} else {
1956		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1957		struct inode *inode = mapping->host;
1958
1959		flush_dcache_page(page);
1960		ret = aops->commit_write(file, page, offset, offset+len);
1961		unlock_page(page);
1962		mark_page_accessed(page);
1963		page_cache_release(page);
1964
1965		if (ret < 0) {
1966			if (pos + len > inode->i_size)
1967				vmtruncate(inode, inode->i_size);
1968		} else if (ret > 0)
1969			ret = min_t(size_t, copied, ret);
1970		else
1971			ret = copied;
1972	}
1973
1974	return ret;
1975}
1976EXPORT_SYMBOL(pagecache_write_end);
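
/*
 * An illustrative sketch (hypothetical helper, not part of this file) of
 * copying a small kernel buffer into the pagecache with the begin/end pair
 * above, roughly the pattern used by in-kernel writers such as splice.
 * 'len' is assumed not to cross a page boundary.
 */
static int example_write_kernel_buf(struct file *file, const char *kbuf,
					loff_t pos, unsigned len)
{
	struct address_space *mapping = file->f_mapping;
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct page *page;
	void *fsdata;
	char *kaddr;
	int ret;

	ret = pagecache_write_begin(file, mapping, pos, len,
				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (ret)
		return ret;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr + offset, kbuf, len);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);

	ret = pagecache_write_end(file, mapping, pos, len, len, page, fsdata);
	return ret < 0 ? ret : 0;
}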
1977
1978ssize_t
1979generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
1980		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
1981		size_t count, size_t ocount)
1982{
1983	struct file	*file = iocb->ki_filp;
1984	struct address_space *mapping = file->f_mapping;
1985	struct inode	*inode = mapping->host;
1986	ssize_t		written;
1987
1988	if (count != ocount)
1989		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
1990
1991	written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
1992	if (written > 0) {
1993		loff_t end = pos + written;
1994		if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
1995			i_size_write(inode, end);
1996			mark_inode_dirty(inode);
1997		}
1998		*ppos = end;
1999	}
2000
2001	/*
2002	 * Sync the fs metadata but not the minor inode changes, and
2003	 * of course not the data, as we did direct DMA for the IO.
2004	 * i_mutex is held, which protects generic_osync_inode() from
2005	 * livelocking.  AIO O_DIRECT ops attempt to sync metadata here.
2006	 */
2007	if ((written >= 0 || written == -EIOCBQUEUED) &&
2008	    ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2009		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
2010		if (err < 0)
2011			written = err;
2012	}
2013	return written;
2014}
2015EXPORT_SYMBOL(generic_file_direct_write);
2016
2017/*
2018 * Find or create a page at the given pagecache position. Return the locked
2019 * page. This function is specifically for buffered writes.
2020 */
2021struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
2022{
2023	int status;
2024	struct page *page;
2025repeat:
2026	page = find_lock_page(mapping, index);
2027	if (likely(page))
2028		return page;
2029
2030	page = page_cache_alloc(mapping);
2031	if (!page)
2032		return NULL;
2033	status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
2034	if (unlikely(status)) {
2035		page_cache_release(page);
2036		if (status == -EEXIST)
2037			goto repeat;
2038		return NULL;
2039	}
2040	return page;
2041}
2042EXPORT_SYMBOL(__grab_cache_page);
2043
2044static ssize_t generic_perform_write_2copy(struct file *file,
2045				struct iov_iter *i, loff_t pos)
2046{
2047	struct address_space *mapping = file->f_mapping;
2048	const struct address_space_operations *a_ops = mapping->a_ops;
2049	struct inode *inode = mapping->host;
2050	long status = 0;
2051	ssize_t written = 0;
2052
2053	do {
2054		struct page *src_page;
2055		struct page *page;
2056		pgoff_t index;		/* Pagecache index for current page */
2057		unsigned long offset;	/* Offset into pagecache page */
2058		unsigned long bytes;	/* Bytes to write to page */
2059		size_t copied;		/* Bytes copied from user */
2060
2061		offset = (pos & (PAGE_CACHE_SIZE - 1));
2062		index = pos >> PAGE_CACHE_SHIFT;
2063		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2064						iov_iter_count(i));
2065
2066		/*
2067		 * a non-NULL src_page indicates that we're doing the
2068		 * copy via a separately allocated bounce page and kmap.
2069		 */
2070		src_page = NULL;
2071
2072		/*
2073		 * Bring in the user page that we will copy from _first_.
2074		 * Otherwise there's a nasty deadlock on copying from the
2075		 * same page as we're writing to, without it being marked
2076		 * up-to-date.
2077		 *
2078		 * Not only is this an optimisation, but it is also required
2079		 * to check that the address is actually valid, when atomic
2080		 * usercopies are used, below.
2081		 */
2082		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2083			status = -EFAULT;
2084			break;
2085		}
2086
2087		page = __grab_cache_page(mapping, index);
2088		if (!page) {
2089			status = -ENOMEM;
2090			break;
2091		}
2092
2093		/*
2094		 * non-uptodate pages cannot cope with short copies, and we
2095		 * cannot take a pagefault with the destination page locked.
2096		 * So pin the source page to copy it.
2097		 */
2098		if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
2099			unlock_page(page);
2100
2101			src_page = alloc_page(GFP_KERNEL);
2102			if (!src_page) {
2103				page_cache_release(page);
2104				status = -ENOMEM;
2105				break;
2106			}
2107
2108			/*
2109			 * Cannot get_user_pages with a page locked for the
2110			 * same reason as we can't take a page fault with a
2111			 * page locked (as explained below).
2112			 */
2113			copied = iov_iter_copy_from_user(src_page, i,
2114								offset, bytes);
2115			if (unlikely(copied == 0)) {
2116				status = -EFAULT;
2117				page_cache_release(page);
2118				page_cache_release(src_page);
2119				break;
2120			}
2121			bytes = copied;
2122
2123			lock_page(page);
2124			/*
2125			 * Can't handle the page going uptodate here, because
2126			 * that means we would use non-atomic usercopies, which
2127			 * zero out the tail of the page, which can cause
2128			 * zeroes to become transiently visible. We could just
2129			 * use a non-zeroing copy, but the APIs aren't too
2130			 * consistent.
2131			 */
2132			if (unlikely(!page->mapping || PageUptodate(page))) {
2133				unlock_page(page);
2134				page_cache_release(page);
2135				page_cache_release(src_page);
2136				continue;
2137			}
2138		}
2139
2140		status = a_ops->prepare_write(file, page, offset, offset+bytes);
2141		if (unlikely(status))
2142			goto fs_write_aop_error;
2143
2144		if (!src_page) {
2145			/*
2146			 * Must not enter the pagefault handler here, because
2147			 * we hold the page lock, so we might recursively
2148			 * deadlock on the same lock, or get an ABBA deadlock
2149			 * against a different lock, or against the mmap_sem
2150			 * (which nests outside the page lock).  So increment
2151			 * preempt count, and use _atomic usercopies.
2152			 *
2153			 * The page is uptodate so we are OK to encounter a
2154			 * short copy: if unmodified parts of the page are
2155			 * marked dirty and written out to disk, it doesn't
2156			 * really matter.
2157			 */
2158			pagefault_disable();
2159			copied = iov_iter_copy_from_user_atomic(page, i,
2160								offset, bytes);
2161			pagefault_enable();
2162		} else {
2163			void *src, *dst;
2164			src = kmap_atomic(src_page, KM_USER0);
2165			dst = kmap_atomic(page, KM_USER1);
2166			memcpy(dst + offset, src + offset, bytes);
2167			kunmap_atomic(dst, KM_USER1);
2168			kunmap_atomic(src, KM_USER0);
2169			copied = bytes;
2170		}
2171		flush_dcache_page(page);
2172
2173		status = a_ops->commit_write(file, page, offset, offset+bytes);
2174		if (unlikely(status < 0))
2175			goto fs_write_aop_error;
2176		if (unlikely(status > 0)) /* filesystem did partial write */
2177			copied = min_t(size_t, copied, status);
2178
2179		unlock_page(page);
2180		mark_page_accessed(page);
2181		page_cache_release(page);
2182		if (src_page)
2183			page_cache_release(src_page);
2184
2185		iov_iter_advance(i, copied);
2186		pos += copied;
2187		written += copied;
2188
2189		balance_dirty_pages_ratelimited(mapping);
2190		cond_resched();
2191		continue;
2192
2193fs_write_aop_error:
2194		unlock_page(page);
2195		page_cache_release(page);
2196		if (src_page)
2197			page_cache_release(src_page);
2198
2199		/*
2200		 * prepare_write() may have instantiated a few blocks
2201		 * outside i_size.  Trim these off again. Don't need
2202		 * i_size_read because we hold i_mutex.
2203		 */
2204		if (pos + bytes > inode->i_size)
2205			vmtruncate(inode, inode->i_size);
2206		break;
2207	} while (iov_iter_count(i));
2208
2209	return written ? written : status;
2210}
2211
2212static ssize_t generic_perform_write(struct file *file,
2213				struct iov_iter *i, loff_t pos)
2214{
2215	struct address_space *mapping = file->f_mapping;
2216	const struct address_space_operations *a_ops = mapping->a_ops;
2217	long status = 0;
2218	ssize_t written = 0;
2219	unsigned int flags = 0;
2220
2221	/*
2222	 * Copies from kernel address space cannot fail (NFSD is a big user).
2223	 */
2224	if (segment_eq(get_fs(), KERNEL_DS))
2225		flags |= AOP_FLAG_UNINTERRUPTIBLE;
2226
2227	do {
2228		struct page *page;
2229		pgoff_t index;		/* Pagecache index for current page */
2230		unsigned long offset;	/* Offset into pagecache page */
2231		unsigned long bytes;	/* Bytes to write to page */
2232		size_t copied;		/* Bytes copied from user */
2233		void *fsdata;
2234
2235		offset = (pos & (PAGE_CACHE_SIZE - 1));
2236		index = pos >> PAGE_CACHE_SHIFT;
2237		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2238						iov_iter_count(i));
2239
2240again:
2241
2242		/*
2243		 * Bring in the user page that we will copy from _first_.
2244		 * Otherwise there's a nasty deadlock on copying from the
2245		 * same page as we're writing to, without it being marked
2246		 * up-to-date.
2247		 *
2248		 * Not only is this an optimisation, but it is also required
2249		 * to check that the address is actually valid, when atomic
2250		 * usercopies are used, below.
2251		 */
2252		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2253			status = -EFAULT;
2254			break;
2255		}
2256
2257		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2258						&page, &fsdata);
2259		if (unlikely(status))
2260			break;
2261
2262		pagefault_disable();
2263		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2264		pagefault_enable();
2265		flush_dcache_page(page);
2266
2267		status = a_ops->write_end(file, mapping, pos, bytes, copied,
2268						page, fsdata);
2269		if (unlikely(status < 0))
2270			break;
2271		copied = status;
2272
2273		cond_resched();
2274
2275		iov_iter_advance(i, copied);
2276		if (unlikely(copied == 0)) {
2277			/*
2278			 * If we were unable to copy any data at all, we must
2279			 * fall back to a single segment length write.
2280			 *
2281			 * If we didn't fall back here, we could livelock
2282			 * because not all segments in the iov can be copied at
2283			 * once without a pagefault.
2284			 */
2285			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2286						iov_iter_single_seg_count(i));
2287			goto again;
2288		}
2289		pos += copied;
2290		written += copied;
2291
2292		balance_dirty_pages_ratelimited(mapping);
2293
2294	} while (iov_iter_count(i));
2295
2296	return written ? written : status;
2297}
2298
2299ssize_t
2300generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2301		unsigned long nr_segs, loff_t pos, loff_t *ppos,
2302		size_t count, ssize_t written)
2303{
2304	struct file *file = iocb->ki_filp;
2305	struct address_space *mapping = file->f_mapping;
2306	const struct address_space_operations *a_ops = mapping->a_ops;
2307	struct inode *inode = mapping->host;
2308	ssize_t status;
2309	struct iov_iter i;
2310
2311	iov_iter_init(&i, iov, nr_segs, count, written);
2312	if (a_ops->write_begin)
2313		status = generic_perform_write(file, &i, pos);
2314	else
2315		status = generic_perform_write_2copy(file, &i, pos);
2316
2317	if (likely(status >= 0)) {
2318		written += status;
2319		*ppos = pos + status;
2320
2321		/*
2322		 * For now, when the user asks for O_SYNC, we'll actually give
2323		 * O_DSYNC
2324		 */
2325		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2326			if (!a_ops->writepage || !is_sync_kiocb(iocb))
2327				status = generic_osync_inode(inode, mapping,
2328						OSYNC_METADATA|OSYNC_DATA);
2329		}
2330	}
2331
2332	/*
2333	 * If we get here for O_DIRECT writes then we must have fallen through
2334	 * to buffered writes (block instantiation inside i_size).  So we sync
2335	 * the file data here, to try to honour O_DIRECT expectations.
2336	 */
2337	if (unlikely(file->f_flags & O_DIRECT) && written)
2338		status = filemap_write_and_wait(mapping);
2339
2340	return written ? written : status;
2341}
2342EXPORT_SYMBOL(generic_file_buffered_write);
2343
2344static ssize_t
2345__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
2346				unsigned long nr_segs, loff_t *ppos)
2347{
2348	struct file *file = iocb->ki_filp;
2349	struct address_space * mapping = file->f_mapping;
2350	size_t ocount;		/* original count */
2351	size_t count;		/* after file limit checks */
2352	struct inode 	*inode = mapping->host;
2353	loff_t		pos;
2354	ssize_t		written;
2355	ssize_t		err;
2356
2357	ocount = 0;
2358	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
2359	if (err)
2360		return err;
2361
2362	count = ocount;
2363	pos = *ppos;
2364
2365	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2366
2367	/* We can write back this queue in page reclaim */
2368	current->backing_dev_info = mapping->backing_dev_info;
2369	written = 0;
2370
2371	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2372	if (err)
2373		goto out;
2374
2375	if (count == 0)
2376		goto out;
2377
2378	err = remove_suid(file->f_path.dentry);
2379	if (err)
2380		goto out;
2381
2382	file_update_time(file);
2383
2384	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2385	if (unlikely(file->f_flags & O_DIRECT)) {
2386		loff_t endbyte;
2387		ssize_t written_buffered;
2388
2389		written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
2390							ppos, count, ocount);
2391		if (written < 0 || written == count)
2392			goto out;
2393		/*
2394		 * direct-io write to a hole: fall through to buffered I/O
2395		 * for completing the rest of the request.
2396		 */
2397		pos += written;
2398		count -= written;
2399		written_buffered = generic_file_buffered_write(iocb, iov,
2400						nr_segs, pos, ppos, count,
2401						written);
2402		/*
2403		 * If generic_file_buffered_write() returned a synchronous error
2404		 * then we want to return the number of bytes which were
2405		 * direct-written, or the error code if that was zero.  Note
2406		 * that this differs from normal direct-io semantics, which
2407		 * will return -EFOO even if some bytes were written.
2408		 */
2409		if (written_buffered < 0) {
2410			err = written_buffered;
2411			goto out;
2412		}
2413
2414		/*
2415		 * We need to ensure that the page cache pages are written to
2416		 * disk and invalidated to preserve the expected O_DIRECT
2417		 * semantics.
2418		 */
2419		endbyte = pos + written_buffered - written - 1;
2420		err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
2421					    SYNC_FILE_RANGE_WAIT_BEFORE|
2422					    SYNC_FILE_RANGE_WRITE|
2423					    SYNC_FILE_RANGE_WAIT_AFTER);
2424		if (err == 0) {
2425			written = written_buffered;
2426			invalidate_mapping_pages(mapping,
2427						 pos >> PAGE_CACHE_SHIFT,
2428						 endbyte >> PAGE_CACHE_SHIFT);
2429		} else {
2430			/*
2431			 * We don't know how much we wrote, so just return
2432			 * the number of bytes which were direct-written
2433			 */
2434		}
2435	} else {
2436		written = generic_file_buffered_write(iocb, iov, nr_segs,
2437				pos, ppos, count, written);
2438	}
2439out:
2440	current->backing_dev_info = NULL;
2441	return written ? written : err;
2442}
2443
2444ssize_t generic_file_aio_write_nolock(struct kiocb *iocb,
2445		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
2446{
2447	struct file *file = iocb->ki_filp;
2448	struct address_space *mapping = file->f_mapping;
2449	struct inode *inode = mapping->host;
2450	ssize_t ret;
2451
2452	BUG_ON(iocb->ki_pos != pos);
2453
2454	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
2455			&iocb->ki_pos);
2456
2457	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2458		ssize_t err;
2459
2460		err = sync_page_range_nolock(inode, mapping, pos, ret);
2461		if (err < 0)
2462			ret = err;
2463	}
2464	return ret;
2465}
2466EXPORT_SYMBOL(generic_file_aio_write_nolock);
2467
2468ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2469		unsigned long nr_segs, loff_t pos)
2470{
2471	struct file *file = iocb->ki_filp;
2472	struct address_space *mapping = file->f_mapping;
2473	struct inode *inode = mapping->host;
2474	ssize_t ret;
2475
2476	BUG_ON(iocb->ki_pos != pos);
2477
2478	mutex_lock(&inode->i_mutex);
2479	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
2480			&iocb->ki_pos);
2481	mutex_unlock(&inode->i_mutex);
2482
2483	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2484		ssize_t err;
2485
2486		err = sync_page_range(inode, mapping, pos, ret);
2487		if (err < 0)
2488			ret = err;
2489	}
2490	return ret;
2491}
2492EXPORT_SYMBOL(generic_file_aio_write);
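
/*
 * An illustrative sketch (hypothetical structure, not part of this file) of
 * how a simple filesystem typically wires the generic read/write paths into
 * its file_operations; the helpers referenced are the real exported ones.
 */
static const struct file_operations example_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
};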
2493
2494/*
2495 * Called under i_mutex for writes to S_ISREG files.  Returns -EIO if something
2496 * went wrong during pagecache shootdown.
2497 */
2498static ssize_t
2499generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2500	loff_t offset, unsigned long nr_segs)
2501{
2502	struct file *file = iocb->ki_filp;
2503	struct address_space *mapping = file->f_mapping;
2504	ssize_t retval;
2505	size_t write_len;
2506	pgoff_t end = 0; /* silence gcc */
2507
2508	/*
2509	 * If it's a write, unmap all mappings of the file up-front.  This
2510	 * will cause any pte dirty bits to be propagated into the pageframes
2511	 * for the subsequent filemap_write_and_wait().
2512	 */
2513	if (rw == WRITE) {
2514		write_len = iov_length(iov, nr_segs);
2515		end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
2516		if (mapping_mapped(mapping))
2517			unmap_mapping_range(mapping, offset, write_len, 0);
2518	}
2519
2520	retval = filemap_write_and_wait(mapping);
2521	if (retval)
2522		goto out;
2523
2524	/*
2525	 * After a write we want buffered reads to be sure to go to disk to get
2526	 * the new data.  We invalidate clean cached pages from the region we're
2527	 * about to write.  We do this *before* the write so that we can return
2528	 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
2529	 */
2530	if (rw == WRITE && mapping->nrpages) {
2531		retval = invalidate_inode_pages2_range(mapping,
2532					offset >> PAGE_CACHE_SHIFT, end);
2533		if (retval)
2534			goto out;
2535	}
2536
2537	retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
2538
2539	/*
2540	 * Finally, try again to invalidate clean pages which might have been
2541	 * cached by non-direct readahead, or faulted in by get_user_pages()
2542	 * if the source of the write was an mmap'ed region of the file
2543	 * we're writing.  Either one is a pretty crazy thing to do,
2544	 * so we don't support it 100%.  If this invalidation
2545	 * fails, tough, the write still worked...
2546	 */
2547	if (rw == WRITE && mapping->nrpages) {
2548		invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
2549	}
2550out:
2551	return retval;
2552}
2553
2554/**
2555 * try_to_release_page() - release old fs-specific metadata on a page
2556 *
2557 * @page: the page which the kernel is trying to free
2558 * @gfp_mask: memory allocation flags (and I/O mode)
2559 *
2560 * The address_space is asked to try to release any data held against the page
2561 * (presumably at page->private).  If the release was successful, return `1'.
2562 * Otherwise return zero.
2563 *
2564 * The @gfp_mask argument specifies whether I/O may be performed to release
2565 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
2566 *
2567 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
2568 */
2569int try_to_release_page(struct page *page, gfp_t gfp_mask)
2570{
2571	struct address_space * const mapping = page->mapping;
2572
2573	BUG_ON(!PageLocked(page));
2574	if (PageWriteback(page))
2575		return 0;
2576
2577	if (mapping && mapping->a_ops->releasepage)
2578		return mapping->a_ops->releasepage(page, gfp_mask);
2579	return try_to_free_buffers(page);
2580}
2581
2582EXPORT_SYMBOL(try_to_release_page);
2583
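/*
 * An illustrative sketch (hypothetical helper, not part of this file) of a
 * reclaim-style caller.  The page must be locked; a gfp_mask without
 * __GFP_IO/__GFP_WAIT asks the filesystem to drop the page's private data
 * only if it can do so without doing I/O or blocking.
 */
static int example_strip_page_metadata(struct page *page, gfp_t gfp_mask)
{
	if (!PagePrivate(page))
		return 1;		/* nothing attached, nothing to do */

	return try_to_release_page(page, gfp_mask);
}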