shmem.c revision 6d9d88d07e132259c35f9493b15429e19198489c
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

struct shmem_xattr {
	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
	char *name;		/* xattr name */
	size_t size;
	char value[0];
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

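/*
 * When the mount is limited (max_inodes set), every inode creation and
 * every new link takes one unit from free_inodes, failing with -ENOSPC
 * once the pool is exhausted; max_inodes == 0 means no limit, and then
 * nothing is counted at all.
 */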
static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error = 0;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapBacked(page));

	if (!expected)
		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (!error) {
		page_cache_get(page);
		page->mapping = mapping;
		page->index = index;

		spin_lock_irq(&mapping->tree_lock);
		if (!expected)
			error = radix_tree_insert(&mapping->page_tree,
							index, page);
		else
			error = shmem_radix_tree_replace(mapping, index,
							expected, page);
		if (!error) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			__inc_zone_page_state(page, NR_SHMEM);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			page->mapping = NULL;
			spin_unlock_irq(&mapping->tree_lock);
			page_cache_release(page);
		}
		if (!expected)
			radix_tree_preload_end();
	}
	if (error)
		mem_cgroup_uncharge_cache_page(page);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, indices, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * Otherwise, we must be storing a swap entry
			 * here as an exceptional entry: so return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = indices[i];
		pages[ret] = page;
		ret++;
	}
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		shmem_deswap_pagevec(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 */
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, partial, PAGE_CACHE_SIZE);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

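	/*
	 * Second pass: the loop above skipped any pages it could not
	 * trylock.  Now wait on each page lock in turn, and whenever a
	 * scan comes up empty, go back and rescan from the start, so
	 * that nothing in the range is left behind.
	 */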
	index = start;
	for ( ; ; ) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && indices[0] > end) {
			shmem_deswap_pagevec(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

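/*
 * Called at final iput.  Release everything the inode still holds: data
 * pages and swap for a regular file (or a page-backed symlink), the
 * kmalloc'ed target of a short symlink, and all xattrs; then return the
 * reserved inode to the superblock.
 */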
static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr, *nxattr;

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
		kfree(xattr->name);
		kfree(xattr);
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	end_writeback(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page *page)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	int error;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	error = shmem_add_to_page_cache(page, mapping, index,
						GFP_NOWAIT, radswap);
	/* which does mem_cgroup_uncharge_cache_page on error */

	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(page);
		set_page_dirty(page);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (!found)
		mem_cgroup_uncharge_cache_page(page);
	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;

	spol = mpol_cond_copy(&mpol,
			mpol_shared_policy_lookup(&info->policy, index));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	return swapin_readahead(swap, gfp, &pvma, 0);
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, and also free the swap entry,
 * since a page cannot live in both the swap cache and the page cache.
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_page(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	if (page || (sgp == SGP_READ && !swap.val)) {
		/*
		 * Once we can get the page lock, it must be uptodate:
		 * if there were an error in reading back from swap,
		 * the page would not be inserted into the filecache.
		 */
		BUG_ON(page && !PageUptodate(page));
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		/* Someone may have already done it for us */
		if (page->mapping) {
			if (page->mapping == mapping &&
			    page->index == index)
				goto done;
			error = -EEXIST;
			goto failed;
		}

		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error)
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		SetPageSwapBacked(page);
		__set_page_locked(page);
		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error)
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, NULL);
		if (error)
			goto decused;
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}
done:
	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto trunc;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
	 */
trunc:
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL) {
		struct page *test = find_get_page(mapping, index);
		if (test && !radix_tree_exceptional_entry(test))
			page_cache_release(test);
		/* Have another try if the entry has changed */
		if (test != swp_to_radix_entry(swap))
			error = -EEXIST;
	}
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)
		goto repeat;
	return error;
}

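/*
 * Fault in a page of a mapped tmpfs file: a thin wrapper around
 * shmem_getpage, returning the page locked, and counting a major
 * fault when the page had to come back from swap.
 */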
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret = VM_FAULT_LOCKED;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

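/*
 * SysV SHM_LOCK and SHM_UNLOCK: charge (or uncharge) the locked memory
 * to the user, and mark the mapping unevictable (or evictable again)
 * so that vmscan leaves its pages alone.
 */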
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

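/*
 * Allocate a new inode, charged against the superblock's inode limit,
 * and set it up according to type: regular file, directory, symlink
 * or special.
 */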
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		INIT_LIST_HEAD(&info->xattr_list);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

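/*
 * tmpfs has no backing store to read from before a partial-page write:
 * shmem_write_begin just asks shmem_getpage for a locked, uptodate page
 * (SGP_WRITE may go beyond i_size), and shmem_write_end then marks the
 * page dirty and updates i_size.
 */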
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

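/*
 * Copy data to userspace, page by page: like generic file read, but
 * holes need no page allocation, and are copied from the zero page
 * instead (except for reads on behalf of a stacking filesystem, see
 * below).
 */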
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}

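/*
 * splice support, much like generic_file_splice_read: but since tmpfs
 * has no readpage, any page in the range that is absent or not uptodate
 * is brought in with shmem_getpage instead.
 */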
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, pipe->buffers);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(pipe, &spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}

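/*
 * Report filesystem statistics: block and inode totals are shown only
 * when this mount is limited; otherwise those fields are left zero,
 * as simple_statfs leaves them.
 */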
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name,
						     shmem_initxattrs, NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
#ifdef CONFIG_TMPFS_POSIX_ACL
		error = generic_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
#else
		error = 0;
#endif
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
1596static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1597{
1598	struct inode *inode = old_dentry->d_inode;
1599	int they_are_dirs = S_ISDIR(inode->i_mode);
1600
1601	if (!simple_empty(new_dentry))
1602		return -ENOTEMPTY;
1603
1604	if (new_dentry->d_inode) {
1605		(void) shmem_unlink(new_dir, new_dentry);
1606		if (they_are_dirs)
1607			drop_nlink(old_dir);
1608	} else if (they_are_dirs) {
1609		drop_nlink(old_dir);
1610		inc_nlink(new_dir);
1611	}
1612
1613	old_dir->i_size -= BOGO_DIRENT_SIZE;
1614	new_dir->i_size += BOGO_DIRENT_SIZE;
1615	old_dir->i_ctime = old_dir->i_mtime =
1616	new_dir->i_ctime = new_dir->i_mtime =
1617	inode->i_ctime = CURRENT_TIME;
1618	return 0;
1619}
1620
1621static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1622{
1623	int error;
1624	int len;
1625	struct inode *inode;
1626	struct page *page;
1627	char *kaddr;
1628	struct shmem_inode_info *info;
1629
1630	len = strlen(symname) + 1;
1631	if (len > PAGE_CACHE_SIZE)
1632		return -ENAMETOOLONG;
1633
1634	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
1635	if (!inode)
1636		return -ENOSPC;
1637
1638	error = security_inode_init_security(inode, dir, &dentry->d_name,
1639					     shmem_initxattrs, NULL);
1640	if (error) {
1641		if (error != -EOPNOTSUPP) {
1642			iput(inode);
1643			return error;
1644		}
1645		error = 0;
1646	}
1647
1648	info = SHMEM_I(inode);
1649	inode->i_size = len-1;
1650	if (len <= SHORT_SYMLINK_LEN) {
1651		info->symlink = kmemdup(symname, len, GFP_KERNEL);
1652		if (!info->symlink) {
1653			iput(inode);
1654			return -ENOMEM;
1655		}
1656		inode->i_op = &shmem_short_symlink_operations;
1657	} else {
1658		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1659		if (error) {
1660			iput(inode);
1661			return error;
1662		}
1663		inode->i_mapping->a_ops = &shmem_aops;
1664		inode->i_op = &shmem_symlink_inode_operations;
1665		kaddr = kmap_atomic(page);
1666		memcpy(kaddr, symname, len);
1667		kunmap_atomic(kaddr);
1668		set_page_dirty(page);
1669		unlock_page(page);
1670		page_cache_release(page);
1671	}
1672	dir->i_size += BOGO_DIRENT_SIZE;
1673	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1674	d_instantiate(dentry, inode);
1675	dget(dentry);
1676	return 0;
1677}
1678
1679static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
1680{
1681	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
1682	return NULL;
1683}
1684
1685static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1686{
1687	struct page *page = NULL;
1688	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1689	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
1690	if (page)
1691		unlock_page(page);
1692	return page;
1693}
1694
1695static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1696{
1697	if (!IS_ERR(nd_get_link(nd))) {
1698		struct page *page = cookie;
1699		kunmap(page);
1700		mark_page_accessed(page);
1701		page_cache_release(page);
1702	}
1703}
1704
1705#ifdef CONFIG_TMPFS_XATTR
1706/*
1707 * Superblocks without xattr inode operations may get some security.* xattr
1708 * support from the LSM "for free". As soon as we have any other xattrs
1709 * like ACLs, we also need to implement the security.* handlers at
1710 * filesystem level, though.
1711 */
1712
1713/*
1714 * Allocate new xattr and copy in the value; but leave the name to callers.
1715 */
1716static struct shmem_xattr *shmem_xattr_alloc(const void *value, size_t size)
1717{
1718	struct shmem_xattr *new_xattr;
1719	size_t len;
1720
1721	/* wrap around? */
1722	len = sizeof(*new_xattr) + size;
1723	if (len <= sizeof(*new_xattr))
1724		return NULL;
1725
1726	new_xattr = kmalloc(len, GFP_KERNEL);
1727	if (!new_xattr)
1728		return NULL;
1729
1730	new_xattr->size = size;
1731	memcpy(new_xattr->value, value, size);
1732	return new_xattr;
1733}
1734
1735/*
1736 * Callback for security_inode_init_security() for acquiring xattrs.
1737 */
1738static int shmem_initxattrs(struct inode *inode,
1739			    const struct xattr *xattr_array,
1740			    void *fs_info)
1741{
1742	struct shmem_inode_info *info = SHMEM_I(inode);
1743	const struct xattr *xattr;
1744	struct shmem_xattr *new_xattr;
1745	size_t len;
1746
1747	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
1748		new_xattr = shmem_xattr_alloc(xattr->value, xattr->value_len);
1749		if (!new_xattr)
1750			return -ENOMEM;
1751
1752		len = strlen(xattr->name) + 1;
1753		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
1754					  GFP_KERNEL);
1755		if (!new_xattr->name) {
1756			kfree(new_xattr);
1757			return -ENOMEM;
1758		}
1759
1760		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
1761		       XATTR_SECURITY_PREFIX_LEN);
1762		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
1763		       xattr->name, len);
1764
1765		spin_lock(&info->lock);
1766		list_add(&new_xattr->list, &info->xattr_list);
1767		spin_unlock(&info->lock);
1768	}
1769
1770	return 0;
1771}
1772
1773static int shmem_xattr_get(struct dentry *dentry, const char *name,
1774			   void *buffer, size_t size)
1775{
1776	struct shmem_inode_info *info;
1777	struct shmem_xattr *xattr;
1778	int ret = -ENODATA;
1779
1780	info = SHMEM_I(dentry->d_inode);
1781
1782	spin_lock(&info->lock);
1783	list_for_each_entry(xattr, &info->xattr_list, list) {
1784		if (strcmp(name, xattr->name))
1785			continue;
1786
1787		ret = xattr->size;
1788		if (buffer) {
1789			if (size < xattr->size)
1790				ret = -ERANGE;
1791			else
1792				memcpy(buffer, xattr->value, xattr->size);
1793		}
1794		break;
1795	}
1796	spin_unlock(&info->lock);
1797	return ret;
1798}
1799
1800static int shmem_xattr_set(struct inode *inode, const char *name,
1801			   const void *value, size_t size, int flags)
1802{
1803	struct shmem_inode_info *info = SHMEM_I(inode);
1804	struct shmem_xattr *xattr;
1805	struct shmem_xattr *new_xattr = NULL;
1806	int err = 0;
1807
1808	/* value == NULL means remove */
1809	if (value) {
1810		new_xattr = shmem_xattr_alloc(value, size);
1811		if (!new_xattr)
1812			return -ENOMEM;
1813
1814		new_xattr->name = kstrdup(name, GFP_KERNEL);
1815		if (!new_xattr->name) {
1816			kfree(new_xattr);
1817			return -ENOMEM;
1818		}
1819	}
1820
1821	spin_lock(&info->lock);
1822	list_for_each_entry(xattr, &info->xattr_list, list) {
1823		if (!strcmp(name, xattr->name)) {
1824			if (flags & XATTR_CREATE) {
1825				xattr = new_xattr;
1826				err = -EEXIST;
1827			} else if (new_xattr) {
1828				list_replace(&xattr->list, &new_xattr->list);
1829			} else {
1830				list_del(&xattr->list);
1831			}
1832			goto out;
1833		}
1834	}
1835	if (flags & XATTR_REPLACE) {
1836		xattr = new_xattr;
1837		err = -ENODATA;
1838	} else {
1839		list_add(&new_xattr->list, &info->xattr_list);
1840		xattr = NULL;
1841	}
1842out:
1843	spin_unlock(&info->lock);
1844	if (xattr)
1845		kfree(xattr->name);
1846	kfree(xattr);
1847	return err;
1848}
1849
1850static const struct xattr_handler *shmem_xattr_handlers[] = {
1851#ifdef CONFIG_TMPFS_POSIX_ACL
1852	&generic_acl_access_handler,
1853	&generic_acl_default_handler,
1854#endif
1855	NULL
1856};
1857
1858static int shmem_xattr_validate(const char *name)
1859{
1860	struct { const char *prefix; size_t len; } arr[] = {
1861		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
1862		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
1863	};
1864	int i;
1865
1866	for (i = 0; i < ARRAY_SIZE(arr); i++) {
1867		size_t preflen = arr[i].len;
1868		if (strncmp(name, arr[i].prefix, preflen) == 0) {
1869			if (!name[preflen])
1870				return -EINVAL;
1871			return 0;
1872		}
1873	}
1874	return -EOPNOTSUPP;
1875}
1876
1877static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
1878			      void *buffer, size_t size)
1879{
1880	int err;
1881
1882	/*
1883	 * If this is a request for a synthetic attribute in the system.*
1884	 * namespace use the generic infrastructure to resolve a handler
1885	 * for it via sb->s_xattr.
1886	 */
1887	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1888		return generic_getxattr(dentry, name, buffer, size);
1889
1890	err = shmem_xattr_validate(name);
1891	if (err)
1892		return err;
1893
1894	return shmem_xattr_get(dentry, name, buffer, size);
1895}
1896
1897static int shmem_setxattr(struct dentry *dentry, const char *name,
1898			  const void *value, size_t size, int flags)
1899{
1900	int err;
1901
1902	/*
1903	 * If this is a request for a synthetic attribute in the system.*
1904	 * namespace use the generic infrastructure to resolve a handler
1905	 * for it via sb->s_xattr.
1906	 */
1907	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1908		return generic_setxattr(dentry, name, value, size, flags);
1909
1910	err = shmem_xattr_validate(name);
1911	if (err)
1912		return err;
1913
1914	if (size == 0)
1915		value = "";  /* empty EA, do not remove */
1916
1917	return shmem_xattr_set(dentry->d_inode, name, value, size, flags);
1918
1919}
1920
1921static int shmem_removexattr(struct dentry *dentry, const char *name)
1922{
1923	int err;
1924
1925	/*
1926	 * If this is a request for a synthetic attribute in the system.*
1927	 * namespace use the generic infrastructure to resolve a handler
1928	 * for it via sb->s_xattr.
1929	 */
1930	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1931		return generic_removexattr(dentry, name);
1932
1933	err = shmem_xattr_validate(name);
1934	if (err)
1935		return err;
1936
1937	return shmem_xattr_set(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
1938}
1939
1940static bool xattr_is_trusted(const char *name)
1941{
1942	return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
1943}
1944
1945static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
1946{
1947	bool trusted = capable(CAP_SYS_ADMIN);
1948	struct shmem_xattr *xattr;
1949	struct shmem_inode_info *info;
1950	size_t used = 0;
1951
1952	info = SHMEM_I(dentry->d_inode);
1953
1954	spin_lock(&info->lock);
1955	list_for_each_entry(xattr, &info->xattr_list, list) {
1956		size_t len;
1957
1958		/* skip "trusted." attributes for unprivileged callers */
1959		if (!trusted && xattr_is_trusted(xattr->name))
1960			continue;
1961
1962		len = strlen(xattr->name) + 1;
1963		used += len;
1964		if (buffer) {
1965			if (size < used) {
1966				used = -ERANGE;
1967				break;
1968			}
1969			memcpy(buffer, xattr->name, len);
1970			buffer += len;
1971		}
1972	}
1973	spin_unlock(&info->lock);
1974
1975	return used;
1976}
1977#endif /* CONFIG_TMPFS_XATTR */
1978
1979static const struct inode_operations shmem_short_symlink_operations = {
1980	.readlink	= generic_readlink,
1981	.follow_link	= shmem_follow_short_symlink,
1982#ifdef CONFIG_TMPFS_XATTR
1983	.setxattr	= shmem_setxattr,
1984	.getxattr	= shmem_getxattr,
1985	.listxattr	= shmem_listxattr,
1986	.removexattr	= shmem_removexattr,
1987#endif
1988};
1989
1990static const struct inode_operations shmem_symlink_inode_operations = {
1991	.readlink	= generic_readlink,
1992	.follow_link	= shmem_follow_link,
1993	.put_link	= shmem_put_link,
1994#ifdef CONFIG_TMPFS_XATTR
1995	.setxattr	= shmem_setxattr,
1996	.getxattr	= shmem_getxattr,
1997	.listxattr	= shmem_listxattr,
1998	.removexattr	= shmem_removexattr,
1999#endif
2000};
2001
2002static struct dentry *shmem_get_parent(struct dentry *child)
2003{
2004	return ERR_PTR(-ESTALE);
2005}
2006
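/*
 * NFS file handle layout, as produced by shmem_encode_fh() below:
 * fh[0] = i_generation, fh[1] = low 32 bits of i_ino, fh[2] = high 32
 * bits of i_ino.  Inodes are hashed at i_ino + i_generation, which is
 * the key ilookup5() is given in shmem_fh_to_dentry().
 */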
2007static int shmem_match(struct inode *ino, void *vfh)
2008{
2009	__u32 *fh = vfh;
2010	__u64 inum = fh[2];
2011	inum = (inum << 32) | fh[1];
2012	return ino->i_ino == inum && fh[0] == ino->i_generation;
2013}
2014
2015static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2016		struct fid *fid, int fh_len, int fh_type)
2017{
2018	struct inode *inode;
2019	struct dentry *dentry = NULL;
2020	u64 inum;
2021
2022	if (fh_len < 3)		/* don't read raw[1..2] of a short handle */
2023		return NULL;
2024	inum = ((u64)fid->raw[2] << 32) | fid->raw[1];
2025
2026	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2027			shmem_match, fid->raw);
2028	if (inode) {
2029		dentry = d_find_alias(inode);
2030		iput(inode);
2031	}
2032
2033	return dentry;
2034}
2035
2036static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2037				int connectable)
2038{
2039	struct inode *inode = dentry->d_inode;
2040
2041	if (*len < 3) {
2042		*len = 3;
2043		return 255;
2044	}
2045
2046	if (inode_unhashed(inode)) {
2047		/* Unfortunately insert_inode_hash is not idempotent,
2048		 * so as we hash inodes here rather than at creation
2049		 * time, we need a lock to ensure we only try
2050		 * to do it once
2051		 */
2052		static DEFINE_SPINLOCK(lock);
2053		spin_lock(&lock);
2054		if (inode_unhashed(inode))
2055			__insert_inode_hash(inode,
2056					    inode->i_ino + inode->i_generation);
2057		spin_unlock(&lock);
2058	}
2059
2060	fh[0] = inode->i_generation;
2061	fh[1] = inode->i_ino;
2062	fh[2] = ((__u64)inode->i_ino) >> 32;
2063
2064	*len = 3;
2065	return 1;
2066}
2067
2068static const struct export_operations shmem_export_ops = {
2069	.get_parent     = shmem_get_parent,
2070	.encode_fh      = shmem_encode_fh,
2071	.fh_to_dentry	= shmem_fh_to_dentry,
2072};
2073
2074static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2075			       bool remount)
2076{
2077	char *this_char, *value, *rest;
2078
2079	while (options != NULL) {
2080		this_char = options;
2081		for (;;) {
2082			/*
2083			 * NUL-terminate this option: unfortunately,
2084			 * mount options form a comma-separated list,
2085			 * but mpol's nodelist may also contain commas.
2086			 */
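			/*
			 * e.g. in "mpol=bind:0,2,size=50%" the comma
			 * inside the nodelist is followed by a digit
			 * and is skipped; the split happens at the
			 * comma before "size".
			 */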
2087			options = strchr(options, ',');
2088			if (options == NULL)
2089				break;
2090			options++;
2091			if (!isdigit(*options)) {
2092				options[-1] = '\0';
2093				break;
2094			}
2095		}
2096		if (!*this_char)
2097			continue;
2098		if ((value = strchr(this_char,'=')) != NULL) {
2099			*value++ = 0;
2100		} else {
2101			printk(KERN_ERR
2102			    "tmpfs: No value for mount option '%s'\n",
2103			    this_char);
2104			return 1;
2105		}
2106
2107		if (!strcmp(this_char,"size")) {
2108			unsigned long long size;
2109			size = memparse(value,&rest);
2110			if (*rest == '%') {
2111				size <<= PAGE_SHIFT;
2112				size *= totalram_pages;
2113				do_div(size, 100);
2114				rest++;
2115			}
2116			if (*rest)
2117				goto bad_val;
2118			sbinfo->max_blocks =
2119				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2120		} else if (!strcmp(this_char,"nr_blocks")) {
2121			sbinfo->max_blocks = memparse(value, &rest);
2122			if (*rest)
2123				goto bad_val;
2124		} else if (!strcmp(this_char,"nr_inodes")) {
2125			sbinfo->max_inodes = memparse(value, &rest);
2126			if (*rest)
2127				goto bad_val;
2128		} else if (!strcmp(this_char,"mode")) {
2129			if (remount)
2130				continue;
2131			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2132			if (*rest)
2133				goto bad_val;
2134		} else if (!strcmp(this_char,"uid")) {
2135			if (remount)
2136				continue;
2137			sbinfo->uid = simple_strtoul(value, &rest, 0);
2138			if (*rest)
2139				goto bad_val;
2140		} else if (!strcmp(this_char,"gid")) {
2141			if (remount)
2142				continue;
2143			sbinfo->gid = simple_strtoul(value, &rest, 0);
2144			if (*rest)
2145				goto bad_val;
2146		} else if (!strcmp(this_char,"mpol")) {
2147			if (mpol_parse_str(value, &sbinfo->mpol, 1))
2148				goto bad_val;
2149		} else {
2150			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2151			       this_char);
2152			return 1;
2153		}
2154	}
2155	return 0;
2156
2157bad_val:
2158	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2159	       value, this_char);
2160	return 1;
2162}
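
/*
 * Example (sketch): "size=50%,nr_inodes=100k,mode=1777,uid=1000" sets
 * max_blocks to half of totalram_pages, max_inodes to 102400 (memparse
 * handles the k/m/g suffixes), mode to 01777 and uid to 1000.  An
 * unknown option, or trailing junk after a value, makes this return 1,
 * which the callers turn into -EINVAL.
 */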
2163
2164static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2165{
2166	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2167	struct shmem_sb_info config = *sbinfo;
2168	unsigned long inodes;
2169	int error = -EINVAL;
2170
2171	if (shmem_parse_options(data, &config, true))
2172		return error;
2173
2174	spin_lock(&sbinfo->stat_lock);
2175	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2176	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2177		goto out;
2178	if (config.max_inodes < inodes)
2179		goto out;
2180	/*
2181	 * Those tests disallow limited->unlimited while any are in use;
2182	 * but we must separately disallow unlimited->limited, because
2183	 * in that case we have no record of how much is already in use.
2184	 */
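	/*
	 * e.g. an in-use mount cannot be remounted with "nr_blocks=0"
	 * (limited->unlimited, caught by the used_blocks test above),
	 * and once unlimited it cannot go back to a finite "nr_blocks"
	 * (unlimited->limited, caught just below).
	 */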
2185	if (config.max_blocks && !sbinfo->max_blocks)
2186		goto out;
2187	if (config.max_inodes && !sbinfo->max_inodes)
2188		goto out;
2189
2190	error = 0;
2191	sbinfo->max_blocks  = config.max_blocks;
2192	sbinfo->max_inodes  = config.max_inodes;
2193	sbinfo->free_inodes = config.max_inodes - inodes;
2194
2195	mpol_put(sbinfo->mpol);
2196	sbinfo->mpol        = config.mpol;	/* transfers initial ref */
2197out:
2198	spin_unlock(&sbinfo->stat_lock);
2199	return error;
2200}
2201
2202static int shmem_show_options(struct seq_file *seq, struct dentry *root)
2203{
2204	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
2205
2206	if (sbinfo->max_blocks != shmem_default_max_blocks())
2207		seq_printf(seq, ",size=%luk",
2208			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2209	if (sbinfo->max_inodes != shmem_default_max_inodes())
2210		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2211	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2212		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
2213	if (sbinfo->uid != 0)
2214		seq_printf(seq, ",uid=%u", sbinfo->uid);
2215	if (sbinfo->gid != 0)
2216		seq_printf(seq, ",gid=%u", sbinfo->gid);
2217	shmem_show_mpol(seq, sbinfo->mpol);
2218	return 0;
2219}
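
/*
 * A non-default mount from the parse example above would show in
 * /proc/mounts as something like (a sketch; the size depends on
 * PAGE_SIZE and total RAM):
 *	tmpfs /mnt tmpfs rw,size=2097152k,nr_inodes=102400,mode=1777,uid=1000 0 0
 * Default values are suppressed, so an unconfigured mount shows none.
 */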
2220#endif /* CONFIG_TMPFS */
2221
2222static void shmem_put_super(struct super_block *sb)
2223{
2224	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2225
2226	percpu_counter_destroy(&sbinfo->used_blocks);
2227	kfree(sbinfo);
2228	sb->s_fs_info = NULL;
2229}
2230
2231int shmem_fill_super(struct super_block *sb, void *data, int silent)
2232{
2233	struct inode *inode;
2234	struct dentry *root;
2235	struct shmem_sb_info *sbinfo;
2236	int err = -ENOMEM;
2237
2238	/* Round up to L1_CACHE_BYTES to resist false sharing */
2239	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2240				L1_CACHE_BYTES), GFP_KERNEL);
2241	if (!sbinfo)
2242		return -ENOMEM;
2243
2244	sbinfo->mode = S_IRWXUGO | S_ISVTX;
2245	sbinfo->uid = current_fsuid();
2246	sbinfo->gid = current_fsgid();
2247	sb->s_fs_info = sbinfo;
2248
2249#ifdef CONFIG_TMPFS
2250	/*
2251	 * By default we only allow half of the physical RAM per
2252	 * tmpfs instance, limiting inodes to one per page of lowmem;
2253	 * but the internal instance is left unlimited.
2254	 */
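	/*
	 * e.g. with 4GiB of RAM and 4KiB pages, an unconfigured mount
	 * gets max_blocks of roughly 512k pages (2GiB).  These numbers
	 * are a sketch; the real values come from totalram_pages at
	 * mount time.
	 */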
2255	if (!(sb->s_flags & MS_NOUSER)) {
2256		sbinfo->max_blocks = shmem_default_max_blocks();
2257		sbinfo->max_inodes = shmem_default_max_inodes();
2258		if (shmem_parse_options(data, sbinfo, false)) {
2259			err = -EINVAL;
2260			goto failed;
2261		}
2262	}
2263	sb->s_export_op = &shmem_export_ops;
2264#else
2265	sb->s_flags |= MS_NOUSER;
2266#endif
2267
2268	spin_lock_init(&sbinfo->stat_lock);
2269	if (percpu_counter_init(&sbinfo->used_blocks, 0))
2270		goto failed;
2271	sbinfo->free_inodes = sbinfo->max_inodes;
2272
2273	sb->s_maxbytes = MAX_LFS_FILESIZE;
2274	sb->s_blocksize = PAGE_CACHE_SIZE;
2275	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2276	sb->s_magic = TMPFS_MAGIC;
2277	sb->s_op = &shmem_ops;
2278	sb->s_time_gran = 1;
2279#ifdef CONFIG_TMPFS_XATTR
2280	sb->s_xattr = shmem_xattr_handlers;
2281#endif
2282#ifdef CONFIG_TMPFS_POSIX_ACL
2283	sb->s_flags |= MS_POSIXACL;
2284#endif
2285
2286	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2287	if (!inode)
2288		goto failed;
2289	inode->i_uid = sbinfo->uid;
2290	inode->i_gid = sbinfo->gid;
2291	root = d_alloc_root(inode);
2292	if (!root)
2293		goto failed_iput;
2294	sb->s_root = root;
2295	return 0;
2296
2297failed_iput:
2298	iput(inode);
2299failed:
2300	shmem_put_super(sb);
2301	return err;
2302}
2303
2304static struct kmem_cache *shmem_inode_cachep;
2305
2306static struct inode *shmem_alloc_inode(struct super_block *sb)
2307{
2308	struct shmem_inode_info *info;
2309	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2310	if (!info)
2311		return NULL;
2312	return &info->vfs_inode;
2313}
2314
2315static void shmem_destroy_callback(struct rcu_head *head)
2316{
2317	struct inode *inode = container_of(head, struct inode, i_rcu);
2318	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2319}
2320
2321static void shmem_destroy_inode(struct inode *inode)
2322{
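	/* only regular files carry a shared NUMA policy to free */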
2323	if (S_ISREG(inode->i_mode))
2324		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2325	call_rcu(&inode->i_rcu, shmem_destroy_callback);
2326}
2327
2328static void shmem_init_inode(void *foo)
2329{
2330	struct shmem_inode_info *info = foo;
2331	inode_init_once(&info->vfs_inode);
2332}
2333
2334static int shmem_init_inodecache(void)
2335{
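	/* SLAB_PANIC: allocation failure panics, so this always returns 0 */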
2336	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2337				sizeof(struct shmem_inode_info),
2338				0, SLAB_PANIC, shmem_init_inode);
2339	return 0;
2340}
2341
2342static void shmem_destroy_inodecache(void)
2343{
2344	kmem_cache_destroy(shmem_inode_cachep);
2345}
2346
2347static const struct address_space_operations shmem_aops = {
2348	.writepage	= shmem_writepage,
2349	.set_page_dirty	= __set_page_dirty_no_writeback,
2350#ifdef CONFIG_TMPFS
2351	.write_begin	= shmem_write_begin,
2352	.write_end	= shmem_write_end,
2353#endif
2354	.migratepage	= migrate_page,
2355	.error_remove_page = generic_error_remove_page,
2356};
2357
2358static const struct file_operations shmem_file_operations = {
2359	.mmap		= shmem_mmap,
2360#ifdef CONFIG_TMPFS
2361	.llseek		= generic_file_llseek,
2362	.read		= do_sync_read,
2363	.write		= do_sync_write,
2364	.aio_read	= shmem_file_aio_read,
2365	.aio_write	= generic_file_aio_write,
2366	.fsync		= noop_fsync,
2367	.splice_read	= shmem_file_splice_read,
2368	.splice_write	= generic_file_splice_write,
2369#endif
2370};
2371
2372static const struct inode_operations shmem_inode_operations = {
2373	.setattr	= shmem_setattr,
2374	.truncate_range	= shmem_truncate_range,
2375#ifdef CONFIG_TMPFS_XATTR
2376	.setxattr	= shmem_setxattr,
2377	.getxattr	= shmem_getxattr,
2378	.listxattr	= shmem_listxattr,
2379	.removexattr	= shmem_removexattr,
2380#endif
2381};
2382
2383static const struct inode_operations shmem_dir_inode_operations = {
2384#ifdef CONFIG_TMPFS
2385	.create		= shmem_create,
2386	.lookup		= simple_lookup,
2387	.link		= shmem_link,
2388	.unlink		= shmem_unlink,
2389	.symlink	= shmem_symlink,
2390	.mkdir		= shmem_mkdir,
2391	.rmdir		= shmem_rmdir,
2392	.mknod		= shmem_mknod,
2393	.rename		= shmem_rename,
2394#endif
2395#ifdef CONFIG_TMPFS_XATTR
2396	.setxattr	= shmem_setxattr,
2397	.getxattr	= shmem_getxattr,
2398	.listxattr	= shmem_listxattr,
2399	.removexattr	= shmem_removexattr,
2400#endif
2401#ifdef CONFIG_TMPFS_POSIX_ACL
2402	.setattr	= shmem_setattr,
2403#endif
2404};
2405
2406static const struct inode_operations shmem_special_inode_operations = {
2407#ifdef CONFIG_TMPFS_XATTR
2408	.setxattr	= shmem_setxattr,
2409	.getxattr	= shmem_getxattr,
2410	.listxattr	= shmem_listxattr,
2411	.removexattr	= shmem_removexattr,
2412#endif
2413#ifdef CONFIG_TMPFS_POSIX_ACL
2414	.setattr	= shmem_setattr,
2415#endif
2416};
2417
2418static const struct super_operations shmem_ops = {
2419	.alloc_inode	= shmem_alloc_inode,
2420	.destroy_inode	= shmem_destroy_inode,
2421#ifdef CONFIG_TMPFS
2422	.statfs		= shmem_statfs,
2423	.remount_fs	= shmem_remount_fs,
2424	.show_options	= shmem_show_options,
2425#endif
2426	.evict_inode	= shmem_evict_inode,
2427	.drop_inode	= generic_delete_inode,
2428	.put_super	= shmem_put_super,
2429};
2430
2431static const struct vm_operations_struct shmem_vm_ops = {
2432	.fault		= shmem_fault,
2433#ifdef CONFIG_NUMA
2434	.set_policy     = shmem_set_policy,
2435	.get_policy     = shmem_get_policy,
2436#endif
2437};
2438
2439static struct dentry *shmem_mount(struct file_system_type *fs_type,
2440	int flags, const char *dev_name, void *data)
2441{
2442	return mount_nodev(fs_type, flags, data, shmem_fill_super);
2443}
2444
2445static struct file_system_type shmem_fs_type = {
2446	.owner		= THIS_MODULE,
2447	.name		= "tmpfs",
2448	.mount		= shmem_mount,
2449	.kill_sb	= kill_litter_super,
2450};
2451
2452int __init shmem_init(void)
2453{
2454	int error;
2455
2456	error = bdi_init(&shmem_backing_dev_info);
2457	if (error)
2458		goto out4;
2459
2460	error = shmem_init_inodecache();
2461	if (error)
2462		goto out3;
2463
2464	error = register_filesystem(&shmem_fs_type);
2465	if (error) {
2466		printk(KERN_ERR "Could not register tmpfs\n");
2467		goto out2;
2468	}
2469
2470	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
2471				 shmem_fs_type.name, NULL);
2472	if (IS_ERR(shm_mnt)) {
2473		error = PTR_ERR(shm_mnt);
2474		printk(KERN_ERR "Could not kern_mount tmpfs\n");
2475		goto out1;
2476	}
2477	return 0;
2478
2479out1:
2480	unregister_filesystem(&shmem_fs_type);
2481out2:
2482	shmem_destroy_inodecache();
2483out3:
2484	bdi_destroy(&shmem_backing_dev_info);
2485out4:
2486	shm_mnt = ERR_PTR(error);
2487	return error;
2488}
2489
2490#else /* !CONFIG_SHMEM */
2491
2492/*
2493 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2494 *
2495 * This is intended for small systems where the benefits of the full
2496 * shmem code (swap-backed and resource-limited) are outweighed by
2497 * its complexity. On systems without swap this code should be
2498 * effectively equivalent, but much lighter weight.
2499 */
2500
2501#include <linux/ramfs.h>
2502
2503static struct file_system_type shmem_fs_type = {
2504	.name		= "tmpfs",
2505	.mount		= ramfs_mount,
2506	.kill_sb	= kill_litter_super,
2507};
2508
2509int __init shmem_init(void)
2510{
2511	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
2512
2513	shm_mnt = kern_mount(&shmem_fs_type);
2514	BUG_ON(IS_ERR(shm_mnt));
2515
2516	return 0;
2517}
2518
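/* The swap-related entry points become no-ops without CONFIG_SHMEM. */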
2519int shmem_unuse(swp_entry_t swap, struct page *page)
2520{
2521	return 0;
2522}
2523
2524int shmem_lock(struct file *file, int lock, struct user_struct *user)
2525{
2526	return 0;
2527}
2528
2529void shmem_unlock_mapping(struct address_space *mapping)
2530{
2531}
2532
2533void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
2534{
2535	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
2536}
2537EXPORT_SYMBOL_GPL(shmem_truncate_range);
2538
2539#define shmem_vm_ops				generic_file_vm_ops
2540#define shmem_file_operations			ramfs_file_operations
2541#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
2542#define shmem_acct_size(flags, size)		0
2543#define shmem_unacct_size(flags, size)		do {} while (0)
2544
2545#endif /* CONFIG_SHMEM */
2546
2547/* common code */
2548
2549/**
2550 * shmem_file_setup - get an unlinked file living in tmpfs
2551 * @name: name for dentry (to be seen in /proc/<pid>/maps)
2552 * @size: size to be set for the file
2553 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2554 */
2555struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2556{
2557	int error;
2558	struct file *file;
2559	struct inode *inode;
2560	struct path path;
2561	struct dentry *root;
2562	struct qstr this;
2563
2564	if (IS_ERR(shm_mnt))
2565		return ERR_CAST(shm_mnt);
2566
2567	if (size < 0 || size > MAX_LFS_FILESIZE)
2568		return ERR_PTR(-EINVAL);
2569
2570	if (shmem_acct_size(flags, size))
2571		return ERR_PTR(-ENOMEM);
2572
2573	error = -ENOMEM;
2574	this.name = name;
2575	this.len = strlen(name);
2576	this.hash = 0; /* will go */
2577	root = shm_mnt->mnt_root;
2578	path.dentry = d_alloc(root, &this);
2579	if (!path.dentry)
2580		goto put_memory;
2581	path.mnt = mntget(shm_mnt);
2582
2583	error = -ENOSPC;
2584	inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
2585	if (!inode)
2586		goto put_dentry;
2587
2588	d_instantiate(path.dentry, inode);
2589	inode->i_size = size;
2590	clear_nlink(inode);	/* It is unlinked */
2591#ifndef CONFIG_MMU
2592	error = ramfs_nommu_expand_for_mapping(inode, size);
2593	if (error)
2594		goto put_dentry;
2595#endif
2596
2597	error = -ENFILE;
2598	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
2599		  &shmem_file_operations);
2600	if (!file)
2601		goto put_dentry;
2602
2603	return file;
2604
2605put_dentry:
2606	path_put(&path);
2607put_memory:
2608	shmem_unacct_size(flags, size);
2609	return ERR_PTR(error);
2610}
2611EXPORT_SYMBOL_GPL(shmem_file_setup);
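
/*
 * Example (sketch, error handling trimmed): kernel code can back an
 * anonymous buffer with an unlinked tmpfs file.  "my-buffer" is a
 * made-up cosmetic name, visible only via /proc/<pid>/maps:
 *
 *	struct file *file;
 *
 *	file = shmem_file_setup("my-buffer", 1024 * 1024, VM_NORESERVE);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...
 *	fput(file);		dropping the last reference frees the pages
 */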
2612
2613/**
2614 * shmem_zero_setup - setup a shared anonymous mapping
2615 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2616 */
2617int shmem_zero_setup(struct vm_area_struct *vma)
2618{
2619	struct file *file;
2620	loff_t size = vma->vm_end - vma->vm_start;
2621
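	/* the name shows as "/dev/zero (deleted)" in /proc/<pid>/maps */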
2622	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2623	if (IS_ERR(file))
2624		return PTR_ERR(file);
2625
2626	if (vma->vm_file)
2627		fput(vma->vm_file);
2628	vma->vm_file = file;
2629	vma->vm_ops = &shmem_vm_ops;
2630	vma->vm_flags |= VM_CAN_NONLINEAR;
2631	return 0;
2632}
2633
2634/**
2635 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
2636 * @mapping:	the page's address_space
2637 * @index:	the page index
2638 * @gfp:	the page allocator flags to use if allocating
2639 *
2640 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
2641 * with any new page allocations done using the specified allocation flags.
2642 * But read_cache_page_gfp() uses the ->readpage() method, which does not
2643 * suit tmpfs, since it may have pages in swapcache, and needs to find those
2644 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
2645 *
2646 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
2647 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
2648 */
2649struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
2650					 pgoff_t index, gfp_t gfp)
2651{
2652#ifdef CONFIG_SHMEM
2653	struct inode *inode = mapping->host;
2654	struct page *page;
2655	int error;
2656
2657	BUG_ON(mapping->a_ops != &shmem_aops);
2658	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
2659	if (error)
2660		page = ERR_PTR(error);
2661	else
2662		unlock_page(page);
2663	return page;
2664#else
2665	/*
2666	 * The tiny !SHMEM case uses ramfs without swap
2667	 */
2668	return read_cache_page_gfp(mapping, index, gfp);
2669#endif
2670}
2671EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
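
/*
 * Example (sketch): a driver reading object pages, mixing in
 * __GFP_NORETRY | __GFP_NOWARN as the i915 note above describes:
 *
 *	page = shmem_read_mapping_page_gfp(mapping, index,
 *				mapping_gfp_mask(mapping) |
 *				__GFP_NORETRY | __GFP_NOWARN);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	page_cache_release(page);	drop the reference when done
 */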
2672